{"seq_id":"385079171","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*\nimport pandas as pd\nimport os\n\n\ndata_folder = 'data'\ntickers = ['BBVA', 'DAX', 'DIA', 'ESTX50', 'FIE', 'ITX', 'QQQ', 'SAN', 'SPY', 'TEF', 'VIX']\nohlc_data_file = 'candlestick_data.csv'\nohlc_dtype = {'session_date': str, 'close': float, 'open': float, 'percentage_high': float, 'high': float, 'low':float, 'volume': float}\natm_percentage = 0.015\n\nfor ticker in tickers:\n ticker_data_folder = os.path.join(data_folder, ticker)\n daily_files = sorted([f for f in os.listdir(ticker_data_folder) if os.path.isfile(os.path.join(ticker_data_folder, f)) and f.lower().endswith('.json')])\n optvol = pd.DataFrame(columns=['session_date', 'itm_call_volume', 'atm_call_volume', 'otm_call_volume', 'itm_put_volume', 'atm_put_volume', 'otm_put_volume'])\n ohlc = None\n try:\n ohlc = pd.read_csv(os.path.join(data_folder, ticker, ohlc_data_file), encoding='ISO-8859-1', sep=';', decimal=',', dtype=ohlc_dtype)\n except ValueError as e:\n print('ERROR for {} while trying to read CSV data file: {}'.format(ticker, e))\n else:\n for dayf in daily_files:\n csvfile = ''\n try:\n csvfile = os.path.join(data_folder, ticker, dayf)\n df = pd.read_json(csvfile)\n except ValueError as e:\n print('ERROR while reading file {}: {}'.format(csvfile, e))\n \n today = ''\n close = 0\n try:\n # Get adjusted close price for this ticker this day\n today = df['session_date'].iloc[0]\n close_series = ohlc.loc[ohlc['session_date'] == today, 'close']\n if close_series.empty:\n print('WARNING: no OHLC data for {} on {}'.format(ticker, today))\n else:\n # Sum volume for all call ATM, OTM and ITM options\n close = float(close_series)\n itm_call_volume = int(df.loc[(df['right'] == 'C') & (df['strike'] < close * (1-atm_percentage)), 'volume'].sum())\n atm_call_volume = int(df.loc[(df['right'] == 'C') & (df['strike'] >= close * (1-atm_percentage)) & (df['strike'] <= close * (1+atm_percentage)), 'volume'].sum())\n otm_call_volume = int(df.loc[(df['right'] == 'C') & (df['strike'] > close * (1+atm_percentage)), 'volume'].sum())\n otm_put_volume = int(df.loc[(df['right'] == 'P') & (df['strike'] < close * (1-atm_percentage)), 'volume'].sum())\n atm_put_volume = int(df.loc[(df['right'] == 'P') & (df['strike'] >= close * (1-atm_percentage)) & (df['strike'] <= close * (1+atm_percentage)), 'volume'].sum())\n itm_put_volume = int(df.loc[(df['right'] == 'P') & (df['strike'] > close * (1+atm_percentage)), 'volume'].sum())\n \n optvol = optvol.append({\n 'session_date': today,\n 'itm_call_volume': itm_call_volume,\n 'atm_call_volume': atm_call_volume,\n 'otm_call_volume': otm_call_volume,\n 'itm_put_volume': itm_put_volume,\n 'atm_put_volume': atm_put_volume,\n 'otm_put_volume': otm_put_volume\n }, ignore_index=True)\n except TypeError as e:\n print('ERROR for {} while iterating {} for a close price of {}: {}'.format(ticker, today, close, e))\n \n # Join both dataframes\n df = ohlc.merge(optvol, on=['session_date'], how='inner')\n df = df.set_index('session_date')\n df.to_csv(os.path.join(ticker_data_folder, 'daily_option_volume.csv'), sep=',', decimal='.')","sub_path":"get_option_volume.py","file_name":"get_option_volume.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"375704530","text":"import json\nimport re\nfrom bs4 import BeautifulSoup\n\nfreq = {}\nfor i in range(1, 8675):\n try:\n with open(f'biblehub/{i}.html') as fp:\n hub = BeautifulSoup(fp.read(), 
'html.parser')\n except FileNotFoundError:\n continue\n for word in hub.text.split():\n word = re.sub(r'[^A-Za-zא-ת]', '', word)\n if len(word) < 2:\n continue\n try:\n freq[word] += 1\n except KeyError:\n freq[word] = 1\n\nprint(sorted(freq, key=lambda x: freq[x])[:50])\n\nwith open('freq.json', 'w', encoding='utf-8') as fp:\n json.dump(freq, fp, ensure_ascii=False)\n","sub_path":"sources/bdb/freq.py","file_name":"freq.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"216246614","text":"from CipherInterface import *\n\nclass Vigenere(CipherInterface):\n\tdef __init__(self):\n\t\tCipherInterface.__init__(self)\n\t\tself.alphabet = self.create_alphabet()\n\t\t\n\tdef setKey(self, key):\n\t\tself.key = key\n\t\t\n\tdef encrypt(self, plain_text):\n\t\tcipher_text = \"\"\n\t\tcounter = 0\n\t\tkey_size = len(str(self.key))\n\t\t\n\t\tfor char in plain_text:\n\t\t\tplain_index = self.alphabet.index(char)\n\t\t\tkey_index = self.alphabet.index(self.key[counter])\n\t\t\t\n\t\t\tcipher_value = (plain_index + key_index) % 26\n\t\t\tcipher_text += self.alphabet[cipher_value]\n\t\t\tcounter += 1\n\t\t\t\n\t\t\tif counter == key_size:\n\t\t\t\tcounter = 0\n\t\t\n\t\treturn cipher_text\n\t\t\n\tdef decrypt(self, cipher_text):\n\t\tplain_text = \"\"\n\t\tcounter = 0\n\t\tkey_size = len(str(self.key))\n\t\t\n\t\tfor char in cipher_text:\n\t\t\tcipher_index = self.alphabet.index(char)\n\t\t\tkey_index = self.alphabet.index(self.key[counter])\n\t\t\t\n\t\t\tplain_value = (cipher_index - key_index) % 26\n\t\t\tplain_text += self.alphabet[plain_value]\n\t\t\t\n\t\t\tcounter += 1\n\t\t\t\n\t\t\tif counter == key_size:\n\t\t\t\tcounter = 0\n\t\t\t\t\n\n\t\treturn plain_text\n\t\t\n\tdef create_alphabet(self):\n\t\talphabet_list = []\n\t\t\n\t\tfor ascii_val in range(97, 123):\n\t\t\talphabet_list.append(chr(ascii_val))\n\t\t\n\t\t\n\t\treturn alphabet_list","sub_path":"Vigenere.py","file_name":"Vigenere.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"399204501","text":"import os\nimport numpy as np\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.models import load_model\nfrom tensorflow.python.keras.layers import Dense, Input\nfrom tensorflow.python.keras.layers import LSTM, GRU\nfrom tensorflow.python.keras.layers import Dropout, BatchNormalization, Activation, TimeDistributed, Masking\nfrom tensorflow.python.keras.layers.convolutional import Conv1D, Conv2D\nfrom tensorflow.python.keras.layers.convolutional import MaxPooling1D, MaxPooling2D\n\nfrom astrorapid.prepare_arrays import PrepareTrainingSetArrays\nfrom astrorapid.plot_metrics import plot_metrics\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef train_model(X_train, X_test, y_train, y_test, sample_weights=None, fig_dir='.', retrain=True, epochs=25):\n \"\"\" Train Neural Network classifier and save model. 
\"\"\"\n\n model_filename = os.path.join(fig_dir, \"keras_model.hdf5\")\n #TODO: Try standard scaling and try normalising by peak and try reinputting such that it always normalises by largest value so far\n\n # colour = np.log10(X_train[:,:,0]) - np.log10(X_train[:,:,1])\n # X_train = np.dstack((X_train, colour))\n # colour = np.log10(X_test[:,:,0]) - np.log10(X_test[:,:,1])\n # X_test = np.dstack((X_test, colour))\n print(\"training...\")\n if not retrain and os.path.isfile(model_filename):\n model = load_model(model_filename)\n else:\n num_classes = y_test.shape[-1]\n\n model = Sequential()\n\n model.add(Masking(mask_value=0.))\n\n # model.add(Conv1D(filters=32, kernel_size=3))\n # model.add(BatchNormalization())\n # model.add(Activation('relu'))\n # model.add(MaxPooling1D(pool_size=1))\n # model.add(Dropout(0.2, seed=42))\n\n model.add(LSTM(100, return_sequences=True))\n # model.add(Dropout(0.2, seed=42))\n # model.add(BatchNormalization())\n\n model.add(LSTM(100, return_sequences=True))\n # model.add(Dropout(0.2, seed=42))\n # model.add(BatchNormalization())\n # model.add(Dropout(0.2, seed=42))\n\n model.add(TimeDistributed(Dense(num_classes, activation='softmax')))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=500, verbose=2, sample_weight=sample_weights)\n\n print(model.summary())\n model.save(model_filename)\n\n return model\n\n\ndef main():\n \"\"\" Train Neural Network classifier \"\"\"\n\n passbands = ('g', 'r')\n contextual_info = (0,)\n\n aggregate_classes = True\n reread_hdf5_data = False\n retrain_rnn = False\n train_epochs = 50\n\n otherchange = '' # nonuniformtime' #'withdropout'##'no_dc_and_late_start_lcs_with_colour'\n nchunks = 10000\n\n # Train + Test cuts\n zcut = 0.5\n bcut = True\n variablescut = True\n\n class_nums = (1, 2, 12, 14, 3, 13, 41, 43, 51, 60, 61, 62, 63, 64, 70, 80, 81, 83, 90, 91, 92)\n nprocesses = None # None means os.cpu_count() otherwise use integer\n\n SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n data_dir = os.path.join(SCRIPT_DIR, '..', 'data/ZTF_20190512')\n training_set_dir = os.path.join(SCRIPT_DIR, '..', 'training_set_files')\n save_dir = os.path.join(SCRIPT_DIR, '..', 'data/saved_light_curves')\n for dirname in [training_set_dir, data_dir, save_dir]:\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n fig_dir = os.path.join(training_set_dir, 'Figures', 'classify', 'ZTF_{}_noAGN_batch500_unnormalised_epochs{}_ag{}_ci{}_zcut{}_bcut{}_varcut{}'.format(otherchange, train_epochs, aggregate_classes, contextual_info, zcut, bcut, variablescut))\n for dirname in [fig_dir, fig_dir+'/cf_since_trigger', fig_dir+'/cf_since_t0', fig_dir+'/roc_since_trigger', fig_dir+'/lc_pred', fig_dir+'/pr_since_trigger', fig_dir+'/truth_table_since_trigger']:\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n preparearrays = PrepareTrainingSetArrays(passbands, contextual_info, reread_hdf5_data, aggregate_classes, bcut, zcut, variablescut, nchunks=nchunks, training_set_dir=training_set_dir, data_dir=data_dir, save_dir=save_dir)\n X_train, X_test, y_train, y_test, labels_train, labels_test, class_names, class_weights, sample_weights, timesX_train, timesX_test, orig_lc_train, orig_lc_test, objids_train, objids_test = preparearrays.prepare_training_set_arrays(otherchange, class_nums, nprocesses)\n model = train_model(X_train, X_test, y_train, y_test, sample_weights=sample_weights, fig_dir=fig_dir, 
retrain=retrain_rnn, epochs=train_epochs)\n plot_metrics(class_names, model, X_test, y_test, fig_dir, timesX_test=timesX_test, orig_lc_test=orig_lc_test, objids_test=objids_test, passbands=passbands)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"astrorapid/train_neural_network.py","file_name":"train_neural_network.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"432127445","text":"#!/usr/bin/env python\n\nimport csv\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ncsv_lines = []\n\nwith open(\"drive_data/training_data/drive_data.csv\") as csvfile:\n\treader = csv.reader(csvfile, delimiter=\",\", quotechar='|')\n\tfor line in reader:\n\t\tcsv_lines.append(line)\n\nsteering_commands = []\n\nleft_count = 0\nright_count = 0\nzero_count = 0\ncount = 0\nfor csv_line in csv_lines:\n if count != 0:\n steering_angle = float(csv_line[1])\n \n if steering_angle > 0:\n left_count += 1\n elif steering_angle < 0:\n right_count += 1\n else:\n zero_count +=1\n count += 1\n\nprint(\"Left turns count: \" + str(left_count))\nprint(\"Right turns count: \" + str(right_count))\nprint(\"Zero count: \" + str(zero_count))\n","sub_path":"model/expore_training.py","file_name":"expore_training.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"345764677","text":"import plotly.graph_objs as go\n\nimport funnelview\n\ndef create_ad_ratio(community):\n ad_ = community.ad_ratio()\n return [go.Pie(labels=list(ad_.keys()), values=list(ad_.values()))]\n\ndef create_sex_dist(community):\n sex = community.sex_dist(debug=True)\n return [go.Pie(labels=list(sex.keys()), values=list(sex.values()))]\n\n\ndef create_platform_dist(community):\n platform, system = community.platform_dist()\n pie_platform = [go.Pie(\n labels=list(platform.keys()), values=list(platform.values()),\n hoverinfo='label+percent', textinfo='value',\n marker=dict(colors=['#66CDAA', '#EE5C42', '#1874CD']), opacity=0.9)]\n pie_system = [go.Pie(\n labels=list(system.keys()), values=list(system.values()),\n hoverinfo='label+percent', textinfo='value',\n marker=dict(colors=['#8B8386', '#FFE4C4']), opacity=0.9)]\n return pie_platform, pie_system\n\n\ndef create_ages_gist(community):\n ages_female, xbins_female, ages_male, xbins_male, ukn = community.age_dict()\n gist_female = go.Histogram(\n x=ages_female, histnorm='percent', xbins=xbins_female,\n marker=dict(color='#FFD7E9', ), name='Female',\n opacity=0.75)\n gist_male = go.Histogram(\n x=ages_male, histnorm='percent', xbins=xbins_male,\n marker=dict(color='#6495ED', ), name='Male',\n opacity=0.75)\n\n max_age = max(xbins_female['end'], xbins_male['end'])\n step = xbins_female['size']\n layout = go.Layout(\n title=\"Sex/Age\",\n xaxis=dict(\n rangemode='tozero', showticklabels=True,\n ticktext=['{0}'.format(a) for a in range(0, max_age + step, step)],\n tickvals=[a for a in range(0, max_age + step, step)],\n title='Age ({0} Uknown)'.format(ukn)),\n yaxis=dict(title='Percent'))\n return go.Figure(data=[gist_female, gist_male], layout=layout)\n\n\ndef create_likes_funnel(community):\n funnel = community.likes_funnel(debug=True)\n\n phase = [\"Views\", \"Likes\", 'Reposts']\n values = []\n\n for each in phase:\n values.append(funnel[each])\n return funnelview.funnel_fig(values, 
phase)\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"298447928","text":"#!/usr/bin/env python\n\nfrom myjob import job,schedule\nimport time\nimport logging\n\nlogger = logging.getLogger('myjob')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nif __name__ == \"__main__\":\n s = schedule.schedule(\"myschedule\")\n j1=job.job_command(\"tata\",\"sleep 5\")\n s.addjob(j1)\n\n j1.start()\n while 1:\n time.sleep(1)\n print(s)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"273052256","text":"import os\nimport csv\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-dataset', required= True)\nparser.add_argument('-block', required= False, default=20)\nopt = parser.parse_args()\ndataset = opt.dataset\nblock = opt.block\n\ndata_path = \"../../data/%s\" %(dataset)\n\nvalid_path = os.path.join(data_path, \"valid.csv\")\ntrain_path = os.path.join(data_path, \"train.csv\")\ntest_path = os.path.join(data_path, \"test.csv\")\n\ndef readcsv(filename):\n with open(filename,newline= \"\\n\", encoding=\"ISO-8859-1\") as f:\n reader = csv.reader(f)\n return list(reader)\n\ndef makefile(valid, train, test):\n match=[]\n \n for i in range(len(valid[1:])):\n if(valid[i][2]==\"1\"):\n match.insert(i,[valid[i][0],valid[i][1]])\n \n lenv = len(valid[1:])\n for j in range(len(train[1:])):\n if (train[j][2]==\"1\"):\n if not ([train[j][0],train[j][1]]) in match:\n match.insert((j+(lenv)),[train[j][0],train[j][1]])\n \n lent = len (match)\n for k in range(len(test[1:])):\n if (test[k][2]==\"1\"):\n if not ([test[k][0],test[k][1]]) in match:\n match.insert((k+(lent)),[test[k][0],test[k][1]])\n\n match_def=\"\"\n for touple in match:\n match_def += ((str(touple[0])) + \"\\t\" + (str(touple[1])) + \"\\n\")\n return match_def\n\nnew_match = makefile(readcsv(valid_path), readcsv(train_path), readcsv(test_path))\n\nwith open ((data_path + \"/match.txt\"), \"w+\", encoding=\"utf-8\") as f:\n f.write(new_match)","sub_path":"src/training/create_match.py","file_name":"create_match.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"188924694","text":"from rest_framework import serializers\n\nfrom .models import Account, AccountHistory\n\n\nclass AccountHistorySerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for AccountHistory model\n \"\"\"\n\n class Meta:\n model = AccountHistory\n exclude = ('related_account',)\n\n\nclass BaseAccountSerializer(serializers.ModelSerializer):\n \"\"\"\n Basic Account model serializer containing last history registry\n \"\"\"\n last_history = AccountHistorySerializer(read_only=True)\n\n class Meta:\n model = Account\n fields = ('region', 'battle_tag', 'last_updated', 'last_played',\n 'heroes', 'guild_name', 'time_played', 'last_history')\n\n\nclass AccountSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for displaying all data related to a single Account.\n Contains all Account history and calculates ranks.\n \"\"\"\n history = AccountHistorySerializer(many=True, read_only=True)\n\n def 
to_representation(self, instance):\n data = super(AccountSerializer, self).to_representation(instance)\n leagues = {\n 'rank_sc': 'paragon_sc',\n 'rank_hc': 'paragon_hc',\n 'rank_sc_s': 'paragon_sc_s',\n 'rank_hc_s': 'paragon_hc_s',\n }\n for rank, paragon in leagues.items():\n filter = {'last_history__'+paragon+'__gt':\n getattr(instance.last_history, paragon)}\n data[rank] = Account.objects.filter(**filter).count() + 1\n return data\n\n class Meta:\n model = Account\n fields = ('region', 'battle_tag', 'last_updated', 'last_played',\n 'heroes', 'guild_name', 'time_played', 'history',)\n","sub_path":"accounts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"385950551","text":"import libpagure\n\n\nclass OurPagure(libpagure.Pagure):\n \"\"\"TODO: Move this functionality to upstream libpagure\"\"\"\n\n def __init__(self, token=None, repo=None, namespace=None, username=None, **kwargs):\n kwargs.setdefault(\"fork_username\", username)\n if repo and namespace:\n kwargs.setdefault(\"pagure_repository\", f\"{namespace}/{repo}\")\n kwargs.setdefault(\"pagure_token\", token)\n super().__init__(**kwargs)\n self.namespace = namespace\n\n @property\n def api_url(self):\n return f\"{self.instance}/api/0/\"\n\n @property\n def repo_name(self):\n return self.repo.split(\"/\")[1]\n\n def get_api_url(self, *args, add_fork=True, add_api_endpoint_part=True):\n args_list = []\n\n if self.username and add_fork:\n args_list += [\"fork\", self.username]\n\n args_list += filter(lambda x: x is not None, args)\n\n if add_api_endpoint_part:\n return self.api_url + \"/\".join(args_list)\n return f\"{self.instance}/\" + \"/\".join(args_list)\n\n def whoami(self):\n request_url = self.get_api_url(\"-\", \"whoami\", add_fork=False)\n\n return_value = self._call_api(url=request_url, method=\"POST\", data={})\n return return_value[\"username\"]\n\n def create_request(self, title, body, target_branch, source_branch):\n \"\"\"\n PAGURE DOCS:\n\n Create pull-request\n -------------------\n Open a new pull-request from this project to itself or its parent (if\n this project is a fork).\n\n ::\n\n POST /api/0//pull-request/new\n POST /api/0///pull-request/new\n\n ::\n\n POST /api/0/fork///pull-request/new\n POST /api/0/fork////pull-request/new\n\n Input\n ^^^^^\n\n +--------------------+----------+---------------+----------------------+\n | Key | Type | Optionality | Description |\n +====================+==========+===============+======================+\n | ``title`` | string | Mandatory | The title to give to |\n | | | | this pull-request |\n +--------------------+----------+---------------+----------------------+\n | ``branch_to`` | string | Mandatory | The name of the |\n | | | | branch the submitted |\n | | | | changes should be |\n | | | | merged into. |\n +--------------------+----------+---------------+----------------------+\n | ``branch_from`` | string | Mandatory | The name of the |\n | | | | branch containing |\n | | | | the changes to merge |\n +--------------------+----------+---------------+----------------------+\n | ``initial_comment``| string | Optional | The intial comment |\n | | | | describing what these|\n | | | | changes are about. 
|\n +--------------------+----------+---------------+----------------------+\n\n Sample response\n ^^^^^^^^^^^^^^^\n\n ::\n\n {\n \"assignee\": null,\n \"branch\": \"master\",\n \"branch_from\": \"master\",\n \"closed_at\": null,\n \"closed_by\": null,\n \"comments\": [],\n \"commit_start\": null,\n \"commit_stop\": null,\n \"date_created\": \"1431414800\",\n \"id\": 1,\n \"project\": {\n \"close_status\": [],\n \"custom_keys\": [],\n \"date_created\": \"1431414800\",\n \"description\": \"test project #1\",\n \"id\": 1,\n \"name\": \"test\",\n \"parent\": null,\n \"user\": {\n \"fullname\": \"PY C\",\n \"name\": \"pingou\"\n }\n },\n\n \"repo_from\": {\n \"date_created\": \"1431414800\",\n \"description\": \"test project #1\",\n \"id\": 1,\n \"name\": \"test\",\n \"parent\": null,\n \"user\": {\n \"fullname\": \"PY C\",\n \"name\": \"pingou\"\n }\n },\n \"status\": \"Open\",\n \"title\": \"test pull-request\",\n \"uid\": \"1431414800\",\n \"updated_on\": \"1431414800\",\n \"user\": {\n \"fullname\": \"PY C\",\n \"name\": \"pingou\"\n }\n }\n \"\"\"\n request_url = self.get_api_url(\n self.namespace, self.repo_name, \"pull-request\", \"new\"\n )\n\n return_value = self._call_api(\n url=request_url,\n method=\"POST\",\n data={\n \"title\": title,\n \"branch_to\": target_branch,\n \"branch_from\": source_branch,\n \"initial_comment\": body,\n },\n )\n return return_value\n\n def get_raw_request(\n self,\n *url_parts,\n method=\"GET\",\n params=None,\n data=None,\n api_url=True,\n repo_name=False,\n namespace=False,\n ):\n url_parts = list(url_parts)\n if repo_name:\n url_parts.insert(0, self.repo_name)\n if namespace:\n url_parts.insert(0, self.namespace)\n\n request_url = self.get_api_url(*url_parts, add_api_endpoint_part=api_url)\n\n req = self.session.request(\n method=method,\n url=request_url,\n params=params,\n headers=self.header,\n data=data,\n verify=not self.insecure,\n )\n return req\n\n def get_fork(self):\n\n request_url = self.get_api_url(self.repo)\n\n try:\n return_value = self._call_api(url=request_url, method=\"GET\", data={})\n return return_value\n except Exception:\n return None\n\n def create_fork(self):\n \"\"\"\n PAGURE DOCS:\n\n Fork a project\n\n --------------------\n\n Fork a project on this pagure instance.\n This is an asynchronous call.\n\n ::\n\n POST /api/0/fork\n\n Input\n\n ^^^^^\n\n +------------------+---------+--------------+---------------------------+\n | Key | Type | Optionality | Description |\n +==================+=========+==============+===========================+\n | ``repo`` | string | Mandatory | | The name of the project |\n | | | | to fork. |\n +------------------+---------+--------------+---------------------------+\n | ``namespace`` | string | Optional | | The namespace of the |\n | | | | project to fork. |\n +------------------+---------+--------------+---------------------------+\n | ``username`` | string | Optional | | The username of the user|\n | | | | of the fork. |\n +------------------+---------+--------------+---------------------------+\n | ``wait`` | boolean | Optional | | A boolean to specify if |\n | | | | this API call should |\n | | | | return a taskid or if it|\n | | | | should wait for the task|\n | | | | to finish. 
|\n +------------------+---------+--------------+---------------------------+\n\n Sample response\n\n ^^^^^^^^^^^^^^^\n\n ::\n\n wait=False:\n\n {\n \"message\": \"Project forking queued\",\n \"taskid\": \"123-abcd\"\n }\n\n\n\n wait=True:\n\n {\n \"message\": 'Repo \"test\" cloned to \"pingou/test\"\n }\n\n\n \"\"\"\n request_url = self.get_api_url(\"fork\")\n\n return_value = self._call_api(\n url=request_url,\n method=\"POST\",\n data={\"repo\": self.repo_name, \"namespace\": self.namespace, \"wait\": True},\n )\n return return_value\n\n def project_exists(self):\n request_url = self.get_api_url(self.repo)\n try:\n self._call_api(url=request_url, method=\"GET\", data={})\n return True\n except libpagure.APIError:\n return False\n\n def get_project_info(self):\n request_url = self.get_api_url(self.repo)\n\n return_value = self._call_api(url=request_url, method=\"GET\", data={})\n return return_value\n\n def get_project_description(self):\n return self.get_project_info()[\"description\"]\n\n def get_parent(self):\n return self.get_project_info()[\"parent\"]\n\n def get_git_urls(self):\n request_url = self.get_api_url(self.repo, \"git\", \"urls\")\n\n return_value = self._call_api(url=request_url, method=\"GET\", data={})\n urls = return_value[\"urls\"]\n rendered_urls = {}\n for k, v in urls.items():\n # https://pagure.io/pagure/issue/4427\n if \"{username}\" in v:\n v = v.format(username=self.whoami())\n rendered_urls[k] = v\n return rendered_urls\n\n def get_branches(self):\n request_url = self.get_api_url(self.repo, \"git\", \"branches\")\n\n return_value = self._call_api(url=request_url, method=\"GET\", data={})\n return return_value[\"branches\"]\n\n def get_commit_flags(self, commit):\n request_url = self.get_api_url(self.repo, \"c\", commit, \"flag\")\n\n return_value = self._call_api(url=request_url, method=\"GET\", data={})\n return return_value[\"flags\"]\n\n def change_token(self, token):\n self.token = token\n self.header = {\"Authorization\": \"token \" + self.token}\n","sub_path":"ogr/services/our_pagure.py","file_name":"our_pagure.py","file_ext":"py","file_size_in_byte":10196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"102531621","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.3 (3230)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib64/python3.3/site-packages/batman/batch_downloader.py\n# Compiled at: 2014-02-05 11:33:51\n# Size of source mod 2**32: 13202 bytes\n\"\"\"This module implements an interface to Pafy objects with batch-ing and\nconversion in focus.\n\n\"\"\"\nimport re, tempfile, batman.pafy, os, subprocess, sys, random\nfrom multiprocessing import Pool, freeze_support\nfrom batman import definitions\nfrom batman.definitions import path_with\nfrom batman.codec_interface import base_codec\nfrom batman.codec_interface import libmp3lame\nimport logging\n\nclass DownloaderAndEncoder(object):\n __doc__ = \"DownloaderAndEncoder is a instance of a video being downloaded and encoded.\\n Internally, it uses batman.pafy.Pafy(self.video) to grab it's data and\\n to download.\\n \\n Encoding uses command-line functions(bad, so bad) to call ffmpeg to extract\\n .WAVs, and LAME to convert them to MP3.\\n \\n \"\n on_download_progress = None\n\n def __init__(self, url, outfolder, quality, VBRquality=2):\n \"\"\"Creates a new DownloaderAndEncoder object.\n \n Keyword arguments:\n url -- The YouTube video url.\n outfolder -- The path of the 
destination folder.\n quality -- YouTube video quality(e.g. 360, 480, 720).\n VBRquality -- The -V argument passed to LAME, i.e. the quality setting\n for VBR. Ranges from 0 to 9 - lower is better quality, higher is better\n compression. (default: 2)\"\"\"\n for i in range(3):\n try:\n self.video = batman.pafy.Pafy(url)\n break\n except RuntimeError:\n raise RuntimeError('Invalid URL')\n except:\n if i != 2:\n continue\n else:\n raise RuntimeError(\"Couldn't load video\")\n\n if definitions.WINDOWS:\n self.original_video = tempfile.NamedTemporaryFile(delete=False)\n self.original_video.close()\n else:\n self.original_video = tempfile.NamedTemporaryFile()\n if definitions.OPTIONS.audioCodecEnabled != definitions.OPTIONS.videoCodecEnabled:\n if definitions.OPTIONS.audioCodecEnabled:\n self.codec = definitions.OPTIONS.audioCodec()\n else:\n self.codec = definitions.OPTIONS.videoCodec()\n self.solo_encoding = True\n else:\n if definitions.OPTIONS.audioCodecEnabled and definitions.OPTIONS.videoCodecEnabled:\n self.interactor = definitions.OPTIONS.interactor()\n self.solo_encoding = False\n else:\n raise RuntimeError('No codecs are set.')\n if self.solo_encoding:\n self.mp3_out = outfolder + '/' + self.codec.make_valid_file_name_from_caption(self.video.title)\n else:\n self.mp3_out = outfolder + '/' + self.interactor.make_valid_file_name_from_caption(self.video.title)\n self.quality = quality\n self.VBRquality = VBRquality\n self.download_progress = None\n return\n\n def set_outfolder(self, outfolder):\n \"\"\"Changes the destination of the converted video.\"\"\"\n if self.solo_encoding:\n self.mp3_out = outfolder + '/' + self.codec.make_valid_file_name_from_caption(self.video.title)\n else:\n self.mp3_out = outfolder + '/' + self.interactor.make_valid_file_name_from_caption(self.video.title)\n\n def _progress_callback(self, total, bytesdone, pct, rate, eta):\n self.download_progress = (total, bytesdone, pct, rate, eta)\n try:\n self.on_download_progress(self)\n except TypeError:\n pass\n\n def download(self):\n \"\"\"Downloads the video(doesn't encodes).\"\"\"\n current_choice = None\n if self.solo_encoding:\n if base_codec.is_codec_an_audio_codec(self.codec):\n current_choice = self.video.getbestaudio()\n if current_choice == None:\n for stream in self.video.streams:\n stream_res = int(stream.resolution.split('x')[0])\n if self.quality == stream_res:\n if stream.extension == 'mp4':\n current_choice = stream\n break\n if current_choice == None:\n current_choice = stream\n continue\n else:\n if current_choice.extension == 'mp4':\n if stream.extension != 'mp4':\n continue\n current_res = int(current_choice.resolution.split('x')[0])\n if abs(self.quality - stream_res) < abs(self.quality - current_res):\n current_choice = stream\n continue\n\n for i in range(0, 3):\n try:\n self.download_progress = None\n current_choice.download(quiet=True, callback=self._progress_callback, filepath=self.original_video.name)\n break\n except:\n if i != 2:\n continue\n else:\n raise RuntimeError('Last try failed. 
Exiting')\n\n return\n\n def __del__(self):\n if definitions.WINDOWS:\n logging.debug('Deleting temporary files %s and %s', self.original_video.name, self.wav_file.name)\n os.unlink(self.original_video.name)\n os.unlink(self.wav_file.name)\n\n\ndef _helper_encode_solo(codec, orig_p, mp3_o, VBRquality, ticket):\n codec.encode(mp3_o, orig_p)\n return ticket\n\n\ndef _helper_encode_interaction(interactor, orig_p, mp3_o, VBRquality, ticket):\n interactor.encode(mp3_o, orig_p)\n return ticket\n\n\nclass DownloadAndEncodeMarshaller(object):\n __doc__ = 'DownloadAndEncodeMarshaller is a manager of DownloadAndEncode objects.'\n on_video_start_download = None\n on_video_start_encoding = None\n on_video_progress = None\n on_video_finish = None\n NON_EXISTANT = 1\n PENDING = 2\n DOWNLOADING = 3\n ENCODING = 4\n FINISHED = 5\n NOT_FOUND = 6\n\n def __init__(self, let_invalid_url_errors_pass=True):\n self.all = []\n self.pending = []\n self.downloading = []\n self.encoding = {}\n self.encoder_pool = Pool(processes=2)\n self.finished = []\n self.event_starter_quit = False\n self.let_invalid_url_errors_pass = let_invalid_url_errors_pass\n self.reload_codecs()\n\n def reload_codecs(self):\n if definitions.OPTIONS.audioCodecEnabled != definitions.OPTIONS.videoCodecEnabled:\n if definitions.OPTIONS.audioCodecEnabled:\n self.codec = definitions.OPTIONS.audioCodec()\n else:\n self.codec = definitions.OPTIONS.videoCodec()\n self.solo_encoding = True\n else:\n if definitions.OPTIONS.audioCodecEnabled and definitions.OPTIONS.videoCodecEnabled:\n self.interactor = definitions.OPTIONS.interactor()\n self.solo_encoding = False\n else:\n raise RuntimeError('No codecs are set.')\n\n def find_state_of_video(self, video):\n if video not in self.all:\n return self.NON_EXISTANT\n else:\n if video in self.pending:\n return self.PENDING\n else:\n if video in self.downloading:\n return self.DOWNLOADING\n if video in self.encoding.values():\n return self.ENCODING\n if video in self.finished:\n pass\n return self.FINISHED\n return self.NOT_FOUND\n\n def add_video_to_download(self, url, outfolder, quality, VBRquality=2):\n try:\n downloaderAndEncoder = DownloaderAndEncoder(url, outfolder, quality, VBRquality)\n except RuntimeError as e:\n if str(e) == 'Invalid URL':\n if self.let_invalid_url_errors_pass:\n return\n raise\n\n downloaderAndEncoder.on_download_progress = self.on_video_progress\n self.all.append(downloaderAndEncoder)\n self.pending.append(downloaderAndEncoder)\n\n def _generate_ticket(self):\n while True:\n n = random.randrange(0, 10000)\n if n not in self.encoding:\n return n\n\n def _finish_encode(self, ticket):\n try:\n v = self.encoding[ticket]\n self.finished.append(v)\n del self.encoding[ticket]\n self.on_video_finish(self, v)\n except IndexError:\n pass\n\n def start(self):\n while len(self.pending) > 0:\n self.downloading.append(self.pending.pop(0))\n try:\n if self.on_video_start_download != None:\n self.on_video_start_download(self, self.downloading[(-1)])\n self.downloading[(-1)].download()\n ticket = self._generate_ticket()\n self.encoding[ticket] = self.downloading.pop(-1)\n if self.on_video_start_encoding != None:\n self.on_video_start_encoding(self, self.encoding[ticket], ticket)\n if self.solo_encoding:\n helper = _helper_encode_solo\n args = (self.codec,\n self.encoding[ticket].original_video.name,\n self.encoding[ticket].mp3_out,\n self.encoding[ticket].VBRquality,\n ticket)\n else:\n helper = _helper_encode_interaction\n args = (self.interactor,\n self.encoding[ticket].original_video.name,\n 
self.encoding[ticket].mp3_out,\n self.encoding[ticket].VBRquality,\n ticket)\n self.encoder_pool.apply_async(helper, args, callback=lambda t: self._finish_encode(t), error_callback=lambda e: print(e))\n except RuntimeError:\n raise\n\n return\n\n def event_starter(self, event):\n while not self.event_starter_quit:\n self.start()\n event.clear()\n if len(self.pending) > 0:\n continue\n event.wait()\n\n def __del__(self):\n for l in [self.all, self.pending, self.downloading, self.finished]:\n for video in l:\n del video\n\n l.clear()\n\n for video in self.encoding.values():\n del video\n\n del self.encoding\n\n\ndef batch_download(txt_file, outfolder, quality, VBRquality):\n marshall = DownloadAndEncodeMarshaller()\n for line in txt_file:\n line = line.rstrip()\n if line == '':\n continue\n marshall.add_video_to_download(line, outfolder, quality, VBRquality)\n\n marshall.start()\n marshall.encoder_pool.close()\n marshall.encoder_pool.join()\n\n\ndef main():\n freeze_support()\n txt_file = sys.argv[1]\n outfolder = sys.argv[2]\n quality = int(sys.argv[3])\n VBRquality = int(sys.argv[4])\n batch_download(open(txt_file, 'r'), outfolder, quality, VBRquality)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/Bat-man-0.3.1.linux-x86_64.tar/batch_downloader.cpython-33.py","file_name":"batch_downloader.cpython-33.py","file_ext":"py","file_size_in_byte":11294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"504932555","text":"import os.path\nimport datetime\n\nfrom django.core.urlresolvers import resolve\nfrom django.test import TestCase\nfrom django.http import HttpRequest\n\nfrom attachment_parser import convert_to_text\nfrom attachment_parser.views import home_page, json_page, misc_page\n\n\nLOCAL_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass TestingExtractFlicaZip(TestCase):\n\n def setUp(self):\n self.zip_path = os.path.join(\n LOCAL_DIR,\n 'test_files/LGA-EM7-CA-Run1117___175444.zip'\n )\n\n def test_extract_to_file(self):\n desired_files = convert_to_text.new_new_extract(self.zip_path, os.path.join(LOCAL_DIR, 'test_files'))\n self.assertIsNotNone(desired_files.get('Audit'))\n self.assertTrue(os.path.exists(\n os.path.join(LOCAL_DIR, 'test_files', desired_files.get('Audit'))\n ))\n\n def test_extract_pdf(self):\n self.assertTrue(os.path.join(LOCAL_DIR, 'test_files/LGA-EM7-CA-Summary.pdf'))\n with open(os.path.join(LOCAL_DIR, 'test_files/LGA-EM7-CA-Summary.pdf'), 'rb') as f:\n extracted = convert_to_text.extract_pdf(f)\n self.assertTrue(extracted)\n\n\nclass TestProcessFlicaZips(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n if not os.path.exists(os.path.join(LOCAL_DIR, 'test_files/LGA-EM7-CA-Summary.pdf')):\n raise FileNotFoundError()\n with open(os.path.join(LOCAL_DIR, 'test_files/LGA-EM7-CA-Summary.pdf'), 'rb') as f:\n cls.extracted = convert_to_text.extract_pdf(f)\n\n def test_pull_data(self):\n run_date = convert_to_text.find_date(self.extracted[0])\n self.assertIsInstance(run_date, datetime.date)\n\n def test_base_seat_aircraft(self):\n bsa = convert_to_text.find_base_seat_ac(self.extracted[0])\n self.assertEqual(bsa, ('LGA', 'CA', 'EM7'))\n\n def test_find_min_threshold_max(self):\n data = convert_to_text.find_min_threshold_max(self.extracted[3])\n self.assertEqual(data.get('Threshold'), '149:00')\n\n def test_find_run_statistics(self):\n data = convert_to_text.find_run_statistics(self.extracted[6], self.extracted[8])\n self.assertEqual(data.get('Crewmembers'), '76')\n\n\nclass 
HandleEmailAndAttachments(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n with open('attachment_parser/test_files/LGA-EM7-CA-Summary.pdf', 'rb') as f:\n cls.zip_file = f.read()\n\n def setUp(self):\n self.email_data = {\n 'sender': \"joeblogs@gmail.com\",\n 'from': \"joeblogs@gmail.com\",\n 'message-headers': \"fooobar\",\n 'stripped-html': \"fooobar\",\n 'stripped-signature': \"fooobar\",\n 'content-id-map': \"fooobar\",\n 'body-plain': \"fooobar\",\n 'stripped-text': \"fooobar\",\n 'body-html': \"fooobar\",\n 'recipient': \"sall@example.com\",\n 'subject': \"Spam\",\n }\n self.attachments = [self.zip_file]\n\n def test_accept_attachments(self):\n pass\n # self.fail('Finish the test.')\n\n\nclass HomePageTest(TestCase):\n\n def test_root_url_resolves_to_home_page_view(self):\n found = resolve('/')\n self.assertEqual(found.func, home_page)\n\n def test_home_page_returns_correct_html(self):\n request = HttpRequest()\n response = home_page(request)\n self.assertTrue(response.content.startswith(b''))\n self.assertIn(b'Pairing Info.', response.content)\n self.assertTrue(response.content.strip().endswith(b''))\n\n def test_home_page_has_proper_content(self):\n request = HttpRequest()\n response = home_page(request)\n self.assertIn(b'CMH AA JAN 2016', response.content)\n url = b'localhost:8000'\n self.assertIn(b'http://' + url + b'/2016/JAN/AA/CMH', response.content)\n\n def test_misc_data_page(self):\n \"\"\"\n Misc page should go to a placeholder for now.\n\n :return:\n \"\"\"\n found = resolve('/2016/JAN/AA/MISC/')\n self.assertEqual(found.func, misc_page)\n\n def test_misc_page(self):\n request = HttpRequest()\n response = misc_page(request)\n self.assertIn(b'System-wide data.', response.content)\n\n\nclass JSONtest(TestCase):\n\n def test_url_resolves_to_json_view(self):\n found = resolve('/2016/JAN/AA/IND/')\n self.assertEqual(found.func, json_page)\n\n def test_json_page_returns_json_data(self):\n request = HttpRequest()\n response = json_page(request, 'IND')\n self.assertIn(b'IND', response.content)\n\n","sub_path":"myproject/attachment_parser/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"35129554","text":"import socket\n\nIP = '127.0.0.1'\nPUERTO = 8083\n\ncliente = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n cliente.connect((IP, PUERTO))\n servidor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"Bienvenido a caliente, caliente\")\n\n c_abierta = True\n while c_abierta:\n\n intro = (input(\":\"))\n intro = str.encode(intro)\n cliente.send(intro)\n msg = (cliente.recv(1000).decode('utf-8'))\n\n if msg == \"Felicidades! 
Ha acertado el número\":\n print(msg)\n cliente.close()\n else:\n print(\"Vuelva a introducirlo\")\n\n\nexcept KeyboardInterrupt:\n cliente.close()\n print('Cerrando la calculadora...')\n","sub_path":"cliente_caliente.py","file_name":"cliente_caliente.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"272937260","text":"#!/usr/bin/env python\n\nprint ()\nprint (\"Predict the formation energy and band-gap of semicondoctors with chemical formular (Al_x Ga_y In_z)_2N O_3N\")\nprint ()\n\nimport argparse\nparser = argparse.ArgumentParser(description='Predict the formation energy and band-gap of semicondoctors with chemical formular (Al_x Ga_y In_z)_2N O_3N')\n# Arguments supported by the code.\nparser.add_argument(\"--file_train\", default='train.csv', help='training data. Default: train.csv')\nparser.add_argument(\"--test_size\", type=float, default=0.2, help='train-test split: test size. Default: 0.2')\nparser.add_argument(\"--rstate\", type=int, default=42, help='train-test split: random state. Default: 42')\nparser.add_argument(\"--n_est\", type=int, default=100, help='random forest input: n_estimators. Default: 100')\nparser.add_argument(\"--max_depth\", type=int, default=5, help='random forest input: max_depth. Default: 5')\nparser.add_argument(\"--file_crystal\", default=\"POSCAR\", help='file POSCAR format. Default: POSCAR')\n\nargs = parser.parse_args()\nfile_train = args.file_train\ntest_size = args.test_size\nrstate = args.rstate\nn_est = args.n_est\nmax_depth = args.max_depth\nfile_crystal = args.file_crystal\n\nimport utils\n\nimport pandas as pd\n\n# Load the data and rename the columns\n\ndf_data = pd.read_csv(file_train)\ndf_data = df_data.rename(columns={'spacegroup' : 'sg',\n 'number_of_total_atoms' : 'Natoms',\n 'percent_atom_al' : 'x_Al',\n 'percent_atom_ga' : 'x_Ga',\n 'percent_atom_in' : 'x_In',\n 'lattice_vector_1_ang' : 'a',\n 'lattice_vector_2_ang' : 'b',\n 'lattice_vector_3_ang' : 'c',\n 'lattice_angle_alpha_degree' : 'alpha',\n 'lattice_angle_beta_degree' : 'beta',\n 'lattice_angle_gamma_degree' : 'gamma',\n 'formation_energy_ev_natom' : 'E',\n 'bandgap_energy_ev' : 'Eg'})\n\n# adding more features\ndf_data['vol'] = utils.get_vol(df_data['a'], df_data['b'], df_data['c'],\n df_data['alpha'], df_data['beta'], df_data['gamma'])\ndf_data['atomic_density'] = df_data['Natoms'] / df_data['vol'] \n\n# select important feature\nnon_features = ['id', 'E', 'Eg']\nfeatures = [col for col in list(df_data) if col not in non_features]\nprint('%i features used in the ML model %s' % (len(features), features))\nprint()\n\n# Read POSCAR\ndf_input = utils.readPOSCAR(file_crystal)\nprint (\"result after reading POSCAR:\")\nprint (df_input)\n\n# Prepare training data and fit the Random Forest model\nX = df_data[features].values\ny_E = df_data['E'].values\ny_Eg = df_data['Eg'].values\nutils.modelRandomForestRegressor(X, y_E, y_Eg, features, test_size, rstate, n_est, max_depth, df_input)\n\n\n","sub_path":"others/Nomad2018/Nomad2018_v3/band_gap.py","file_name":"band_gap.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"645850252","text":"class Solution():\n \"\"\"A class to hold solutions and their scores.\"\"\"\n\n def __init__(self):\n self.square_words = []\n self.values = []\n self.total_score = 0\n self.average_score = 0\n self.lowest_score = 0\n\n def get_scores(self, square_words, 
word_value):\n self.square_words = square_words.copy()\n for word in self.square_words:\n try:\n value = word_value[word]\n except KeyError:\n value = 1\n finally:\n self.values.append(value)\n self.total_score = sum(self.values)\n self.average_score = self.total_score / len(self.square_words)\n self.lowest_score = self.values[1] #don't check seed word value\n for x in self.values:\n if x < self.lowest_score:\n self.lowest_score = x","sub_path":"solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"121951923","text":"import pandas as pd\n\npd.set_option(\"max_columns\", 8)\n\nrestaurants = pd.read_csv(\"groningenRestaurants.csv\")\nrankings = pd.read_excel(\"rankingsRaw.xlsx\")\n\nrankings['restaurant'] = rankings['name']\ndel(rankings['name'])\n\nrestaurant_rankings = restaurants.merge(rankings, how=\"inner\", on=\"restaurant\")\n\n# print(rankings.head(5))\n# print(restaurants.head(5))\nprint(restaurant_rankings.head(5))\n","sub_path":"week5/session/rankings.py","file_name":"rankings.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"207192852","text":"import mysql.connector\n\nmydb = mysql.connector.connect(user = 'root',\n passwd = 'root123',\n host = 'localhost',\n auth_plugin = 'mysql_native_password',\n database = 'BankDB')\n#Created Database BankDB\n#mycursor.execute('create databese BankDB')\nmycursor = mydb.cursor()\n\nb = 5\n#Function to display the menu\ndef Menu():\n print(\"Main Menu\".rjust(b, \" \"))\n print(\"1. Insert Record/Records\".rjust(b, \" \"))\n print(\"2. Display Records as per Account Number\".rjust(b, \" \"))\n print(\" a. Sorted as per Account Number\".rjust(b, \" \"))\n print(\" b. Sorted as per Customer Balance\".rjust(b, \" \"))\n print(\"3. Update Record\".rjust(b, \" \"))\n print(\"4. Delete Record\".rjust(b, \" \"))\n print(\"5. Transactions Debt/Withdraw from the account\".rjust(b, \" \"))\n print(\" a. Debit/Withdraw from the account\".rjust(b, \" \"))\n print(\" b. Credit into the the account\".rjust(b, \" \"))\n print(\"6. Exit\".rjust(b, \" \"))\n\ndef MenuSort():\n print(\" a. Sorted as per Account Number\".rjust(b, \" \"))\n print(\" b. Sorted as per Customer Name\".rjust(b, \" \"))\n print(\" c. Sorted as per Customer Balance\".rjust(b, \" \"))\n print(\" d. Back\".rjust(b, \" \"))\n\ndef MenuTransaction():\n print(\" a. Debit/Withdraw from the account\".rjust(b, \" \"))\n print(\" b. Credit into the account\".rjust(b, \" \"))\n print(\" c. 
Back\".rjust(b, \" \"))\n\ndef Create():\n try:\n mycursor.execute(\"create table bank(ACCNO varchar(10), NAME varchar(20), Mobile varchar(10), Email varchar(20), ADDRESS varchar(20)\")\n print(\"Table Created\")\n Insert()\n except:\n print(\"Table Exit\")\n Insert()\n\ndef Insert():\n #Loop for accepting records\n while True:\n Acc = input(\"Enter Account No.\")\n Name = input(\"Enter Name\")\n Mob = input(\"Enter Mobile No.\")\n email = input(\"Enter Email\")\n Add = input(\"Enter Address\")\n City = input(\"Enter City\")\n Country = input(\"Enter Country\")\n Bal = float(input(\"Enter Balance\"))\n Rec = [Acc, Name.upper(), Mob, email.upper(), Add.upper(), City.upper(), Country.upper(), Bal]\n Cmd = \"insert into BANK values(%s,%s,%s,%s,%s,%s,%s,%s)\"\n mycursor.execute(Cmd, Rec)\n mydb.commit()\n ch = input(\"Do you want to enter more records\")\n if ch == 'N' or ch == 'n':\n break\n#Function to Display records as per ascending order of Account Number\ndef DispSortAcc():\n try:\n cmd = \"select * from BANK order by ACCINO\"\n mycursor.execute(cmd)\n F = \"%15s %15s %15s %15s %15s %15s %15s %15s\"\n print(F%(\"ACCNO\", \"NAME\", \"MOBILE\", \"EMAIL ADDRESS\", \"COMPLETE ADDRESS\", \"CITY\", \"COUNTRY\", \"BALANCE\"))\n print()\n for i in mycursor:\n for j in i:\n print(\"%14s\" %j, end=\" \")\n print()\n print()\n except:\n print(\"Table doesn't exist\")\n\ndef DispSortBal():\n try:\n cmd = \"select * from BANK\"\n mycursor.execute(cmd)\n ch = input(\"Enter the account no to be searched\")\n for i in mycursor:\n if i[0] == ch:\n print()\n F = \"%15s %15s %15s %15s %15s %15s %15s %15s\"\n print(F % (\"ACCNO\", \"NAME\", \"MOBILE\", \"EMAIL ADDRESS\", \"COMPLETE ADDRESS\", \"CITY\", \"COUNTRY\", \"BALANCE\"))\n print()\n for j in i:\n print(\"%14s\" %j,end=\" \")\n print()\n break\n else:\n print(\"Record Not Found\")\n except:\n print(\"Table doesn't exist\")\n\n#Function to change the details of a customer\ndef Update():\n try:\n cmd = \"select * from BANK\"\n mycursor.execute(cmd)\n A = input(\"Enter the account no whose details to be changed\")\n for i in mycursor:\n i = list(i)\n if i[0] == A:\n ch = input(\"Change Name(Y/N)\")\n if ch == 'Y' or ch == 'y':\n i[1] = input(\"Enter Name\")\n i[1] = i[1].upper()\n\n ch = input(\"Change Mobile(Y/N)\")\n if ch == 'Y' or ch == 'y':\n i[2] = input(\"Enter Mobile\")\n\n ch = input(\"Change Email(Y/N)\")\n if ch == 'Y' or ch == 'y':\n i[3] = input(\"Enter email\")\n i[3] = i[3].upper()\n\n ch = input(\"Change Address(Y/N)\")\n if ch == 'Y' or ch == 'y':\n i[4] = input(\"Enter Address\")\n i[4] = i[4].upper()\n\n ch = input(\"Change City(Y/N)\")\n if ch == 'Y' or ch == 'y':\n i[5] = input(\"Enter City\")\n i[5] = i[5].upper()\n\n ch = input(\"Change Country(Y/N)\")\n if ch == 'Y' or ch == 'y':\n i[6] = input(\"Enter Country\")\n i[6] = i[6].upper()\n\n ch = input(\"Change Balance(Y/N)\")\n if ch == 'Y' or ch == 'y':\n i[7] = float(input(\"Enter Balance\"))\n cmd = \"UPDATE BANK SET NAME = %s, MOBILE = %s, EMAIL = %s, ADDRESS = %s, CITY = %s, COUNTRY = %s, BALANCE = %s WHERE ACCNO = %s\"\n val = (i[1], i[2], i[3], i[4], i[5], i[6], i[7], i[0])\n mycursor.execute(cmd, val)\n mydb.commit()\n print(\"Account Updated\")\n break\n else:\n print(\"Record not found\")\n except:\n print(\"No such table\")\n\n#function to delete the details of the customer\ndef Delete():\n try:\n cmd = \"select * from BANK\"\n mycursor.execute(cmd)\n A = input(\"Enter the account no whose details to be changed\")\n for i in mycursor:\n i = list(i)\n if i[0]==A:\n cmd = 
\"delete from bank where accno = %s\"\n val = (i[0])\n mycursor.execute(cmd, val)\n mydb.commit()\n print(\"Account Deleted\")\n break\n else:\n print(\"Record not found\")\n except:\n print(\"No such Table\")\n\n#function to Withdraw the amount by assuring the min balance of Rs 5000\ndef Debit():\n try:\n cmd = \"select * from BANK\"\n mycursor.execute(cmd)\n print(\"Please Note that the money can only be debited if min balance of Rs 5000 exists\")\n acc = input(\"Enter the account np from which the money is to be debited\")\n for i in mycursor:\n i = list(i)\n if i[0] == acc:\n Amt = float(input(\"Enter the amount to be withdraw\"))\n if i[7]-Amt>=5000:\n i[7] -= Amt\n cmd = \"UPDATE BANK SET BALANCE = %s WHERE ACCNO = %s\"\n val = (i[7], i[0])\n mycursor.execute(cmd, val)\n mydb.commit()\n print(\"Amount Debited\")\n break\n else:\n print(\"There must be min balance of Rs 5000\")\n break\n else:\n print(\"Record not Found\")\n except:\n print(\"Table doesn't exist\")\n\n#function to Withdraw the amount by assuring the min balance of Rs 5000\ndef Credit():\n try:\n cmd = \"select * from BANK\"\n mycursor.execute(cmd)\n S = mycursor.fetchall()\n acc = input(\"Enter the account np from which the money is to be credited\")\n for i in S:\n i = list(i)\n if i[0] == acc:\n Amt = float(input(\"Enter the amount to be credited\"))\n i[7]+=Amt\n cmd = \"UPDATE BANK SET BALANCE = %s WHERE ACCNO = %s\"\n val = (i[7], i[0])\n mycursor.execute(cmd, val)\n mydb.commit()\n print(\"Amount Credited\")\n break\n else:\n print(\"Record not found\")\n except:\n print(\"Table doesn't exist\")\nwhile True:\n Menu()\n ch = input(\"Enter your choice\")\n if ch == \"1\":\n Create()\n elif ch == \"2\":\n while True:\n MenuSort()\n ch1 = input(\"Enter choie a/b/c/d\")\n if ch1 in ['a', 'A']:\n DispSortAcc()\n elif ch1 in ['b', 'B']:\n DispSortBal()\n elif ch1 in ['c', 'C']:\n print(\"Back to main menu\")\n break\n else:\n print(\"Invalid choice\")\n elif ch == \"3\":\n Update()\n elif ch == \"4\":\n Delete()\n elif ch == \"5\":\n while True:\n MenuTransaction()\n ch1 = input(\"Enter choice a/b/c\")\n if ch1 in ['a', 'A']:\n Debit()\n elif ch1 in ['b', 'B']:\n Credit()\n elif ch1 in ['c', 'C']:\n print(\"Back to the main Menu\")\n break\n else:\n print(\"Invalid Choice\")\n elif ch == \"6\":\n print(\"Exiting...\")\n else:\n print(\"Wrong Choice Entered\")\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"645249538","text":"try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nconfig = {\n 'description': 'Scrape the web for text documents',\n 'author': 'David Roddick',\n 'author_email': 'dgroddick@gmail.com',\n 'url': 'https://github.com/dgroddick/scraper.git',\n 'version': '0.1',\n 'install_requires': ['nose', 'MySQL-python'],\n 'packages': ['scrape'],\n 'name': 'Scraper'\n}\n\nsetup(**config)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"628296844","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\nimport re\nimport os.path\nimport random\nclass Crawler:\n def __init__(self,page):\n self.current_target_index = 0\n self.currentUrl = page\n self.jsonFile = os.path.join( \"data\",self.currentUrl.split(\"/\")[2]+str(random.randint(10,1000))+\".json\")\n \n 
self.currentPageContent = None\n\n self.__newslist = {}\n self.__newslist[\"news\"] = []\n \n self.httpResponse = requests.get(self.currentUrl)\n self.bs = BeautifulSoup(self.httpResponse.text)\n self.TARGETFILE = \"targets\"\n self.HISTORYFILENAME = \"already_visited\"\n #self.raw_json = self.httpResponse.json()\n try:\n print(\"opening json: \")\n with open(self.jsonFile,\"r+\") as f:\n print(\"reading json..\")\n self.data = json.load(f)\n print(self.data)\n f.close()\n except:\n print(\"json not found, making from scratch!\")\n self.data = [{\n \"links\" : [],\n \"articles\": [],\n \"emails\": [],\n \"titles\": [],\n \"headers\": [],\n }]\n with open(self.jsonFile,\"w+\") as f:\n print(\"saving json..\")\n json.dump(self.data,f)\n f.close()\n \n \n\n\n def saveJson(self,data):\n with open(self.jsonFile,\"w+\") as f:\n print(\"saving json..\")\n self.data = data\n json.dump(self.data,f)\n f.close()\n \n\n\n \n def getLinks(self):\n \n for a in self.bs.find_all(\"a\" ,href=True):\n with open(\"targets\",\"a+\") as f:\n link = a[\"href\"]\n if link.startswith(\"htt\"):\n self.data[0][\"links\"].append(link)\n f.write(link+\"\\n\")\n self.saveJson(self.data)\n\n \n \n \n def getArticles(self):\n for article in self.bs.find_all(\"article\"):\n self.data[0][\"articles\"].append(article.text)\n self.saveJson(self.data)\n\n\n def __extractEmail(self,st):\n regex = r'([\\w0-9._-]+@[\\w0-9._-]+\\.[\\w0-9_-]+)'\n return re.findall(regex, st, re.M|re.I)\n \n def getEmailAddresses(self):\n result = []\n for line in self.bs.find_all():\n \n extracted = self.__extractEmail(line.text)\n print(extracted)\n if len(extracted) > 0:\n result.append(extracted)\n\n if len(result) > 0:\n self.data[0][\"emails\"].append(result)\n self.saveJson(self.data)\n else:\n print(self.currentUrl+\" No email founds on this site.\")\n\n\n def history_addCurrentURL(self):\n with open(self.HISTORYFILENAME,\"a+\") as f:\n f.write(self.currentUrl+\"\\n\")\n\n def history_getHistory(self,url):\n f = open(self.HISTORYFILENAME,\"r+\")\n for line in f.readlines():\n if line == url:\n return True\n else:\n return False\n\n def chose_target(self):\n with open(self.TARGETFILE,\"r+\") as f:\n lines = f.readlines()\n self.currentUrl = lines[self.current_target_index]\n self.httpResponse = requests.get(self.currentUrl)\n self.bs = BeautifulSoup(self.httpResponse.text)\n self.jsonFile = os.path.join( \"data\",self.currentUrl.split(\"/\")[2]+str(random.randint(10,1000))+\".json\")\n f.close()\n\n def crawl(self,depth):\n active = True\n\n while active:\n #main loop!\n self.current_target_index += 1\n print(\"===CURRENT URL: (\"+self.currentUrl+\")=====\")\n #modos operandi:\n #\n \n print(\"Getting articles...\")\n self.getArticles()\n print(\"getting emails...\")\n self.getEmailAddresses()\n print(\"Getting links\")\n self.getLinks()\n print(\"done. 
switching website\")\n s\n self.chose_target()\n\n\n#testiing\ns = Crawler(\"https://www.estbarreiro.ips.pt/\")\ns.crawl(3)\n\n\n\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"20851321","text":"# utils.py\n\nimport torch\nfrom torchtext import data\nfrom torchtext.vocab import Vectors\nimport spacy\nimport joblib\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\n\nclass Dataset(object):\n def __init__(self, config):\n self.config = config\n self.train_iterator = None\n self.test_iterator = None\n self.val_iterator = None\n self.vocab = []\n self.word_embeddings = {}\n \n def parse_label(self, label):\n '''\n Get the actual labels from label string\n Input:\n label (string) : labels of the form '__label__2'\n Returns:\n label (int) : integer value corresponding to label string\n '''\n return int(label.strip()[-1])\n\n def get_pandas_df(self, filename):\n '''\n Load the data into Pandas.DataFrame object\n This will be used to convert data to torchtext object\n '''\n with open(filename, 'r') as datafile: \n data = [line.strip().split(',', maxsplit=1) for line in datafile]\n data_text = list(map(lambda x: x[1], data))\n data_label = list(map(lambda x: self.parse_label(x[0]), data))\n\n full_df = pd.DataFrame({\"text\":data_text, \"label\":data_label})\n return full_df\n\n def get_my_pandas_df(self, filename, context_flag):\n \"\"\"\n Load data from pkl file\n :param filename:\n :param context_flag: # 0: bairly include pairs\n # 1: include pairs and local context\n # 2: include pairs and global context\n # 3: include pairs, local context and global context\n :return:\n \"\"\"\n pairs = joblib.load(filename)\n if context_flag == 0:\n data_pairs = [row[0] + row[1] for row in pairs]\n elif context_flag == 1:\n data_pairs = [row[0] + row[1] + row[3] for row in pairs]\n elif context_flag == 2:\n data_pairs = [row[0] + row[1] + row[4] for row in pairs]\n elif context_flag == 3:\n data_pairs = [row[0] + row[1] + row[3] + row[4] for row in pairs]\n data_label = [row[2] for row in pairs]\n train_text, val_text, train_label, val_label = train_test_split(data_pairs, data_label,\n test_size=0.20,\n shuffle=True)\n train_text, test_text, train_label, test_label = train_test_split(train_text, train_label,\n test_size=0.25,\n shuffle=True)\n train_df = pd.DataFrame({\"text\": train_text, \"label\": train_label})\n test_df = pd.DataFrame({\"text\": test_text, \"label\": test_label})\n val_df = pd.DataFrame({\"text\": val_text, \"label\": val_label})\n return train_df, test_df, val_df\n \n def load_data(self, w2v_file, train_file, test_file, val_file=None):\n '''\n Loads the data from files\n Sets up iterators for training, validation and test data\n Also create vocabulary and word embeddings based on the data\n \n Inputs:\n w2v_file (String): absolute path to file containing word embeddings (GloVe/Word2Vec)\n train_file (String): absolute path to training file\n test_file (String): absolute path to test file\n val_file (String): absolute path to validation file\n '''\n\n NLP = spacy.load('en')\n tokenizer = lambda sent: [x.text for x in NLP.tokenizer(sent) if x.text != \" \"]\n \n # Creating Field for data\n TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=self.config.max_sen_len)\n LABEL = data.Field(sequential=False, use_vocab=False)\n 
datafields = [(\"text\",TEXT),(\"label\",LABEL)]\n \n # Load data from pd.DataFrame into torchtext.data.Dataset\n train_df = self.get_pandas_df(train_file)\n train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]\n train_data = data.Dataset(train_examples, datafields)\n \n test_df = self.get_pandas_df(test_file)\n test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]\n test_data = data.Dataset(test_examples, datafields)\n \n # If validation file exists, load it. Otherwise get validation data from training data\n if val_file:\n val_df = self.get_pandas_df(val_file)\n val_examples = [data.Example.fromlist(i, datafields) for i in val_df.values.tolist()]\n val_data = data.Dataset(val_examples, datafields)\n else:\n train_data, val_data = train_data.split(split_ratio=0.8)\n \n TEXT.build_vocab(train_data, vectors=Vectors(w2v_file))\n self.word_embeddings = TEXT.vocab.vectors\n self.vocab = TEXT.vocab\n \n self.train_iterator = data.BucketIterator(\n (train_data),\n batch_size=self.config.batch_size,\n sort_key=lambda x: len(x.text),\n repeat=False,\n shuffle=True)\n \n self.val_iterator, self.test_iterator = data.BucketIterator.splits(\n (val_data, test_data),\n batch_size=self.config.batch_size,\n sort_key=lambda x: len(x.text),\n repeat=False,\n shuffle=False)\n \n print (\"Loaded {} training examples\".format(len(train_data)))\n print (\"Loaded {} test examples\".format(len(test_data)))\n print (\"Loaded {} validation examples\".format(len(val_data)))\n\n def load_my_data(self, word_embedding_pkl, pairs_pkl, context_flag=0):\n \"\"\"\n Loads the data from file\n :param word_embedding_pkl: absolute path to word_embeddings {Glove/Word2Vec}\n :param pairs_pkl: # pkl file save data\n :param context_flag: # 0: bairly include pairs\n # 1: include pairs and local context\n # 2: include pairs and global context\n # 3: include pairs, local context and global context\n :return:\n \"\"\"\n tokenizer = lambda text: [x for x in text]\n\n TEXT = data.Field(sequential=True, tokenize=tokenizer, fix_length=self.config.max_sen_len)\n LABEL = data.Field(sequential=False, use_vocab=False)\n datafields = [(\"text\", TEXT), (\"label\", LABEL)]\n\n # Load data from pd.DataFrame into torchtext.data.Dataset\n train_df, test_df, val_df = self.get_my_pandas_df(pairs_pkl, context_flag)\n\n train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]\n train_data = data.Dataset(train_examples, datafields)\n\n test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]\n test_data = data.Dataset(test_examples, datafields)\n\n val_examples = [data.Example.fromlist(i, datafields) for i in val_df.values.tolist()]\n val_data = data.Dataset(val_examples, datafields)\n\n TEXT.build_vocab(train_data, vectors=Vectors(name=word_embedding_pkl))\n self.word_embeddings = TEXT.vocab.vectors\n self.vocab = TEXT.vocab\n\n self.train_iterator = data.BucketIterator(\n train_data,\n batch_size=self.config.batch_size,\n sort_key=lambda x: len(x.text),\n repeat=False,\n shuffle=True)\n\n self.val_iterator, self.test_iterator = data.BucketIterator.splits(\n (val_data, test_data),\n batch_size=self.config.batch_size,\n sort_key=lambda x: len(x.text),\n repeat=False,\n shuffle=True)\n\n print('Loaded %d training example' % len(train_data))\n print('Loaded %d test example ' % len(test_data))\n print('Loaded %d validation examples' % len(val_data))\n\n\ndef evaluate_model(model, iterator):\n all_preds = []\n all_y = []\n 
for idx,batch in enumerate(iterator):\n if torch.cuda.is_available():\n x = batch.text.cuda()\n else:\n x = batch.text\n y_pred = model(x)\n predicted = torch.max(y_pred.cpu().data, 1)[1]\n all_preds.extend(predicted.numpy())\n all_y.extend(batch.label.numpy())\n score = accuracy_score(all_y, np.array(all_preds).flatten())\n return score","sub_path":"CNN/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"170124007","text":"import sys\nfrom PyQt5 import QtWidgets\nimport XRRRedGUI\nimport pyqtgraph as pg\nfrom ReflectometryData import ReflectometryData\nfrom ReflectometryBundle import ReflectometryBundle\nimport numpy as np\nfrom CustomViewBox import CustomViewBox\n\nclass XRRRedGUI(QtWidgets.QMainWindow, XRRRedGUI.Ui_MainWindow, object):\n\n\tdef __init__(self):\n\t\tQtWidgets.QMainWindow.__init__(self)\n\t\tself.setupUi(self)\n\t\tself.connectButtons()\n\n\t\tself.dataBundle = ReflectometryBundle()\n\t\tself.initializePlot()\n\t\tself.refreshPlot()\n\n\t\treturn\n\n\tdef connectButtons(self):\n\n\t\tself.loadSpecButton.clicked.connect(self.loadSpecButtonClicked)\n\t\tself.loadBackButton.clicked.connect(self.loadBackButtonClicked)\n\t\tself.loadSlitButton.clicked.connect(self.loadSlitButtonClicked)\n\t\tself.footprintCalcGuess.clicked.connect(self.footprintCalcGuessClicked)\n\t\tself.footprintCalcFromGraph.clicked.connect(self.footprintCalcFromGraphClicked)\n\t\tself.footprintRangeFull.clicked.connect(self.footprintRangeFullClicked)\n\t\tself.footprintRangeFromGraph.clicked.connect(self.footprintRangeFromGraphClicked)\n\t\tself.footprintApply.clicked.connect(self.footprintApplyClicked)\n\t\tself.combineScansButton.clicked.connect(self.combineScansButtonClicked)\n\n\t\tself.yAxisLog.toggled.connect(self.yAxisLogToggled)\n\t\tself.xAxisQ.toggled.connect(self.xAxisQToggled)\n\n\t\tself.deleteBackButton.clicked.connect(self.deleteBackButtonClicked)\n\t\tself.deleteSpecButton.clicked.connect(self.deleteSpecButtonClicked)\n\t\tself.deleteSlitButton.clicked.connect(self.deleteSlitButtonClicked)\n\n\t\treturn\n\n\tdef deleteSpecButtonClicked(self):\n\t\tfor item in self.specWidget.selectedItems():\n\t\t\tindex = self.specWidget.row(item)\n\t\t\tself.specWidget.takeItem(index)\n\t\t\tself.dataBundle.deleteSpec(index)\n\t\tself.plotDataBundle()\n\t\treturn\n\n\tdef deleteBackButtonClicked(self):\n\t\tfor item in self.backWidget.selectedItems():\n\t\t\tindex = self.backWidget.row(item)\n\t\t\tself.backWidget.takeItem(index)\n\t\t\tself.dataBundle.deleteBack(index)\n\t\tself.plotDataBundle()\n\t\treturn\n\n\tdef deleteSlitButtonClicked(self):\n\t\tfor item in self.slitWidget.selectedItems():\n\t\t\tindex = self.slitWidget.row(item)\n\t\t\tself.slitWidget.takeItem(index)\n\t\t\tself.dataBundle.deleteSlit(index)\n\t\tself.plotDataBundle()\n\t\treturn\n\n\tdef yAxisLogToggled(self, b):\n\t\tself.plot.getPlotItem().setLogMode(y=b)\n\t\treturn\n\n\tdef xAxisQToggled(self, b):\n\t\tself.plotDataBundle()\n\t\treturn\n\n\tdef plotDataBundle(self):\n\t\tself.plot.clear()\n\n\n\t\tif self.dataBundle.isProcessed():\n\t\t\tif self.xAxisQ.isChecked():\n\t\t\t\tself.plot.addItem(pg.PlotDataItem(x=self.dataBundle.getProcessed().getQ(), y=self.dataBundle.getProcessed().getIntensity(), symbol='t', pen=None,symbolPen=None, symbolSize=10, symbolBrush=(255, 100, 100, 100), name=\"Data\"))\n\t\t\telse:\n\t\t\t\tself.plot.addItem(pg.PlotDataItem(x=self.dataBundle.getProcessed().getTwoTheta(), 
y=self.dataBundle.getProcessed().getIntensity(), symbol='t', pen=None,symbolPen=None, symbolSize=10, symbolBrush=(255, 100, 100, 100), name=\"Data\"))\n\t\telse:\n\t\t\tif self.xAxisQ.isChecked():\n\t\t\t\tfor specScan in self.dataBundle.getSpecScans():\n\t\t\t\t\tself.plot.addItem(pg.PlotDataItem(x=specScan.getQ(), y=specScan.getIntensity(), symbol='t', pen=None, symbolPen=None,symbolSize=10, symbolBrush=(100, 100, 255, 100)))\n\t\t\telse:\n\t\t\t\tfor specScan in self.dataBundle.getSpecScans():\n\t\t\t\t\tself.plot.addItem(pg.PlotDataItem(x=specScan.getTwoTheta(), y=specScan.getIntensity(), symbol='t', pen=None, symbolPen=None,symbolSize=10, symbolBrush=(100, 100, 255, 100)))\n\n\t\tself.plot.autoRange()\n\t\tself.refreshPlot()\n\t\treturn\n\n\tdef plotFootprintCorrectionCurve(self):\n\t\tif self.footprintRangeMax.value() - self.footprintRangeMin.value() == 0:\n\t\t\treturn\n\t\tx = np.linspace(self.footprintRangeMin.value(), self.footprintRangeMax.value(), 1000)\n\t\ty = self.footprintSlope.value()*x + self.footprintIntercept.value()\n\t\tself.plot.addItem(pg.PlotDataItem(x=x, y=y, name=\"Footprint Correction\"))\n\t\tself.plot.autoRange()\n\t\treturn\n\n\tdef loadSpecButtonClicked(self):\n\t\tfilenames = QtWidgets.QFileDialog.getOpenFileNames(parent=self, caption=\"Select data files: \")[0]\n\t\tself.dataBundle.addSpecScans(filenames)\n\t\tself.specWidget.clear()\n\t\tself.specWidget.addItems(filenames)\n\t\tself.plotDataBundle()\n\t\treturn\n\n\tdef loadBackButtonClicked(self):\n\t\tfilenames = QtWidgets.QFileDialog.getOpenFileNames(parent=self, caption=\"Select data files: \")[0]\n\t\tself.dataBundle.addBackScans(filenames)\n\t\tself.backWidget.clear()\n\t\tself.backWidget.addItems(filenames)\n\t\tself.plotDataBundle()\n\t\treturn\n\n\tdef loadSlitButtonClicked(self):\n\t\tfilenames = QtWidgets.QFileDialog.getOpenFileNames(parent=self, caption=\"Select data files: \")[0]\n\t\tself.dataBundle.addSlitScans(filenames)\n\t\tself.slitWidget.clear()\n\t\tself.slitWidget.addItems(filenames)\n\t\tself.plotDataBundle()\n\t\treturn\n\n\tdef footprintCalcGuessClicked(self):\n\n\t\ta, b = self.dataBundle.guessFootprintCorrection()\n\t\tself.footprintSlope.setValue(a)\n\t\tself.footprintIntercept.setValue(b)\n\t\tself.plotDataBundle()\n\t\tself.plotFootprintCorrectionCurve()\n\t\treturn\n\n\tdef footprintCalcFromGraphClicked(self):\n\t\tself.plotDataBundle()\n\t\tself.plotFootprintCorrectionCurve()\n\t\tself.plot.autoRange()\n\t\treturn\n\n\tdef footprintRangeFullClicked(self):\n\t\tself.plotDataBundle()\n\t\tself.plotFootprintCorrectionCurve()\n\t\tself.plot.autoRange()\n\t\treturn\n\n\tdef footprintRangeFromGraphClicked(self):\n\t\tself.plotDataBundle()\n\t\tself.plotFootprintCorrectionCurve()\n\t\tself.plot.autoRange()\n\t\treturn\n\n\tdef footprintApplyClicked(self):\n\t\tself.dataBundle.footprintCorrection(minQ=self.footprintRangeMin.value(), maxQ=self.footprintRangeMax.value(), slope=self.footprintSlope.value(), 
intercept=self.footprintIntercept.value())\n\t\tself.plotDataBundle()\n\n\t\tself.footprintRangeMax.setEnabled(False)\n\t\tself.footprintRangeMin.setEnabled(False)\n\t\tself.footprintIntercept.setEnabled(False)\n\t\tself.footprintSlope.setEnabled(False)\n\t\tself.footprintApply.setEnabled(False)\n\t\tself.footprintRangeFromGraph.setEnabled(False)\n\t\tself.footprintRangeFull.setEnabled(False)\n\t\tself.footprintCalcMax.setEnabled(False)\n\t\tself.footprintCalcMin.setEnabled(False)\n\t\tself.footprintCalcGuess.setEnabled(False)\n\t\tself.footprintCalcFromGraph.setEnabled(False)\n\t\treturn\n\n\tdef refreshPlot(self):\n\n\t\tself.plot.getPlotItem().setLabel(\"left\", text=\"Intensity\")\n\n\t\tif self.yAxisLog.isChecked():\n\t\t\tself.plot.getPlotItem().setLogMode(y=True)\n\t\telse:\n\t\t\tself.plot.getPlotItem().setLogMode(y=False)\n\n\t\tif self.xAxisQ.isChecked():\n\t\t\tself.plot.getPlotItem().setLabel(\"bottom\", text=\"Q (1/Å)\")\n\t\telse:\n\t\t\tself.plot.getPlotItem().setLabel(\"bottom\", text=\"2Theta (°)\")\n\n\t\tself.plot.repaint()\n\n\t\treturn\n\n\tdef initializePlot(self):\n\t\tvb = CustomViewBox()\n\t\tself.plot = pg.PlotWidget(parent=self, viewBox=vb, title=\"Reflectivity\")\n\t\tself.plotLayout.addWidget(self.plot)\n\t\treturn\n\n\tdef combineScansButtonClicked(self):\n\t\ttry:\n\t\t\tself.dataBundle.combineScans()\n\t\t\tself.plotDataBundle()\n\t\t\tself.footprintRangeMin.setValue(self.dataBundle.getProcessed().getMinQ())\n\t\t\tself.footprintRangeMax.setValue(self.dataBundle.getProcessed().getMaxQ())\n\n\t\t\tself.loadSlitButton.setEnabled(False)\n\t\t\tself.loadBackButton.setEnabled(False)\n\t\t\tself.loadSpecButton.setEnabled(False)\n\t\t\tself.deleteSlitButton.setEnabled(False)\n\t\t\tself.deleteBackButton.setEnabled(False)\n\t\t\tself.deleteSpecButton.setEnabled(False)\n\t\t\tself.combineScansButton.setEnabled(False)\n\t\t\tself.specWidget.setEnabled(False)\n\t\t\tself.backWidget.setEnabled(False)\n\t\t\tself.slitWidget.setEnabled(False)\n\t\texcept ValueError as e:\n\t\t\tself.msg(str(e))\n\t\treturn\n\n\tdef msg(self, string):\n\t\tself.statusBar().showMessage(string, msecs=2000)\n\t\treturn\n\nif __name__ == \"__main__\":\n\tapp = QtWidgets.QApplication(sys.argv)\n\twin = XRRRedGUI()\n\twin.setWindowTitle(\"XRRRed\")\n\twin.show()\n\tsys.exit(app.exec_())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"271971481","text":"### modified from https://stackoverflow.com/questions/22886699/how-to-remove-all-n-sequence-entries-from-fasta-files\n\nimport sys\nimport Bio\nfrom Bio import SeqIO\n\nINPUT = sys.argv[1] # Input fasta file\nOUTPUT = sys.argv[2] # output fasta file\n\ndef main():\n records = Bio.SeqIO.parse(INPUT, 'fasta')\n filtered = (rec for rec in records if any(ch != '-' for ch in rec.seq))\n Bio.SeqIO.write(filtered, OUTPUT, 'fasta')\n\nif __name__==\"__main__\":\n main()\n\n\n\n","sub_path":"GIT/25_remove_all_gap_seq.py","file_name":"25_remove_all_gap_seq.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"271841905","text":"### TD01 le tri\n### algorithme de tri\n\nfrom tris import *\nimport random as rd\nimport time as t\nimport matplotlib.pyplot as plt\nimport sys\nsys.setrecursionlimit(1000000)\n\n\n### lecture du fichier 
csv\n\nf=open('films_martiniere.csv','r')\nligne1=f.readline()\nfichier=f.readlines()\nf.close()\n\nL=[]\nfor ligne in fichier:\n ligne=ligne.replace('\"','')\n ligne=ligne.split(';')\n ligne[-1]=ligne[-1].rstrip('\\n')\n ligne[-1]=int(ligne[-1])\n ligne[1]=int(ligne[1])\n L.append(ligne)\n \n\n\n\n\n\n","sub_path":"Exercices/S1_08_Tris/05_TriFilms/fichiers_utiles/fichiers_utiles/lecture_fichier_csv.py","file_name":"lecture_fichier_csv.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"544929800","text":"import os\nimport numpy as np\n\nfrom pyFAI.detectors import ALL_DETECTORS\n\nfrom nexusformat.nexus import *\nfrom nexpy.gui.datadialogs import NXDialog, GridParameters\nfrom nexpy.gui.utils import report_error\n\n\ndef show_dialog():\n try:\n dialog = ExperimentDialog()\n dialog.show()\n except NeXusError as error:\n report_error(\"Defining New Experiment\", error)\n\n\nclass ExperimentDialog(NXDialog):\n\n def __init__(self, parent=None):\n super(ExperimentDialog, self).__init__(parent)\n\n self.experiment_file = NXroot()\n self.experiment_file['entry'] = NXentry()\n\n self.detectors = {}\n self.entries = {}\n\n self.setup_instrument()\n\n self.set_layout(self.directorybox('Choose Experiment Directory', default=False), \n self.instrument.grid(header=False))\n self.set_title('New Experiment')\n\n def setup_instrument(self):\n entry = self.experiment_file['entry']\n entry.instrument = NXinstrument()\n entry.instrument.monochromator = NXmonochromator()\n entry.instrument.detector = NXdetector()\n entry['instrument/monochromator/wavelength'] = NXfield(0.5, dtype=np.float32)\n entry['instrument/monochromator/wavelength'].attrs['units'] = 'Angstroms'\n entry['instrument/monochromator/energy'] = NXfield(12.398419739640717/0.5, dtype=np.float32)\n entry['instrument/monochromator/energy'].attrs['units'] = 'keV'\n entry['instrument/detector/distance'] = NXfield(100.0, dtype=np.float32)\n entry['instrument/detector/distance'].attrs['units'] = 'mm'\n self.instrument = GridParameters()\n self.instrument.add('experiment', 'experiment', 'Experiment Name')\n self.instrument.add('wavelength', entry['instrument/monochromator/wavelength'], 'Wavelength (Ang)')\n self.instrument.add('distance', entry['instrument/detector/distance'], 'Detector Distance (mm)')\n detector_list = sorted(list(set([detector().name for detector in ALL_DETECTORS.values()])))\n self.instrument.add('detector', detector_list, 'Detector')\n self.instrument['detector'].value = 'Pilatus CdTe 2M'\n self.instrument.add('positions', [0,1,2,3,4], 'Number of Detector Positions', slot=self.set_entries)\n self.instrument['positions'].value = '0'\n\n def setup_entry(self, position):\n entry = NXentry()\n self.detectors[position] = GridParameters()\n self.detectors[position].add('x', 0.0, 'Translation - x (mm)')\n self.detectors[position].add('y', 0.0, 'Translation - y (mm)')\n self.experiment_file['f%s' % position] = entry\n\n def get_detector(self):\n for detector in ALL_DETECTORS:\n if ALL_DETECTORS[detector]().name == self.instrument['detector'].value:\n return ALL_DETECTORS[detector]()\n\n @property\n def positions(self):\n return int(self.instrument['positions'].value)\n \n def set_entries(self):\n for position in range(1,self.positions+1):\n self.setup_entry(position)\n self.layout.addLayout(self.detectors[position].grid(header=False, title='Position %s'%position))\n self.layout.addWidget(self.close_buttons(save=True))\n\n def 
get_parameters(self):\n entry = self.experiment_file['entry']\n entry['instrument/monochromator/wavelength'] = self.instrument['wavelength'].value\n entry['instrument/monochromator/energy'] = 12.398419739640717 / self.instrument['wavelength'].value\n detector = self.get_detector()\n entry['instrument/detector/description'] = detector.name\n entry['instrument/detector/distance'] = self.instrument['distance'].value\n entry['instrument/detector/pixel_size'] = detector.pixel1 * 1000\n entry['instrument/detector/pixel_size'].attrs['units'] = 'mm'\n entry['instrument/detector/pixel_mask'] = detector.mask\n entry['instrument/detector/shape'] = detector.shape\n entry['instrument/detector/yaw'] = 0.0\n entry['instrument/detector/pitch'] = 0.0\n entry['instrument/detector/roll'] = 0.0\n for position in range(1, self.positions+1):\n entry = self.experiment_file['f%s' % position]\n entry['instrument'] = self.experiment_file['entry/instrument']\n entry['instrument/detector/translation_x'] = self.detectors[position]['x'].value\n entry['instrument/detector/translation_x'].attrs['units'] = 'mm'\n entry['instrument/detector/translation_y'] = self.detectors[position]['y'].value\n entry['instrument/detector/translation_y'].attrs['units'] = 'mm'\n entry['instrument/detector/frame_time'] = 0.1\n entry['instrument/detector/frame_time'].attrs['units'] = 'seconds'\n\n def accept(self):\n try:\n home_directory = self.get_directory()\n self.mainwindow.default_directory = home_directory\n self.get_parameters()\n configuration_directory = os.path.join(home_directory, 'configurations')\n if not os.path.exists(configuration_directory):\n os.makedirs(configuration_directory)\n self.experiment_file.save(os.path.join(configuration_directory,\n self.instrument['experiment'].value+'.nxs'))\n task_directory = os.path.join(home_directory, 'tasks')\n if not os.path.exists(task_directory):\n os.makedirs(task_directory)\n calibration_directory = os.path.join(home_directory, 'calibrations')\n if not os.path.exists(calibration_directory):\n os.makedirs(calibration_directory)\n self.treeview.tree.load(self.experiment_file.nxfilename, 'rw')\n super(ExperimentDialog, self).accept()\n except Exception as error:\n report_error(\"Defining New Experiment\", error)\n","sub_path":"src/nxrefine/plugins/refine/new_experiment.py","file_name":"new_experiment.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"218285110","text":"def dfs(k, cnt, dungeons):\n global answer\n if cnt > answer:\n answer = cnt\n\n for j in range(N):\n if not visit[j] and k >= dungeons[j][0] and k >= dungeons[j][1]:\n visit[j] = 1\n dfs(k - dungeons[j][1], cnt + 1, dungeons)\n visit[j] = 0\n\n\ndef solution(k, dungeons):\n global N, visit, answer\n answer = 0\n N = len(dungeons)\n visit = [0] * N\n dfs(k, 0, dungeons)\n return answer","sub_path":"Algorithm/programmers/피로도.py","file_name":"피로도.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"163117558","text":"\nimport time\nimport unittest\n\nfrom slurmpy.slurmpy import Slurm, SlurmException\n\nclass TestSlurmpy(unittest.TestCase):\n def setUp(self):\n self.slurm_queue_args = {\"time\": \"00:00:15\", 'no-requeue': None, \"Q\": None}\n\n def test_silent_query_of_nonexistent_job(self):\n ret = Slurm.query('101', on_failure='silent')\n self.assertIsNone(ret)\n\n def test_exception_query_of_nonexistent_job(self):\n with 
self.assertRaises(SlurmException):\n Slurm.query('101')\n\n def test_sending_to_queue(self):\n s = Slurm(\"job-name\", self.slurm_queue_args) \n job_id = s.run('sleep 5')\n self.assertTrue(s.still_running(job_id))\n\n def test_sending_local(self):\n s = Slurm(\"job-name\")\n job_id = s.run('sleep 5', local=True)\n self.assertTrue(s.still_running(job_id))\n time.sleep(7)\n self.assertFalse(s.still_running(job_id))\n\n def test_multiple_local_sends(self):\n s = Slurm(\"job-name\")\n ids = []\n for i in range(5):\n ids.append(s.run('sleep 5', local=True))\n\n def test_multiple_queue_sends(self):\n s = Slurm(\"job-name\")\n ids = []\n for i in range(5):\n ids.append(s.run('sleep 5'))\n\n def test_kill_local(self):\n s = Slurm(\"job-name\")\n job_id = s.run('sleep 10', local=True)\n self.assertTrue(s.still_running(job_id))\n s.kill(job_id)\n self.assertFalse(s.still_running(job_id))\n\n def test_kill_queue(self):\n s = Slurm(\"job-name\", self.slurm_queue_args)\n job_id = s.run('sleep 10', local=True)\n self.assertTrue(s.still_running(job_id))\n s.kill(job_id)\n self.assertFalse(s.still_running(job_id))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/TestSlurmpy.py","file_name":"TestSlurmpy.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"554075495","text":"# -*- coding: utf-8 -*-\n# @Time: 2021-07-13 17:01\n# @Author: little kimber\n# @File: test002.py\n\"\"\"\n电影商店——管理录像带租借,记录借出时间、到期时间、逾期费用。复杂一点可以生成逾期用户的账号报告。\n建立四个类,分别是录像带,商店,租借人,被租借的录像带\n\"\"\"\n\nclass Vedio:\n def __init__(self, price, number, name, id):\n self.price = price\n self.number = number\n self.id = id\n self.name = name\n\n def __str__(self):\n return f\"名称:{self.name},数量:{self.number},价格:{self.price},类别编号:{self.id}\"\n\nclass Borrow_vedio:\n def __init__(self, borrow_name, borrow_id, user_id, borrow_day, back_day, today, due_day):\n self.borrow_name = borrow_name\n self.borrow_id = borrow_id\n self.user_id = user_id\n self.borrow_day = borrow_day\n self.back_day = back_day\n self.today = today\n self.due_day = due_day\n\n def __str__(self):\n return f'''被租借的录像带: 名称:{self.borrow_name}, 编号:{self.borrow_id} \n 租借人编号:{self.user_id}\n 租借日期:{self.borrow_day}, 应还日期:{self.back_day}\n 已逾期:{self.due_day}\n '''\n\nclass User:\n def __init__(self, name, u_id):\n self.name = name\n self.u_id = u_id\n self.borrowed = []\n\n def __str__(self):\n return f\"用户姓名:{self.name}, 用户编号:{self.u_id}\"\n\n def borrow(self, vedio, flag=1):\n # 设定最多借5个录像带\n if(len(self.borrowed)<5 or flag==0):\n if flag == 1:\n self.borrowed.append(vedio)\n else:\n self.borrowed.remove(vedio)\n\n else:\n print(\"已经超出借阅数量,不能再借阅\")\n\n def n_vedio(self):\n return len(self.borrowed)\n\n def list_vedio(self):\n for vedio in self.borrowed:\n print(vedio)\n\nclass Shop:\n def __init__(self):\n self.vedio = []\n self.borrowed_vedio = []\n self.user = []\n self.overdue = []\n self.due_day = 30\n\n def addVedio(self, vedio_tape):\n if len(self.vedio) > 0:\n flag = 0\n for v in self.vedio:\n if v.name == vedio_tape.name and v.id == vedio_tape.id:\n v.number += vedio_tape.number\n flag = 1\n if flag != 1:\n self.vedio.append(vedio_tape)\n else:\n self.vedio.append(vedio_tape)\n\n def delVedio(self, vedio_tape):\n flag = 0\n for v in self.vedio:\n if v.name == vedio_tape.name and v.id == vedio_tape.id:\n flag = 1\n if v.number > vedio_tape.number:\n v.number -= vedio_tape.number\n elif v.number == vedio_tape.number:\n self.vedio.remove(v)\n else:\n 
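# fewer copies are in stock than the caller asked to remove ("录像带数量不足" = tape quantity insufficient), so the request is refused\n 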
print(\"录像带数量不足\")\n break\n if flag == 0:\n print(\"未找到该录像带\")\n\n def findVedio(self, key):\n if isinstance(key, int):\n flag = 0\n for v in self.vedio:\n if v.id == key:\n flag = 1\n print(v)\n for bor in self.borrowed_vedio:\n if bor.id == v.id:\n print(bor)\n break\n if flag == 0:\n print(\"未找到录像带\")\n\n else:\n for v in self.vedio:\n if v.name == key:\n flag = 1\n print(v)\n for bor in self.borrowed_vedio:\n if bor.name == v.name:\n print(bor)\n\n def addUser(self, user_name):\n u = User(user_name, len(self.user)+1)\n self.user.append(u)\n\n def findUser(self, key):\n flag = 0\n if isinstance(key, int):\n for user in self.user:\n if user.u_id == key:\n flag = 1\n print(user)\n if flag == 0:\n print(\"没有该用户\")\n else:\n for user in self.user:\n if user.name == key:\n flag = 1\n print(user)\n if flag == 0:\n print(\"没有该用户\")\n\n def borrowVedio(self, vedio, user_id, day):\n pass\n\n def returnVedio(self, vedio, user_id, day):\n pass\n\n def list_user(self):\n for user in self.user:\n print(user)\n\n def list_vedio(self):\n for vedio in self.vedio:\n print(vedio)\n\n def list_borrowed_vedio(self):\n for bor in self.borrowed_vedio:\n print(bor)\n\n def overdue_update(self, day):\n pass\n\n def overdue_report(self, day):\n pass\n","sub_path":"venv/Include/class_test/test002.py","file_name":"test002.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"50937431","text":"#This is the driver routine for the constant velocity simualtions\n#\n#Steven Large\n#January 4th 2016\n\nfrom math import *\nfrom Parameters import *\nfrom LangevinPropogator import *\nimport random\nimport numpy\n\ndef Propogator(ProtocolTime, VelocityVariance):\n\n\ttime = 0\n\tposition = 0\n\tvelocity = 0\n\tWorkAcc = 0\n\n\tCP = 0\n\n\tPositionTrack = []\n\tCPTrack = []\n\n\tCPVel_MEAN = float(Dist)/float(ProtocolTime)\n\n\tCPVel = random.gauss(CPVel_MEAN, sqrt(VelocityVariance))\n\n\tEquilibration = 50\t\t\t\t\t\t\t \t\t\t\n\n\tProtocolTime = ProtocolTime + Equilibration\n\n\twhile time < Equilibration:\n\n\t\t(time, position, velocity, WorkStep, CP) = LangevinConstantCPVelocity(time, position, velocity, CP, CPVel)\n\n\twhile time < ProtocolTime:\n\n\t\t\t(time, position, velocity, WorkStep, CP) = LangevinConstantCPVelocity(time, position, velocity, CP, CPVel)\n\t\t\tWorkAcc = WorkAcc + WorkStep\n\n\t\t\tCPTrack.append(CP)\n\t\t\tPositionTrack.append(position)\n\n\treturn WorkAcc, CPVel, PositionTrack, CPTrack\n\ndef Propogator2(ProtocolTime,VelocityVariance):\n\n\ttime = 0\n\tposition = random.gauss(0,float(1)/float(k))\n\tvelocity = 0\n\tWorkAcc = 0\n\n\tCP = 0\n\n\tCPVel_MEAN = float(Dist)/float(ProtocolTime)\n\n\tCPVel = random.gauss(CPVel_MEAN, sqrt(VelocityVariance))\n\n\twhile time < ProtocolTime:\n\n\t\t\t(time, position, velocity, WorkStep, CP) = LangevinConstantCPVelocity(time, position, velocity, CP, CPVel)\n\t\t\tWorkAcc = WorkAcc + WorkStep\n\n\treturn WorkAcc\n\n\n\n","sub_path":"ConstantVelocityEnsemble/TESTING/ConstantDriver.py","file_name":"ConstantDriver.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"552429107","text":"import pandas as pd\nimport numpy as np\nfrom scipy.spatial import distance\nimport rpy2.robjects as robjects\nfrom rpy2.robjects import pandas2ri\nimport rpy2\nimport rpy2.robjects.packages as rpackages\nimport rpy2.robjects as ro\n\npandas2ri.activate()\nimport 
rpy2.robjects.numpy2ri\n\nrpy2.robjects.numpy2ri.activate()\nfrom rpy2.robjects.conversion import localconverter\nimport warnings\nfrom rpy2.rinterface import RRuntimeWarning\n\nwarnings.filterwarnings(\"ignore\", category=RRuntimeWarning)\npandas2ri.activate()\nfrom matplotlib import pyplot\nfrom collections import Counter\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom numpy import mean\nfrom numpy import std\nfrom pandas import read_csv\nfrom sklearn.preprocessing import RobustScaler, OneHotEncoder, MinMaxScaler, PowerTransformer, StandardScaler\nfrom scipy.stats import normaltest\nfrom sklearn.model_selection import ParameterSampler\nfrom numpy.random import randn\nfrom scipy.stats import shapiro\nfrom sklearn.model_selection import train_test_split\nfrom numpy import *\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nfrom scipy.stats.stats import pearsonr, spearmanr\nfrom scipy import stats\nimport tensorflow as tf\nimport multiprocessing as mp\nimport time\nimport os\nimport collections\nimport matplotlib.pyplot as plt\nimport itertools as it\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nimport re\nimport warnings\n\n\nwarnings.filterwarnings(\"ignore\")\n\n########################################################################################################################\n #Setting Global Variables\n########################################################################################################################\n\n#specify path of dataset\ninput_path = \"/home/yhh05/smogN/Manual_Daily_Albedo_NDVI_LST_Cleaned.csv\"\n\n#specify saved file directory \noutput_path = \"/home/yhh05/smogN/output/\"\n\n#specify columns to drop\ncolumnsToDrop = ['Date','Year','Month','Day',\n 'Month_1', 'Month_2', 'Month_3', 'Month_4',\n 'Vegetation_1', 'Vegetation_2','Vegetation_3',\n 'Climate', 'Vegetation', 'Latitude', 'Longitude',\n 'G','G-1','G-2','G-3','G-4','G-5',\n 'Climate_1', 'Climate_2', 'Climate_3',\n 'Latitude_1','Latitude_2', 'Latitude_3', 'Latitude_4', 'Latitude_5',\n 'Latitude_6','Longitude_1', 'Longitude_2', 'Longitude_3', 'Longitude_4',\n 'Longitude_5', 'Longitude_6',\n 'H', 'H_bowen_corr', 'H_bowen_corr-1', 'H_bowen_corr-2', 'H_bowen_corr-3', 'H_bowen_corr-4',\n 'H_bowen_corr-5', 'C_BOWENS',\n 'NETRAD','NETRAD-1','NETRAD-2','NETRAD-3','NETRAD-4','NETRAD-5',\n 'LE', 'LE_bowen_corr',\n 'Elevation(m)_1','Elevation(m)_2', 'Elevation(m)_3', 'Elevation(m)_4',\n 'Elevation(m)_5', 'Elevation(m)_6',\n 'ETo', 'EToF', 'ETr', 'ETrF',\n 'Site Id_1', 'Site Id_2', 'Site Id_3', 'Site Id_4', 'Site Id_5',\n 'Site Id_6'] \n\n#specify the output column\noutput_column = \"LE_bowen_corr_mm\"\n\n#specify name of target variable and name of predicted target variable\ny_test_name = 'LE_bowen_corr_mm'\ny_test_pred_name = 'LE_bowen_corr_mm_pred'\n\n#rename variables with spacing and under score for better proper namings\ncolumns_rename={\"Site Id_1\": \"Site_1\", \"Site Id_2\": \"Site_2\",\n \"Site Id_3\": \"Site_3\", \"Site Id_4\": \"Site_4\",\n \"Site Id_5\": \"Site_5\", \"Site Id_6\": \"Site_6\"}\n\n#specify one-hot encoded vector names \none_hot_encoded = []\n \n#specify desired split size\ntest_size = 0.2\n\n#specify if scaling\nscaling = True\n\n#specify if automatic or manual scaling\nautomatic = True\n \n#if not automatic specify desired column names\nall_columns = ['WS', 'RH', 'TA', 'LE', 'ET_bowen_corr']\n#specify the scaling type for each column\nscaling = 
['MinMax', 'Standard', 'Robust', 'MinMax + PowerTransform', 'Standard + PowerTransform']\n\n#specify the option of utility based\nutility_based = True\n\n#specify number of parameters in random search\nn_params = 100\n\n#specify batch size of hyper-parameters\nbatch_size = 4\n\n#specify number of batch you'd like to train model over\nbatch_num = 2\n\n#specify if random search\nrandom_search = False\n\n#specify if grid search\ngrid_search = True \n\n#spcify repetitions and folds for repeated stratified cross validation \nrepetitions = 1\nfolds = 5\n\n#specify if you wish to apply over sampling by smogn\nsmogn = False\n\n#smogn relate hyper-params\ntarget_variable = \"Leoutput\"\nrel_method='extremes'\nextr_type='both'\ncoef=1.5\n#rell = np.array([\n# [1, 0 , 0],\n# [6 , 0 , 0],\n# [15 ,1, 0]\n# ])\nrell = None\nrelevance_pts=rell\nrel=\"auto\"\nthr_rel=0.5\nCperc=\"balance\"\nk=5\nrepl=False\ndist=\"Euclidean\"\np=2\npert=0.1\n\n########################################################################################################################\n #Helper Methods\n########################################################################################################################\n\n# checks if the input is gaussian by shapiro wilk test\ndef check_distribution_shapiro(col):\n stat, p = shapiro(col)\n alpha = 0.05\n if p > alpha:\n gaussian = True\n else:\n gaussian = False\n\n return gaussian\n\n# checks if the input is gaussian by dagostino test\ndef check_distribution_dagostino(col):\n stat, p = normaltest(col)\n alpha = 0.05\n if p > alpha:\n gaussian = True\n else:\n gaussian = False\n\n return gaussian\n\n\n# splits data into train and test\ndef train_test_splitting(X, y, test_size=0.2):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1234)\n return X_train, X_test, y_train, y_test\n\n\n# does automatic standardization according to column values\ndef standardization_needed(col, X):\n col = col.values.reshape(-1, 1)\n X = X.values.reshape(-1, 1)\n # checks if column has zero values\n if 0 not in col:\n col_trans = Standard(col, X)\n col_trans_pow = apply_power_trans(col_trans)\n else:\n # if there are zero values, applies MinMaxScaler\n # check range of values\n col_trans = Min_Max(col, X)\n col_trans_pow = apply_power_trans(col_trans)\n return np.ravel(col_trans_pow)\n\n# does manual standardization according to user input\ndef standardization_needed_manual(col, X, scaling):\n col = col.values.reshape(-1, 1)\n X = X.values.reshape(-1, 1)\n if scaling == 'None':\n col_trans_final = col\n if scaling == 'MinMax':\n col_trans_final = Min_Max(col, X)\n if scaling == 'Standard':\n col_trans_final = Standard(col, X)\n if scaling == 'Robust':\n col_trans_final = Robust(col, X)\n if scaling == 'MinMax + PowerTransform':\n col_trans = Min_Max(col, X)\n col_trans_final = apply_power_trans(col_trans)\n if scaling == 'Standard + PowerTransform':\n col_trans = Standard(col, X)\n col_trans_final = apply_power_trans(col_trans)\n return np.ravel(col_trans_final)\n\n\ndef Min_Max(col, X):\n if any(n < 0 for n in col):\n scaler = MinMaxScaler((-1, 1))\n else:\n scaler = MinMaxScaler((0, 1))\n scaler.fit(X)\n col_trans = scaler.transform(col)\n return col_trans\n\n\ndef Standard(col, X):\n if 0 not in col:\n # if no zero values, apply StandardScaler\n scaler = StandardScaler()\n scaler.fit(X)\n col_trans = scaler.transform(col)\n return col_trans\n else:\n print('column has 0 values, cannot apply standard scaling')\n return col\n\n\ndef Robust(col, X):\n 
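# robust scaling: centre on the median and scale by the interquartile range so outliers distort the transform less than plain z-scoring\n 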
scaler = RobustScaler()\n scaler.fit(X)\n col_trans = scaler.transform(col)\n return col_trans\n\n# does power transform on column automatically\ndef apply_power_trans(col_trans):\n if any(n <= 0 for n in col_trans):\n # if there are negative or zero values, applies yeo-johnson transform\n pt = PowerTransformer('yeo-johnson')\n col_trans_pow = pt.fit_transform(col_trans)\n # checks if column values are strictly positive\n elif all(n > 0 for n in col_trans):\n # if values are strictly positive, applies box-cox transform\n pt = PowerTransformer('box-cox')\n col_trans_pow = pt.fit_transform(col_trans)\n return col_trans_pow\n\n# does scaling on dataset automatically\ndef apply_scaling(df, columns, X_train):\n df_scaled = pd.DataFrame(columns=df.columns)\n for i in df.columns: \n #checking if data is Gaussian\n if i in columns:\n if not check_distribution_shapiro(df[i]) and not check_distribution_dagostino(df[i]):\n print(str(i) + ' does not have a Gaussian distribution and will be scaled')\n #scaling data\n df_scaled[i] = standardization_needed(df[i], X_train[i])\n else: \n df_scaled[i] = df[i]\n print(str(i) + ' has a gaussian distribution')\n else:\n df_scaled[i] = df[i]\n return df_scaled\n\n# does scaling on dataset according to user input\ndef apply_scaling_manual(df, columns, X_train, scaling):\n iterator = 0\n if len(scaling) != len(columns):\n print(\"Please specify scaling for all columns listed\")\n return\n else:\n df_scaled = pd.DataFrame(columns=df.columns)\n for i in df.columns:\n if i in columns:\n # checking if data is Gaussian\n if not check_distribution_shapiro(df[i]) and not check_distribution_dagostino(df[i]):\n print(str(i) + ' does not have a Gaussian distribution and will be scaled')\n # scaling data\n df_scaled[i] = standardization_needed_manual(df[i], X_train[i], scaling[iterator])\n iterator = iterator + 1\n else:\n df_scaled[i] = df[i]\n print(str(i) + ' has a gaussian distribution')\n else:\n df_scaled[i] = df[i]\n return df_scaled\n\n\ndef get_rare(y, method, extr_type, thresh, coef, control_pts):\n # yrel=None, phi_params=None, loss_params=None, df=None, target_variable=None,\n ''' we will be getting the relevance function on all the data not just the training data because\n when we want to apply Lime on the 'rare' testing instances, the relevance function must map all possible demand\n values to a certain relevance. If it happens that some demand values are present only in the testing\n and not in the training data, we cannot detect rare values correctly. The way we compute\n rare values depends on the relevance\n\n :param y: the target variable vector\n :param method: 'extremes' or 'range'. Default is 'extremes'\n :param extr_type: 'both', 'high', or 'low'\n :param thresh: threshold. Default is 0.8\n :param coef: parameter needed for method \"extremes\" to specify how far the wiskers extend to the most extreme data point in the boxplot. The default is 1.5.\n :param control_pts: if method == 'range', then this is the relevance matrix provided by the user. 
Default is None\n\n :return the indices of the rare values in the data\n '''\n\n yrel = get_relevance_2(y, df=None, target_variable=None, method=method, extr_type=extr_type, control_pts=control_pts)\n\n # get the the phi.control returned parameters that are used as input for computing the relevance function phi\n # (function provided by R UBL's package: https://www.rdocumentation.org/packages/UBL/versions/0.0.6/topics/phi)\n # (function provided by R UBL's package\n # https://www.rdocumentation.org/packages/UBL/versions/0.0.6/topics/phi.control)\n # we need those returned parameters for computing rare values\n\n print('relevance method - phi function : {}'.format(method))\n\n if control_pts is None:\n # without relevance matrix\n print('control.pts - phi function: {}'.format(control_pts))\n print('without relevance matrix')\n params = runit.get_relevance_params_extremes(y, rel_method=method, extr_type=extr_type, coef=coef)\n else:\n # with relevance matrix (provided by the user)\n print('control.pts - phi function: {}'.format(control_pts))\n print('with relevance matrix')\n params = runit.get_relevance_params_range(y, rel_method=method, extr_type=extr_type, coef=coef,\n relevance_pts=control_pts)\n\n # phi params\n phi_params = params[0]\n loss_params = params[1]\n\n phi_params = dict(zip(phi_params.names, list(phi_params)))\n loss_params = dict(zip(loss_params.names, list(loss_params)))\n\n print('\\nCONTROL PTS')\n print(phi_params['control.pts'])\n print(\"for the whole dataset\")\n rare_indices = get_rare_indices(y=y, y_rel=yrel, thresh=thresh, controlpts=phi_params['control.pts'])\n # print('rare indices are: {}'.format(rare_indices))\n\n return rare_indices, phi_params, loss_params, yrel\n\n\ndef get_relevance_2(y, df, target_variable, method, extr_type, control_pts):\n '''\n gets the relevance values of the target variable vector\n :param y: the target variable vector\n :param df: if y in None, this must be passed. It is the data frame of interest\n :param target_variable: if y is None, this must be passed. 
It is the name of the target variable\n :param method: 'extremes' or 'range'\n :param extr_type: 'both', 'high', or 'low'\n :param control_pts: if method == 'range', will be a relevance matrix provided by the user\n :return: the relevance values of the associated target variable\n '''\n\n # get the target variable vector y\n if y is None:\n if df is None or target_variable is None:\n raise ValueError('if y is None, neither df nor target_variable must be None')\n y = df[target_variable]\n\n # check that the passed parameters are in order\n if method != 'range' and method != 'extremes':\n raise ValueError('method must be \"range\" or \"extremes\", there is no method called \"%s\"' % method)\n elif method == 'range' and control_pts is None:\n raise ValueError('If method == \"range\", then control_pts must not be None')\n elif method == 'extremes' and extr_type not in ['high', 'low', 'both']:\n raise ValueError('extr_type must wither be \"high\", \"low\", or \"both\"')\n else:\n if control_pts is None:\n print('getting yrel - Control pts is {}, method is {}'.format(control_pts, method))\n y_rel = runit.get_yrel(y=np.array(y), meth=method, extr_type=extr_type)\n else:\n print('getting yrel - Control pts is not None, method is {}'.format(method))\n y_rel = runit.get_yrel(y=np.array(y), meth=method, extr_type=extr_type, control_pts=control_pts)\n\n return y_rel\n\n\ndef get_rare_indices(y, y_rel, thresh, controlpts):\n '''\n get the indices of the rare values in the data\n :param y: the target variable vector\n :param y_rel: the target variable (y) relevance vector\n :param thresh: the threshold of interest\n :param controlpts: the phi.control (function provided by R UBL's package: https://www.rdocumentation.org/packages/UBL/versions/0.0.6/topics/phi.control)\n returned parameters that are used as input for computing the relevance function phi (function provided by R UBL's package: https://www.rdocumentation.org/packages/UBL/versions/0.0.6/topics/phi)\n :return: the indices of the rare values in 'y'\n '''\n\n # references\n # https://github.com/paobranco/SMOGN-LIDTA17/blob/8964a2327de19f6ca9e6f7055479ca863cd6b8a0/R_Code/ExpsDIBS.R#L41\n\n # transform controlpts returned by R into a python list\n controlpts = list(np.array(controlpts))\n # print(controlpts)\n\n # boolean variable indicating whether both low and high rare exist\n both = [controlpts[i] for i in [1, 7]] == [1, 1]\n\n # initialize rare cases to empty list (in case there are no rare cases at all)\n rare_cases = []\n\n if both:\n # bothr = True\n print('\\nWe have both low and high extremes')\n rare_low = [i for i, e in enumerate(y_rel) if e > thresh and y[i] < controlpts[3]]\n rare_high = [i for i, e in enumerate(y_rel) if e > thresh and y[i] > controlpts[3]]\n\n # merge two lists (of low rare + high rare) together\n rare_cases = rare_low + rare_high\n\n else:\n print('\\nWe dont have both', end=' ')\n if controlpts[1] == 1:\n print('We have only low rare')\n # lowr = True\n rare_cases = [i for i, e in enumerate(y_rel) if e > thresh and y[i] < controlpts[3]]\n else:\n print('We have only high rare')\n # highr = True\n rare_cases = [i for i, e in enumerate(y_rel) if e > thresh and y[i] > controlpts[3]]\n\n total = len(rare_cases)\n\n print('Total Number of rare cases: %d out of %d' % (total, len(y)))\n print('Percentage of Rare Cases: %.2f%%\\n' % (total/len(y) * 100))\n\n return rare_cases\n\n\ndef round_oversampled_one_hot_encoded(df):\n '''\n round one hot encoded vectors of an oversampled dataset. 
We have fed the SMOGN/SMOTER/GN/RandUnder\n a data frame having one hot encoded values (0s and 1s). However, given that we are using Euclidean/Manhattan\n distances for oversampling, some noise is added to these making them 1.0003, 0.99, etc.\n Having this said, this function will round these values back again so they are\n perfect 0s or 1s. We could have used HEOM distance, but it expects \"nominal\" features\n as opposed to one hot encodings.\n :param df: the over-sampled data frame\n :return: the over-sampled data frame with one hot encodings rounded\n '''\n for col in one_hot_encoded:\n df.loc[df[col] < 0.5, col] = 0\n df.loc[df[col] >=0.5, col] = 1\n return df\n\n\ndef count_abnormal(df):\n '''\n Due to Oversampling, SMOGN is adding noise to the one hot encoded vectors. This function counts how many of these\n are being done\n :param df: the oversampled data frame\n :return: statistics about the above\n '''\n count = 0\n for col in one_hot_encoded:\n for i, row in df.iterrows():\n if row[col] not in [0, 1]:\n count += 1\n else:\n continue\n\n print('number of noisy one hot encoded: {} out of {}'.format(count, len(df)))\n print('percentage of noisy one hot encoded: %.3f' % (count / len(df) * 100))\n\n#calculates all error metrics needed\ndef calculate_errors(actual, predicted):\n r2score = r2_score(actual, predicted)\n mase = mean_absolute_error(actual, predicted)\n rms = sqrt(mean_squared_error(actual, predicted))\n mse = mean_squared_error(actual, predicted)\n re = (mse / np.mean(predicted)) * 100\n pearson, pval = stats.pearsonr(actual.ravel(), predicted.ravel())\n mae = np.mean(np.abs((actual - predicted) / actual)) * 100\n return r2score, mase, rms, mse, re, pearson, pval, mae\n\n#get indices of folds in Stratified KFold CV\ndef get_fold_indices(X,y,n_splits,rare_values):\n rare_vec = [1 if i in rare_values else 0 for i in range(len(y))]\n y = np.array(rare_vec)\n\n splitter = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=123)\n folds = list(splitter.split(X, y))\n return folds\n \n#get grid of all hyper-parameters\ndef get_param_grid(dicts):\n return [dict(zip(dicts.keys(), p)) for p in it.product(*dicts.values())]\n\ndef model_fit_predict_CV(X, y, split, params):\n\n X_train, y_train = X.iloc[split[0],:], y[split[0]]\n X_valid, y_valid = X.iloc[split[1],:], y[split[1]]\n \n reg = _doFitBoostedTreeRegressor(X_train, y_train, X_train.columns, params)\n y_pred = _doPredictBoostedTreeRegressor(X_valid, reg)\n \n df_test = X_valid\n\n # combine y_test and y_pred in 1 dataset\n df_test[y_test_name] = y_valid\n df_test[y_test_pred_name] = y_pred\n \n mape_score,distance_corr,spearman_corr,pearson_corr,mae_score,mse_score,rmse_score,adjusted_r2,r2_Score,f1,f2,f5,prec,recall = evaluate(df_test, actual=y_test_name, predicted=y_test_pred_name,\n thresh=0.8, rel_method='extremes', extr_type='high',\n coef=1.5, relevance_pts=None)\n \n return mape_score,distance_corr,spearman_corr,pearson_corr,mae_score,mse_score,rmse_score,adjusted_r2,r2_Score,f1,f2,f5,prec,recall\n\n#make input data pipeline for tf model\ndef make_input_fn(X, y, n_epochs=None, shuffle=True):\n NUM_EXAMPLES = math.floor(len(y) / 2)\n\n def input_fn():\n \n dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))\n #print(dataset)\n if shuffle:\n dataset = dataset.shuffle(NUM_EXAMPLES)\n #For training, cycle thru dataset as many times as need (n_epochs=None).\n dataset = dataset.repeat(n_epochs)\n # In memory training doesn't use batching.\n dataset = dataset.batch(NUM_EXAMPLES, drop_remainder=False)\n 
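# each batch carries NUM_EXAMPLES rows (half of the data); this pairs with n_batches_per_layer=2 in the BoostedTreesRegressor set up below\n 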
#print(dataset)\n return dataset\n\n return input_fn\n\n#make test input data pipeline for tf model\ndef make_input_fn_test(X, n_epochs=None, shuffle=False):\n NUM_EXAMPLES = math.floor(len(X) / 2)\n def input_fn():\n dataset = tf.data.Dataset.from_tensor_slices(dict(X))\n if shuffle:\n dataset = dataset.shuffle(NUM_EXAMPLES)\n # For training, cycle thru dataset as many times as need (n_epochs=None).\n dataset = dataset.repeat(n_epochs)\n # In memory training doesn't use batching.\n dataset = dataset.batch(NUM_EXAMPLES,drop_remainder=True)\n #print(dataset)\n return dataset\n\n return input_fn\n\n#train the boosted tree\ndef _doFitBoostedTreeRegressor(X, Y, columns, params):\n # Define our feature columns\n fc = tf.feature_column\n feature_columns = []\n NUMERIC_COLUMNS = columns\n\n for feature_name in NUMERIC_COLUMNS:\n feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))\n\n # Creating the TF dataset\n train_input_fn = make_input_fn(X, Y)\n #print(train_input_fn, 'train input fn')\n\n # Defining the estimator (BoostedTreeRegressor)\n n_batches = 2\n est = tf.estimator.BoostedTreesRegressor(feature_columns, n_batches_per_layer=n_batches, **params)\n # Training the Model\n est.train(train_input_fn, max_steps=100)\n print(\"Done training for hyperparameter set\" + str(params))\n return est\n\n#predict on test values\ndef _doPredictBoostedTreeRegressor(X, reg):\n origShape = X.shape\n test_input_fn = make_input_fn_test(X)\n outData = reg.predict(test_input_fn)\n print(outData)\n\n out = []\n count = 1\n while (count <= origShape[0]):\n try:\n out.append(float(next(outData)['predictions']))\n count = count + 1\n except:\n break\n\n out = np.array(out)\n\n return out\n\n\ndef rarify_data(df, df_train, df_test, target_variable, method, extr_type, thresh, coef, control_pts):\n '''\n 1. get df_train and df_test\n 2. 
Obtain new df_train and df_test such that:\n * df_train and df_test have equal class distributions between classes: rare and not rare (see below)\n * get the % of rare in the whole dataframe (df_train + df_test)\n * denote df_train by A\n * denote df_test by B\n * denote the whole dataframe by S\n * S = A U B and S has X% rare\n * make A have X% rare\n * make B have X% rare\n\n :param df_train: the training data frame\n :param df_test: the testing data frame\n :param target_variable: name of the target variable column\n :return: df_train and df_test with equal class distribution between classes: rare and not rare\n '''\n\n # concatenate both df_train and df_test into one data frame\n #df = pd.concat([df_train, df_test])\n print(\"checking null values in rarify\")\n df.dropna(inplace=True)\n print(df.isnull().values.any())\n\n # get y, reset the index to avoid falsy retrievals by index later on\n y = df[target_variable].reset_index(drop=True)\n\n #print(y)\n print(method,extr_type,thresh,coef,control_pts)\n\n # get the indices of the rare values in the combined data frame\n # note that the relevance returned is the relevance of the whole data frame not just the training\n rare_values, phi_params, loss_params, yrel = get_rare(y, method, extr_type,thresh, coef, control_pts)\n\n # dictionary mapping each value to its relevance\n demandrel = {}\n relvals = np.array(yrel)\n print(y)\n y.dropna(inplace=True)\n for i, e in enumerate(y):\n if e not in demandrel:\n rel = relvals[i]\n demandrel[e] = rel\n\n # now we have the indices of the rare values, get their percentage and ensure equal\n # class distribution between rare and not rare\n\n # percentage of rare values in the whole dataset\n prare = len(rare_values)/len(df)\n print(prare)\n print('percentage of rare values in dataset before smogn: ' + str(prare*100) , file=open(output_path +\"rare_perc_results.txt\", \"a\"))\n print('percentage of rare values in dataset before smogn: ' + str(prare*100))\n # number of rare values in the whole dataset\n numrare = len(rare_values)\n print('number of rare values in dataset before smogn: {}/{}'.format(numrare, len(df)), file=open(output_path +\"rare_perc_results.txt\", \"a\"))\n print('number of rare values in dataset before smogn: {}/{}'.format(numrare, len(df)))\n\n # number of rare values that must be in each of the train and test\n numraretrain = int(round(prare * len(df_train)))\n numraretest = int(round(prare * len(df_test)))\n\n print('number of rare that must be in train: {}/{}'.format(numraretrain, len(df_train)))\n print('==> {}%%'.format((numraretrain/len(df_train))*100))\n print('number of rare that must be in test: {}/{}'.format(numraretest, len(df_test)))\n print('==> {}%%'.format((numraretest / len(df_test))*100))\n\n rare_values = sorted(rare_values)\n # print('rare values sorted: {}'.format(rare_values))\n\n # rare indices partitioned for each of the train and test\n rtrain = rare_values[:numraretrain]\n rtest = rare_values[numraretrain:]\n\n # # get the rows of the rare values, retrieve by indices\n # rarerowstrain = df.iloc[raretrain, :].reset_index(drop=True)\n # rarerowstest = df.iloc[raretest, :].reset_index(drop=True)\n\n # # number of rows that remain in training if we remove the rare values\n # numrowstrain = len(df_train) - len(rarerowstrain)\n\n # get the relevance of each of the new dftrainrare and dftestrare\n yreltrain = [demandrel[d] for d in df_train[target_variable]]\n yreltest = [demandrel[d] for d in df_test[target_variable]]\n\n # # get the modified indices of the rare 
values in each of the new dftrainrare and dftestrare\n # print(\"for train dataset\")\n # rtrain = get_rare_indices(df_train[target_variable], yreltrain, thresh, phi_params['control.pts'])\n # print(\"for test dataset\")\n # rtest = get_rare_indices(df_test[target_variable], yreltest, thresh, phi_params['control.pts'])\n\n if len(rtrain) != numraretrain:\n raise ValueError('Incompatibility between the number of rare values that must be included in the '\n 'training data for equal class distribution and the obtained number of rare')\n\n if len(rtest) != numraretest:\n raise ValueError('Incompatibility between the number of rare values that must be included in the '\n 'testing data for equal class distribution and the obtained number of rare')\n\n # return dftrainrare, dftestrare, phi_params['control.pts']\n return df_train, df_test, rtrain, rtest, yreltrain, yreltest, phi_params, loss_params, demandrel\n\n#required error metrics\ndef error_metrics(y_test, y_pred):\n r2score, mase, rms, mse, re, pearson, pval, mae = calculate_errors(y_test, y_pred)\n print(\"The range for the output variable is:\" + str(y_test.mean()))\n print(\"r2score : \" + str(r2score))\n print(\"mae : \" + str(mase))\n print(\"rmse : \" + str(rms))\n print(\"mse : \" + str(mse))\n print(\"re : \" + str(re))\n print(\"pearson : \" + str(pearson))\n print(\"mape : \" + str(mae))\n\n#evaluate ub error metrics\ndef evaluate(df, actual, predicted, thresh, rel_method='extremes', extr_type='high', coef=1.5, relevance_pts=None):\n y = np.array(df[actual])\n phi_params, loss_params, _ = get_phi_loss_params(y, rel_method, extr_type, coef, relevance_pts)\n\n nb_columns = len(list(df.columns.values)) - 1\n\n mape_score,distance_corr,spearman_corr,pearson_corr,mae_score,mse_score,rmse_score,adjusted_r2,r2_Score,f1,f2,f5,prec,recall = get_stats(df[actual], df[predicted], nb_columns, thresh, phi_params, loss_params)\n return mape_score,distance_corr,spearman_corr,pearson_corr,mae_score,mse_score,rmse_score,adjusted_r2,r2_Score,f1,f2,f5,prec,recall\n\n\ndef get_phi_loss_params(y, rel_method, extr_type='high', coef=1.5, relevance_pts=None):\n '''\n get the parameters of the relevance function\n :param df: dataframe being used\n :param target_variable: name of the target variable\n :param rel_method: either 'extremes' or 'range'\n :param extr_type: either 'high', 'low', or 'both' (defualt)\n :param coef: default: 1.5\n :param relevance_pts: the relevance matrix in case rel_method = 'range'\n :return: phi parameters and loss parameters\n '''\n\n if relevance_pts is None:\n print('Will not use relevance matrix')\n params = runit.get_relevance_params_extremes(y, rel_method=rel_method, extr_type=extr_type, coef=coef)\n else:\n print('Using supplied relevance matrix')\n params = runit.get_relevance_params_range(y, rel_method=rel_method, extr_type=extr_type, coef=coef,\n relevance_pts=relevance_pts)\n\n # phi params and loss params\n phi_params = params[0]\n loss_params = params[1]\n relevance_values = params[2]\n\n phi_params = dict(zip(phi_params.names, list(phi_params)))\n loss_params = dict(zip(loss_params.names, list(loss_params)))\n\n return phi_params, loss_params, relevance_values\n\n\ndef get_stats(y_test, y_pred, nb_columns, thr_rel, phi_params, loss_params):\n '''\n Function to compute regression error metrics between actual and predicted values +\n correlation between both using different methods: Pearson, Spearman, and Distance\n :param y_test: the actual values. 
Example df['actual'] (the string inside is the name\n of the actual column. Example: df['LE (mm)'], df['demand'], etc.)\n :param y_pred: the predicted vlaues. Example df['predicted']\n :param nb_columns: number of columns <>\n :return: R2, Adj-R2, RMSE, MSE, MAE, MAPE\n '''\n\n def mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n if not isinstance(y_test, list):\n y_test = list(y_test)\n if not isinstance(y_pred, list):\n y_pred = list(y_pred)\n\n n = len(y_test)\n\n r2_Score = r2_score(y_test, y_pred) # r-squared\n adjusted_r2 = 1 - ((1 - r2_Score) * (n - 1)) / (n - nb_columns - 1) # adjusted r-squared\n rmse_score = np.sqrt(mean_squared_error(y_test, y_pred)) # RMSE\n mse_score = mean_squared_error(y_test, y_pred) # MSE\n mae_score = mean_absolute_error(y_test, y_pred) # MAE\n #print(np.asarray(np.abs(( np.array(y_test) - np.array(y_pred)) / np.array(y_test)), dtype=np.float64))\n mape_score = np.asarray(np.abs(( np.array(y_test) - np.array(y_pred)) / np.array(y_test)), dtype=np.float64).mean() * 100 # MAPE\n\n trues = np.array(y_test)\n preds = np.array(y_pred)\n\n method = phi_params['method']\n npts = phi_params['npts']\n controlpts = phi_params['control.pts']\n ymin = loss_params['ymin']\n ymax = loss_params['ymax']\n tloss = loss_params['tloss']\n epsilon = loss_params['epsilon']\n\n rmetrics = runit.eval_stats(trues, preds, thr_rel, method, npts, controlpts, ymin, ymax, tloss, epsilon)\n\n # create a dictionary of the r metrics extracted above\n rmetrics_dict = dict(zip(rmetrics.names, list(rmetrics)))\n\n if isinstance(y_pred[0], np.ndarray):\n y_pred_new = [x[0] for x in y_pred]\n y_pred = y_pred_new\n pearson_corr, _ = pearsonr(y_test, y_pred)\n spearman_corr, _ = spearmanr(y_test, y_pred)\n distance_corr = distance.correlation(y_test, y_pred)\n\n print('\\nUtility Based Metrics')\n print('F1: %.5f' % rmetrics_dict['ubaF1'][0])\n print('F2: %.5f' % rmetrics_dict['ubaF2'][0])\n print('F05: %.5f' % rmetrics_dict['ubaF05'][0])\n print('precision: %.5f' % rmetrics_dict['ubaprec'][0])\n print('recall: %.5f' % rmetrics_dict['ubarec'][0])\n\n print('\\nRegression Error Metrics')\n print('R2: %.5f' % r2_Score)\n print('Adj-R2: %.5f' % adjusted_r2)\n print('RMSE: %.5f' % rmse_score)\n print('MSE: %.5f' % mse_score)\n print('MAE: %.5f' % mae_score)\n print('MAPE: %.5f' % mape_score)\n\n print('\\nCorrelations')\n print('Pearson: %.5f' % pearson_corr)\n print('Spearman: %.5f' % spearman_corr)\n print('Distance: %.5f' % distance_corr)\n return mape_score,distance_corr,spearman_corr,pearson_corr,mae_score,mse_score,rmse_score,adjusted_r2,r2_Score,rmetrics_dict['ubaF1'][0],rmetrics_dict['ubaF2'][0],rmetrics_dict['ubaF05'][0],rmetrics_dict['ubaprec'][0],rmetrics_dict['ubarec'][0]\n \n \ndef calculate_avg_error_metrics(mape_folds, d_f,sp_f, p_f, mae_f,mse_f, rmse_f, ar2_f, r2_f, f1_f, f2_f, f5_f,prec_f,recall_f,folds):\n print('\\nUtility Based Metrics Across All', file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_f1 = f1_f / folds\n print('F1: ' , avg_f1, file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_f2 = f2_f / folds\n print('F2: ' , avg_f2, file=open(output_path +\"output_CV_results.txt\", \"a\") )\n avg_f5 = f5_f / folds\n print('F05:' , avg_f5, file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_prec = prec_f / folds\n print('precision: ', avg_prec , file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_recall = recall_f / 
folds\n print('recall:' , avg_recall , file=open(output_path +\"output_CV_results.txt\", \"a\"))\n \n print('\\nRegression Error Metrics Across All', file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_r2 = r2_f/folds\n print('R2:' , avg_r2, file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_ar2 = ar2_f/folds\n print('Adj-R2:' , avg_ar2, file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_rmse = rmse_f/folds\n print('RMSE:' , avg_rmse , file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_mse = mse_f/folds\n print('MSE:' , avg_mse , file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_mae = mae_f/folds\n print('MAE:' , avg_mae, file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_mape = mape_folds/folds\n print('MAPE:' , avg_mape , file=open(output_path +\"output_CV_results.txt\", \"a\"))\n \n print('\\nCorrelations Across All', file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_pearson = p_f/folds\n print('Pearson:' , avg_pearson, file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_spearman = sp_f/folds\n print('Spearman:' , avg_spearman , file=open(output_path +\"output_CV_results.txt\", \"a\"))\n avg_dist = d_f/folds\n print('Distance:' , avg_dist, file=open(output_path +\"output_CV_results.txt\", \"a\"))\n return avg_f1, avg_f2, avg_f5, avg_prec, avg_recall, avg_r2, avg_ar2, avg_rmse, avg_mse, avg_mae, avg_mape, avg_pearson, avg_spearman, avg_dist\n \ndef get_relevance_oversampling(smogned, target_variable, targetrel):\n '''\n gets the relevance values of an oversampled data frame\n :param smogned: the oversampled data frame\n :param target_variable: name of the target variable column\n :param targetrel: dictionary mapping each target variable value to a relevance value\n :return: the relevance of the oversampled data frame\n '''\n yrelafter = []\n distances = []\n for val in smogned[target_variable]:\n if val in targetrel:\n yrelafter.append(targetrel[val])\n else:\n nearest = min(sorted(list(targetrel.keys())), key=lambda x: abs(x - val))\n distances.append(abs(nearest - val))\n yrelafter.append(targetrel[nearest])\n\n return yrelafter, distances\n \ndef get_formula(target_variable):\n '''\n gets the formula for passing it to R functions. 
Example: target_variable ~ col1 + col2 ...\n :param target_variable: the name of the target variable\n :return: R's formula as follows: target_variable ~ other[0] + other[1] + other[2] + other[3] + ...\n '''\n formula = runit.create_formula(target_variable)\n return formula\n \ndef apply_smogn(df_train, smogn, target_variable, phi_params, thr_rel, Cperc, k, repl, dist, p, pert, plotdensity=False ):\n '''\n method that applies SMOGN Algorithm to the current data frame\n '''\n # print('getting back values from oversampled R data frame')\n # print('before smogn')\n # print('zamatet')\n #print(pandas2ri.py2ri(df_train).head(), \"this is py2ri\")\n if smogn:\n smogned = runit.WFDIBS(\n fmla=get_formula(target_variable),\n dat= pandas2ri.py2ri(df_train),\n #dat=df_train,\n method=phi_params['method'][0],\n npts=phi_params['npts'][0],\n controlpts=phi_params['control.pts'],\n thrrel=thr_rel,\n Cperc=Cperc,\n k=k,\n repl=repl,\n dist=dist,\n p=p, \n pert=pert)\n\n # print('after smogn')\n # print('before pandas2ri')\n #convert the oversampled R Data.Frame back to a pandas data frame\n smogned = pandas2ri.ri2py_dataframe(smogned)\n # print('after pandas2ri')\n\n if plotdensity:\n # density plot after smooting\n plot_density(smogned,target_variable,output_folder + 'plots/', 'density_after_smogn', 'Density Plot')\n\n X_train = np.array(smogned.loc[:, smogned.columns != target_variable])\n y_train = np.array(smogned.loc[:, target_variable])\n\n return X_train, y_train\n \ndef write_to_txt(filename, content):\n text_file = open(output_path + filename, \"w\")\n text_file.write(content)\n text_file.close()\n \ndef plot_actual_vs_predicted(df, predicted_variable):\n plt.plot(list(range(1, len(df) + 1)), df[y_test_name], color='b', label='actual')\n plt.plot(list(range(1, len(df) + 1)), df[predicted_variable], color='r', label='predicted')\n plt.legend(loc='best')\n plt.suptitle('actual vs. predicted')\n plt.savefig(output_path + 'actual_vs_predicted')\n plt.close()\n \ndef plot_actual_vs_predicted_scatter_bisector(df, predicted_variable):\n fig, ax = plt.subplots()\n ax.scatter(df[y_test_name], df[predicted_variable], c='black')\n lims = [\n np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes\n np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes\n ]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n plt.suptitle('actual vs. 
predicted forecasts')\n plt.savefig(output_path + 'actual_vs_predicted_scatter_plot')\n plt.close()\n\n\ndef plot_relevance(y, yrel, target_variable, output_folder, fig_name):\n reldict = {}\n y = y[target_variable]\n for i, e in enumerate(y):\n if e not in reldict:\n reldict[e] = yrel[i]\n\n reldict = dict(collections.OrderedDict(sorted(reldict.items())))\n plt.plot(list(reldict.keys()), list(reldict.values()))\n plt.xlabel(target_variable)\n plt.ylabel('relevance')\n\n plt.savefig(output_folder + fig_name)\n plt.close()\n\ndef plot_target_variable(df, df_resampled, output_column, output_folder, fig_name):\n y = df[output_column]\n y_resamp = df_resampled[output_column]\n plt.plot(list(range(len(y))), sorted(y), label = \"original\")\n plt.plot(list(range(len(y_resamp))), sorted(y_resamp), label = \"resampled\")\n plt.xlabel('Index')\n plt.ylabel(target_variable)\n plt.legend()\n plt.savefig(output_folder + fig_name)\n plt.close()\n \ndef get_relevance():\n ctrl = phi_params['control.pts']\n if rel_method[0] == 'extremes' and relevance_pts[0] is None:\n rell = np.array([\n [ctrl[0], ctrl[1], ctrl[2]],\n [ctrl[3], ctrl[4], ctrl[5]],\n [ctrl[6], ctrl[7], ctrl[8]]\n ])\n else:\n rell = relevance_pts[0]\n\n return rell\n\n## Generate lags for all input features, re-generate even if some exist so that order will not be shuffled after nan dropping\ndef generate_lags_for(df, column, lags_count):\n for i in range(lags_count):\n lag_name = column + \"-\" + str(i + 1)\n df[lag_name] = df[column].shift(i + 1)\n return df\n\ndef generate_lags(df, lagsForColumns):\n '''This function generates the lags for the list of columns'''\n for k in range(len(lagsForColumns)):\n col = lagsForColumns[k]\n if col in df.columns:\n df = generate_lags_for(df, col, 5)\n return df\n\n\ndef split_train_test_valid(df, TRAIN_RATIO, TEST_RATIO):\n X_train = pd.DataFrame()\n X_test = pd.DataFrame()\n Y_train = pd.DataFrame()\n Y_test = pd.DataFrame()\n \n unique_sites = df[\"Site Id\"].unique()\n print(\"Number of sites:\", len(unique_sites))\n\n for site in unique_sites:\n df_site = df[df[\"Site Id\"] == site]\n X = df_site\n train_index = int(X.shape[0] * TRAIN_RATIO)\n test_index = int(X.shape[0] * (TRAIN_RATIO + TEST_RATIO))\n\n X_train = X_train.append(X[:train_index], ignore_index = True)\n X_test = X_test.append(X[train_index:], ignore_index = True)\n Y_train = Y_train.append(X[:train_index], ignore_index = True)\n Y_test = Y_test.append(X[train_index:], ignore_index = True)\n \n Y_train = Y_train[[output_column]]\n Y_test = Y_test[[output_column]]\n \n X_train = X_train.drop([output_column], axis = 1)\n X_test = X_test.drop([output_column], axis = 1)\n \n return X_train, X_test, Y_train, Y_test\n\n\n########################################################################################################################\n #Establish a connection to R library\n########################################################################################################################\nrpy2.robjects.numpy2ri.activate()\nrunit = robjects.r\nrunit['source']('/home/yhh05/smogN/smogn.R')\n\n########################################################################################################################\n #Read and Preprocess Dataset\n########################################################################################################################\n\ndf = pd.read_csv(input_path, delimiter=',')\n#drop NaN values\ndf.dropna(inplace=True)\n#df = df[df[\"Site Id\"].str.startswith('US-')]\n#df = df[~df[\"Site 
Id\"].str.startswith('US-')]\ndf = df[df[output_column].between(1, 15)]\n#generate lags\nlagsForColumns = [\"SW_IN\", \"WS\", \"RH\", \"TA\", \"EEflux LST\", \"EEflux Albedo\", \"EEflux NDVI\"]\ndf = generate_lags(df, lagsForColumns)\n\ndf_ameri = df[df[\"Site Id\"].str.startswith('US-')]\ndf_euro = df[~df[\"Site Id\"].str.startswith('US-')]\n\ndf_ameri = df_ameri.drop(columnsToDrop, axis = 1)\ndf_euro = df_euro.drop(columnsToDrop, axis = 1)\n\ndf_ameri.rename(columns_rename, inplace=True)\ndf_ameri.dropna(inplace=True)\n\ndf_euro.rename(columns_rename, inplace=True)\ndf_euro.dropna(inplace=True)\nprint(\"I am here i cleaned data\")\n\n\nX_train = df_ameri.drop([output_column], axis=1)\ny_train = df_ameri[output_column].reset_index(drop=True)\ncols = X_train.columns\n\nX_test = df_euro.drop([output_column], axis=1)\ny_test = df_euro[output_column]\n\n#creating test dataset\ndf_test = X_test\ndf_test[y_test_name] = y_test\n\n########################################################################################################################\n #Train and Test\n########################################################################################################################\n#X_train, X_test, y_train, y_test = split_train_test_valid(df, 0.8, 0.2)\n\ncolumnToDrop = \"Site Id\"\nX_train.drop([columnToDrop], axis = 1, inplace=True)\nX_test.drop([columnToDrop], axis = 1, inplace=True)\n\ndf_test = X_test\ndf_test[y_test_name] = y_test\n#creating train dataset\ndf_train = X_train\ndf_train[output_column] = y_train\nsize_original = df_train.shape\n\ndf_train.reset_index(drop=True)\ndf_test.reset_index(drop=True)\n\nprint(\"checking null values in train\")\nprint(df_train.isnull().values.any())\nprint(\"checking null values in test\")\nprint(df_test.isnull().values.any())\n#rarify both train and test sets and create new Xtrain,ytrain,Xtest,ytest\ndf_train_rare, df_test_rare, rtrain, rtest, yreltrain, yreltest, phi_params, loss_params, targetrel = rarify_data(df, df_train, df_test,output_column, rel_method,extr_type, thr_rel,coef, relevance_pts)\nprint(y_train.shape)\n#yreltrain = np.array(yreltrain).reshape(len(yreltrain), 1)\n# plot_relevance(y_train, yreltrain, output_column, output_path, \"relevance_values_train_data_exp1\")\n# plot_relevance(y_test, yreltest, output_column, output_path, \"relevance_values_test_data_exp1\")\nX_train = X_train.drop([output_column], axis=1)\nX_test = X_test.drop([output_column], axis=1)\ncols = X_train.columns\nX_train.dropna(inplace=True)\nX_test.dropna(inplace=True)\nprint(X_train.columns)\nprint(\"cols in X train after rarify\")\nprint(\"checking null values after rarify\")\nprint(X_train.isnull().values.any())\nprint(len(X_train))\nprint(\"size of Xtrain\")\n\n\n########################################################################################################################\n #Scaling\n########################################################################################################################\n\nif scaling: \n if automatic:\n #type desired col names in X to be scaled\n all_columns = list(X_train.columns)\n #standardize dataset\n X_train = apply_scaling(X_train, all_columns, X_train)\n X_test = apply_scaling(X_test, all_columns, X_train)\nelse: \n df_scaled_manual = apply_scaling_manual(df, all_columns, X_train, scaling)\n########################################################################################################################\n #Random 
Search\n########################################################################################################################\n\n# define set of hyper-parameters\n# params = {\n# 'n_trees': [50, 150, 200, 250, 300, 350],\n# 'max_depth': [1, 3, 5, 7, 9],\n# 'learning_rate' : [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3],\n# 'l1_regularization': [0, 0.001, 0.01, 0.1, 0.2, 0.3],\n# 'l2_regularization' : [0, 0.001, 0.01, 0.1, 0.2, 0.3]\n# }\n\n# # set of hyper-parameters but with tree complexity and pruning\n# params_with_complexity = {\n# 'n_trees': [50, 150, 200, 250, 300],\n# 'max_depth': [1, 3, 5, 7, 9],\n# 'learning_rate' : [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3],\n# 'l1_regularization': [0.01, 0.1, 0.2, 0.3], \n# 'l2_regularization' : [0.01, 0.1, 0.2, 0.3],\n# 'tree_complexity' : [1,2,3],\n# 'pruning_mode' : ['pre', 'post']\n# }\n\nif random_search:\n # do random search\n rng = np.random.RandomState(0)\n #specify random parameter number\n\n # getting list of hyper-parameters\n param_list = list(ParameterSampler(params_with_complexity, n_iter=n_params, random_state=rng))\n\n mape_list = []\n for param in param_list:\n start = time.time()\n reg = _doFitBoostedTreeRegressor(X_train, y_train, X_train.columns, param)\n y_pred = _doPredictBoostedTreeRegressor(X_test, reg)\n end = time.time()\n print(\"The time taken to train and predict is \" + str(end - start) + \" seconds\")\n df_test[y_test_name_pred] = y_pred\n if utility_based:\n mape = evaluate(df_test, actual=y_test_name, predicted=y_test_name_pred, thresh=0.8, rel_method='extremes', extr_type='high',coef=1.5, relevance_pts=None)\n mape_list.append(mape)\n else:\n error_metrics(y_test, y_pred)\n\n print(\"The best mape is \" + str(min(mape_list)))\n index = mape_list.index(min(mape_list))\n print(\"The best hyper-params are \" + str(param_list[index]))\n\n# ########################################################################################################################\n# #Grid Search\n# ########################################################################################################################\n\n#The best hyper-params in random search were the following:\n#params_best = {'n_trees': 100, 'max_depth': 5, 'learning_rate': 0.1, 'l1_regularization': 0, 'l2_regularization': 0}\n\n#Thus, we shall do gridsearch around these hyper-params:\nparams_grid = {\n 'n_trees': [100,120],\n 'max_depth': [7,6],\n 'learning_rate' : [0.1,0.01]\n}\n\n#params_grid = {'n_trees': 100, 'max_depth': 7, 'learning_rate': 0.1}\n\n\nif grid_search:\n grid = get_param_grid(params_grid)\n print(\"We will be trying \" + str(len(grid)) + \" hyper-params\" )\n\n########################################################################################################################\n #Grid Search + CV\n########################################################################################################################\nif grid_search:\n mape_all = []\n n_params = batch_size / (repetitions * folds)\n total_iter = len(grid) * folds * repetitions\n grid_start = ( batch_num - 1 )* n_params\n grid_end = batch_num * n_params\n grid_needed = grid[int(grid_start):int(grid_end)]\n grid_needed = grid\n\n for param in grid_needed:\n mape_rep= d_rep=sp_rep= p_rep= mae_rep=mse_rep= rmse_rep= ar2_rep= r2_rep= f1_rep= f2_rep= f5_rep=prec_rep=recall_rep=0\n\n for rep in range(repetitions):\n fold_indx = get_fold_indices(X_train,y_train,folds,rtrain)\n print(\"Calculated stratified fold indices\")\n mape_folds = d_f=sp_f= p_f= mae_f=mse_f= rmse_f= ar2_f= r2_f= f1_f= f2_f= 
f5_f=prec_f=recall_f = 0\n \n for fold in range(folds):\n print( \" *************************Results for FOLD number \" + str(fold) + \"***************************** \" )\n print(\"Columns used in X_train in CV\")\n print(X_train)\n print(y_train)\n mape_score,distance_corr,spearman_corr,pearson_corr,mae_score,mse_score,rmse_score,adjusted_r2,r2_Score,f1,f2,f5,prec,recall = model_fit_predict_CV(X_train,y_train,fold_indx[fold], param)\n mape_folds += mape_score\n d_f += distance_corr\n sp_f += spearman_corr\n p_f += pearson_corr\n mae_f += mae_score\n mse_f += mse_score\n rmse_f += rmse_score\n ar2_f += adjusted_r2\n r2_f += r2_Score\n f1_f += f1\n f2_f += f2 \n f5_f += f5 \n prec_f += prec\n recall_f += recall \n print(\"For param \" + str(param) , file=open(output_path +\"output_CV_results.txt\", \"a\") )\n print( \" *************************FOLDS Average***************************** \", file=open(output_path +\"output_CV_results.txt\", \"a\") )\n avg_f1, avg_f2, avg_f5, avg_prec, avg_recall, avg_r2, avg_ar2, avg_rmse, avg_mse, avg_mae, avg_mape, avg_pearson, avg_spearman, avg_dist = calculate_avg_error_metrics(mape_folds, d_f,sp_f, p_f, mae_f,mse_f, rmse_f, ar2_f, r2_f, f1_f, f2_f, f5_f,prec_f,recall_f,folds)\n \n mape_rep += avg_mape\n d_rep += avg_dist\n sp_rep += avg_spearman\n p_rep += avg_pearson\n mae_rep += avg_mae\n mse_rep += avg_mse\n rmse_rep += avg_rmse\n ar2_rep += avg_ar2\n r2_rep += avg_r2\n f1_rep += avg_f1\n f2_rep += avg_f2 \n f5_rep += avg_f5 \n prec_rep += avg_prec\n recall_rep += avg_recall\n \n print( \"************************REPETITIONS Average*******************************\", file=open(output_path +\"output_CV_results.txt\", \"a\") )\n avg_f1, avg_f2, avg_f5, avg_prec, avg_recall, avg_r2, avg_ar2, avg_rmse, avg_mse, avg_mae, avg_mape, avg_pearson, avg_spearman, avg_dist = calculate_avg_error_metrics(mape_rep, d_rep,sp_rep, p_rep, mae_rep,mse_rep, rmse_rep,ar2_rep, r2_rep, f1_rep, f2_rep, f5_rep,prec_rep,recall_rep, repetitions)\n \n mape_all.append(avg_mape)\n\n print(\"The best mape is \" + str(min(mape_all)))\n index = mape_all.index(min(mape_all))\n print(\"The best hyper-params are \" + str(grid_needed[index]))\n\n write_to_txt('winning-hyperparams.txt', str(grid_needed[index]))\n winning_hyper = grid_needed[index]\n\nwinning_hyper = {'n_trees': 100, 'max_depth': 6, 'learning_rate': 0.1}\n\n#######################################################################################################################\n #Applying Smogn\n#######################################################################################################################\n\nif smogn: \n df_train = X_train\n df_train[\"Leoutput\"] = y_train\n\n if rel_method == 'range' and relevance_pts is None:\n raise ValueError('You have set rel_method = range. 
You must provide relevance_pts as a matrix.'\n 'Currently, it is None')\n\n y_train_a = np.array(df_train[target_variable])\n phi_params, loss_params, relevance_values = get_phi_loss_params(y_train_a, rel_method, extr_type, coef,relevance_pts)\n X_ups, y_ups = apply_smogn(df_train, smogn, target_variable, phi_params, thr_rel, Cperc, k, repl, dist, p, pert, plotdensity=False)\n X_train,y_train = X_ups, y_ups\n X_train = pd.DataFrame(X_train, columns= cols)\n print(X_train.columns)\n print(\"cols in X train after smogn\")\n df_train_smogned = X_train\n df_train_smogned[output_column] = y_train\n\n ########################################################################################################################\n #Fix one-hot encoded errors \n ########################################################################################################################\n\nif one_hot_encoded:\n count_abnormal(X_train)\n print(\"fixing one hot encoded cols\")\n X_train = round_oversampled_one_hot_encoded(X_train) \n\nelse:\n print(\"there are no onehot encoded cols to be accounted for\")\n\n\n ########################################################################################################################\n #Reporting Rarity Metrics \n ########################################################################################################################\nif smogn:\n print(\"The size of the original data is \" + str(size_original))\n print(\"The size of the oversampled data is \" + str(df_train_smogned.shape))\n yrelafter, distances = get_relevance_oversampling(df_train_smogned, output_column, targetrel)\n roversampled = get_rare_indices(df_train_smogned[output_column], yrelafter, thr_rel, phi_params['control.pts'])\n rare_train_after = (len(roversampled)/len(df_train_smogned)) * 100\n print(\"The percentage of rare values in dataset after smogn are \" + str(rare_train_after))\n\n\n########################################################################################################################\n #Final Training\n########################################################################################################################\n\nstart = time.time()\nif output_column in X_train:\n X_train = X_train.drop([output_column], axis=1)\nprint(\"Training model on the best hyper-params \" + str(winning_hyper) )\nprint( \" *************************Final Results on all Folds***************************** \" )\nprint(\"Columns used in X_train in final training\")\nprint(X_train.columns)\nreg = _doFitBoostedTreeRegressor(X_train, y_train, X_train.columns, winning_hyper)\ny_pred = _doPredictBoostedTreeRegressor(X_test, reg)\nend = time.time()\nprint(\"The time taken to train and predict is \" + str(end - start) + \" seconds\")\n\nprint(\"The average target variable is \" + str(y_test.mean()))\ndf_test = X_test\ndf_test[y_test_name] = y_test\n# combine y_test and y_pred in 1 dataset\ndf_test[y_test_pred_name] = y_pred\ndf_test.to_csv(output_path + 'test_dataset.csv')\nplot_actual_vs_predicted(df_test, y_test_pred_name)\nplot_actual_vs_predicted_scatter_bisector(df_test, y_test_pred_name)\n\nif smogn:\n plot_target_variable(df, df_train_smogned, output_column, output_path, 'target_variable')\n\nif utility_based:\n mape_score,distance_corr,spearman_corr,pearson_corr,mae_score,mse_score,rmse_score,adjusted_r2,r2_Score,f1,f2,f5,prec,recall = evaluate(df_test, actual=y_test_name, predicted=y_test_pred_name,\n thresh=0.8, rel_method='extremes', extr_type='high',\n coef=1.5, 
relevance_pts=None)\nelse:\n error_metrics(y_test, y_pred)\n \nwith open(output_path + 'winning-model-scores.txt', 'a') as the_file:\n the_file.write('\\nUtility Based Metrics'+'\\n')\n the_file.write('F1: %.5f' % f1 + '\\n')\n the_file.write('F2: %.5f' % f2+'\\n')\n the_file.write('F05: %.5f' % f5+'\\n')\n the_file.write('precision: %.5f' %prec+'\\n')\n the_file.write('recall: %.5f' % recall+'\\n')\n\n the_file.write('\\nRegression Error Metrics'+'\\n')\n the_file.write('R2: %.5f' % r2_Score+'\\n')\n the_file.write('Adj-R2: %.5f' % adjusted_r2+'\\n')\n the_file.write('RMSE: %.5f' % rmse_score+'\\n')\n the_file.write('MSE: %.5f' % mse_score+'\\n')\n the_file.write('MAE: %.5f' % mae_score+'\\n')\n the_file.write('MAPE: %.5f' % mape_score+'\\n')\n\n the_file.write('\\nCorrelations'+'\\n')\n the_file.write('Pearson: %.5f' % pearson_corr+'\\n')\n the_file.write('Spearman: %.5f' % spearman_corr+'\\n')\n the_file.write('Distance: %.5f' % distance_corr+'\\n')\n\n \n \n\n\n","sub_path":"notebooks/tf_cv_smogn_euro.py","file_name":"tf_cv_smogn_euro.py","file_ext":"py","file_size_in_byte":57814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"318529553","text":"\"\"\"\n Utility functions for training one epoch \n and evaluating one epoch\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport math\nimport dgl\n\nfrom train.metrics import accuracy_SBM as accuracy\n\ndef train_epoch(model, optimizer, device, graph, epoch):\n\n model.train()\n epoch_loss = 0\n epoch_train_acc = 0\n nb_data = 0\n gpu_mem = 0\n\n if model.library == 'dgl':\n features = graph.ndata['feat'].to(device)\n labels = graph.ndata['label'].to(device)\n train_mask = graph.ndata['train_mask'].to(device)\n else:\n features = graph['x'].to(device)\n labels = graph['y'].to(device)\n train_mask = graph['train_mask'].to(device)\n\n optimizer.zero_grad()\n try:\n\n lap_pos_enc = graph.ndata['lap_pos_enc'].to(device) if model.library == 'dgl' else graph['ndata']['lap_pos_enc']\n sign_flip = torch.rand(lap_pos_enc.size(1)).to(device)\n sign_flip[sign_flip>=0.5] = 1.0; sign_flip[sign_flip<0.5] = -1.0\n lap_pos_enc = lap_pos_enc * sign_flip.unsqueeze(0)\n except:\n lap_pos_enc = None\n\n scores = model.forward(graph, features, None, lap_pos_enc)\n\n loss = model.loss(scores[train_mask], labels[train_mask])\n loss.backward()\n optimizer.step()\n epoch_loss += loss.detach().item()\n epoch_train_acc += accuracy(scores[train_mask], labels[train_mask])\n \n return epoch_loss, epoch_train_acc, optimizer\n\n\ndef evaluate_network(model, device, graph, epoch, split):\n \n model.eval()\n epoch_test_loss = 0\n epoch_test_acc = 0\n features = graph.ndata['feat'].to(device)\n labels = graph.ndata['label'].to(device)\n\n train_mask = graph.ndata['train_mask'].to(device)\n val_mask = graph.ndata['val_mask'].to(device)\n test_mask = graph.ndata['test_mask'].to(device)\n mask = val_mask if split == 'val' else (test_mask if split == 'test' else train_mask)\n\n nb_data = 0\n with torch.no_grad():\n try:\n batch_lap_pos_enc = graph.ndata['lap_pos_enc'].to(device)\n except:\n batch_lap_pos_enc = None\n\n scores = model.forward(graph, features, None, batch_lap_pos_enc)\n loss = model.loss(scores[mask], labels[mask])\n epoch_test_loss += loss.detach().item()\n epoch_test_acc += accuracy(scores[mask], labels[mask])\n \n return epoch_test_loss, 
epoch_test_acc\n\n\n","sub_path":"train/train_Cora_node_classification.py","file_name":"train_Cora_node_classification.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"10375968","text":"from rest_framework import serializers\nfrom invoices.models import Invoice\nfrom invoices.models import File\n\n\nclass InvoiceSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Invoice\n fields = (\n 'id',\n 'invoice_number',\n 'client_name',\n 'client_lastname',\n 'client_id',\n 'item_code',\n 'item_description',\n 'item_amount',\n 'item_price',\n 'item_discount_rate',\n 'file',\n 'created_at',\n 'updated_at'\n )\n read_only_fields = (\n 'id',\n )\n\n\nclass FileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = File\n fields = (\n 'id',\n 'filename',\n 'total_items_price',\n 'created_at',\n 'updated_at'\n )\n read_only_fields = (\n 'id',\n )\n\n\nclass ListFileSerializer(serializers.ModelSerializer):\n invoices = InvoiceSerializer(many=True)\n\n class Meta:\n model = File\n fields = (\n 'id',\n 'filename',\n 'total_items_price',\n 'created_at',\n 'updated_at',\n 'invoices'\n )\n read_only_fields = (\n 'id',\n )","sub_path":"invoices/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"322492460","text":"\"\"\"\n辞書を作る\n形態素解析用と判定用を作る?\nMeCab用の要素に分類ラベルを入れるべきか\n\nmecab -u /usr/local/lib/mecab/dic/userdic/c92_dic.dic\n\nしゃろーんは水を飲んだ\nしゃろーん\t名詞,固有名詞,人名,一般,*,*,\"しゃろーん\",*,*,c92dic,東ツ53a\nは\t助詞,係助詞,*,*,*,*,は,ハ,ワ\n水\t名詞,一般,*,*,*,*,水,ミズ,ミズ\nを\t助詞,格助詞,一般,*,*,*,を,ヲ,ヲ\n飲ん\t動詞,自立,*,*,五段・マ行,連用タ接続,飲む,ノン,ノン\nだ\t助動詞,*,*,*,特殊・タ,基本形,だ,ダ,ダ\nEOS\n\nカンマを含むものをどうするか\n→大抵は連名だから分ける?\n\"\"\"\n\nfrom myuse.mysqlUse import MySQLuse\nimport re\n\n#検出の重みを計算\ndef calc_weight(text):\n \"\"\"\n 単語の長さで重みを計算する\n 参考:http://qiita.com/ynakayama/items/f42d2519e513aa37ad0c\n \"\"\"\n weight_default = 10000\n number = \"^\\d+$\"\n\n add_max = 5\n if re.match(number,text):\n #数字だけで構成されている→使えない\n return 999999999\n if len(text) < add_max:\n weight = 5000*(add_max + 1 - len(text))\n return weight\n return weight_default\n\ndef length_1_is_san(text):\n # [ぁ-ゞ]とどっちがいいのだろう?\n all_hira_kata = \"^([(ぁ-ん)ー]+|[(ァ-ン)ー]+)$\"\n #1文字の名前、または全部ひらがなorカタカナはさん付けする\n if len(text) <= 3:\n return text + \"さん\"\n elif len(text) < 6 and re.match(all_hira_kata,text):\n return text + \"さん\"\n else:\n return text\n\ndef renmei_split(day, text, number):\n dic = \"\"\n names = text.split(\",\")\n for name in names:\n #if len(name) == 1:\n n = length_1_is_san(name)\n w = calc_weight(name)\n if w != 999999999:\n dic += '\"{}\",,,{},名詞,固有名詞,人名,一般,*,*,\"{}\",*,*,c92dic,{},{}\\n'.format(n, str(w), name, str(day), number)\n return dic\n\ndef dic_space_number(number):\n #スペース番号の各表記ゆれに対応する\n #pat = \"(東|西)(地区)?\\d?\"\n\n \"\"\"\n ホール番号を含む場合もある\n 今の表記からホール番号を割り出す必要がある\n \"\"\"\n east_west = number[0]\n area = number[1]\n num = number[2:4]\n subnum = number[4]\n space = [\"{}地区{}-{}{}\".format(east_west, area, num, subnum),\n \"{}地区 {}-{}{}\".format(east_west, area, num, subnum),\n \"{}{}-{}{}\".format(east_west, area, num, subnum),\n \"{} {}-{}{}\".format(east_west, area, num, subnum),\n \"{}地区{}{}{}\".format(east_west, area, num, subnum),\n \"{}地区 {}{}{}\".format(east_west, area, num, subnum),\n \"{}{}{}\".format(area, num, subnum),\n \"{}-{}{}\".format(area, num, subnum)]\n dic = \"\"\n for s in space:\n dic += 
'{},,,100,名詞,固有名詞,人名,一般,*,*,{},*,*,c92dic,space,{}\\n'.format(s,number,number)\n return dic\n\n\n\nmysql = MySQLuse(databasename=\"house\")\n\nselect = \"select day, space, circle_name, author, twitter_name from comike_circles\"\nmecab_dic = \"C92,-1,-1,-1000,名詞,固有名詞,一般,*,*,*,コミックマーケット92,シーキュウジュウニ,シーキュウジュウニ,c92dic,*,*\\n\"\n#東1-8の単語登録\nfor i in range(1,9):\n mecab_dic += \"東{},,,100,名詞,固有名詞,一般,*,*,*,東{},*,*,c92dic,hole,東{}\\n\".format(str(i),str(i),str(i))\ndatas = mysql.select(sql=select)\n\nspace_registared = []\nfor data in datas:\n #サークル名\n if data[2] and data[2] != \"\":\n name = length_1_is_san(data[2])\n w = calc_weight(name)\n if w != 999999999:\n mecab_dic += '\"{}\",-1,-1,{},名詞,固有名詞,一般,*,*,*,\"{}\",*,*,c92dic,{},{}\\n'.format(name,str(w),data[2],str(data[0]),data[1])\n #執筆者名\n if data[3] and data[3] != \"\":\n if \",\" in data[3]:\n mecab_dic += renmei_split(data[0], data[3], data[1])\n else:\n name = length_1_is_san(data[3])\n w = calc_weight(name)\n if w != 999999999:\n mecab_dic += '\"{}\",-1,-1,{},名詞,固有名詞,人名,一般,*,*,\"{}\",*,*,c92dic,{},{}\\n'.format(name,str(w), data[3], str(data[0]),data[1])\n #screen_name\n if data[4]:\n name = length_1_is_san(data[4])\n w = calc_weight(name)\n if w != 999999999:\n mecab_dic += '{},-1,-1,{},名詞,固有名詞,人名,一般,*,*,{},*,*,c92dic,{},{}\\n'.format(name,str(w), data[4],str(data[0]),data[1])\n #スペース番号\n if data[1] not in space_registared:\n #日付が変わっても番号の表記は同じ\n mecab_dic += '{},-1,-1,100,名詞,固有名詞,人名,一般,*,*,{},*,*,c92dic,space,{}\\n'.format(data[1], data[1], data[1])\n mecab_dic += dic_space_number(data[1])\n space_registared.append(data[1])\n\nwith open(\"c92_dic.csv\",\"w\",encoding=\"utf-8\") as f:\n f.write(mecab_dic)\n\nprint(mecab_dic)","sub_path":"myuse/make_c92_dic_0922.py","file_name":"make_c92_dic_0922.py","file_ext":"py","file_size_in_byte":4933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"69804388","text":"import grpc\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow_serving.apis import predict_pb2\r\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\r\n\r\nfrom . import merge_lac\r\nfrom . 
import utils\r\nfrom .model_config import MAX_SEQ_LENGTH, MODEL_NAME\r\nfrom .utils import Example\r\n\r\n\r\nclass PredictGRPC(object):\r\n def __init__(self, hostport, version):\r\n self.hostport = hostport\r\n\r\n self.request = predict_pb2.PredictRequest()\r\n self.request.model_spec.name = MODEL_NAME\r\n self.request.model_spec.signature_name = 'serving_default'\r\n self.request.model_spec.version.value = version\r\n # 提前初始化,第一个请求会比较耗时\r\n # self.request.inputs['input_ids'].CopyFrom(tf.contrib.util.make_tensor_proto(\r\n # np.ones(MAX_SEQ_LENGTH), shape=[1, MAX_SEQ_LENGTH], dtype=tf.int32))\r\n # self.request.inputs['input_mask'].CopyFrom(tf.contrib.util.make_tensor_proto(\r\n # np.ones(MAX_SEQ_LENGTH), shape=[1, MAX_SEQ_LENGTH], dtype=tf.int32))\r\n # self.request.inputs['label_ids'].CopyFrom(tf.contrib.util.make_tensor_proto(\r\n # np.ones(MAX_SEQ_LENGTH), shape=[1, MAX_SEQ_LENGTH], dtype=tf.int32))\r\n # self.request.inputs['segment_ids'].CopyFrom(tf.contrib.util.make_tensor_proto(\r\n # np.ones(MAX_SEQ_LENGTH), shape=[1, MAX_SEQ_LENGTH], dtype=tf.int32))\r\n\r\n def predict_grpc(self, converted_example):\r\n input_ids, input_mask, segment_ids, label_ids = converted_example\r\n self.request.inputs['input_ids'].CopyFrom(tf.contrib.util.make_tensor_proto(\r\n input_ids, shape=[1, MAX_SEQ_LENGTH], dtype=tf.int32))\r\n self.request.inputs['input_mask'].CopyFrom(tf.contrib.util.make_tensor_proto(\r\n input_mask, shape=[1, MAX_SEQ_LENGTH], dtype=tf.int32))\r\n self.request.inputs['label_ids'].CopyFrom(tf.contrib.util.make_tensor_proto(\r\n label_ids, shape=[1, MAX_SEQ_LENGTH], dtype=tf.int32))\r\n self.request.inputs['segment_ids'].CopyFrom(tf.contrib.util.make_tensor_proto(\r\n segment_ids, shape=[1, MAX_SEQ_LENGTH], dtype=tf.int32))\r\n\r\n channel = grpc.insecure_channel(self.hostport)\r\n self.stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\r\n result = self.stub.Predict(self.request)\r\n predictions = np.reshape(result.outputs['probabilities'].float_val, (1, MAX_SEQ_LENGTH, -1))\r\n cls_pred = predictions[0][0] # 分类概率\r\n cls_prob = cls_pred[np.argmax(cls_pred)] # 分类概率值\r\n return cls_prob, [np.argmax(x) for pred in predictions for x in pred]\r\n\r\n def predict(self, text):\r\n \"\"\"\r\n 预测结果 [CLS] ...\r\n :param text:\r\n :return:\r\n \"\"\"\r\n example = Example(text, ' '.join(['O'] * len(text)))\r\n converted_example = utils.convert_single_example(example, utils.label_id_map, MAX_SEQ_LENGTH, utils.tokenizer)\r\n cls_prob, predict_label_ids = self.predict_grpc(converted_example)\r\n predict_labels = [utils.id_label_map[labelid] for labelid in predict_label_ids if labelid != 0]\r\n return cls_prob, predict_labels[0], merge_lac.merge_line2(list(\" \" + text), predict_labels)\r\n","sub_path":"albert_tfserving_client/predict_grpc.py","file_name":"predict_grpc.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"557719553","text":"#! 
usr/bin/env python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@Author:Kaiyin Zhou\n\"\"\"\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom fennlp.datas import Sminarog\nfrom fennlp.metrics import Losess, Metric\nfrom fennlp.models import GraphSAGE\nfrom fennlp.gnn.utils import merge_batch_graph\nfrom fennlp.callbacks import EarlyStopping\n\ndim = 200\nnum_class = 52\ndrop_rate = 0.5\nepoch = 100\npenalty = 1e-4\nlr = 1e-3\n\n# R8,R52\ndata = Sminarog(data=\"R52\", data_dir=\"data\", embedding=\"glove200\")\nfeatures, adjs, edge_attrs, labels, batchs, edge2index = data.build_graph(mode=\"train\", p=3, k=5)\nfeatures1, adjs1, edge_attrs1, labels1, batchs1, _ = data.build_graph(edge2index, mode=\"test\", p=3)\n\n\nclass TextGCNDynamicWeight(tf.keras.layers.Layer):\n def __init__(self, dim, num_class, drop_rate, **kwargs):\n super(TextGCNDynamicWeight, self).__init__(**kwargs)\n\n self.model = GraphSAGE(dim, num_class, drop_rate)\n\n # def build(self, input_shape):\n # self.weight = self.add_weight(\n # shape=(len(edge2index),),\n # initializer='ones',\n # name='an',\n # )\n\n def call(self, feature, adj, edge_attr, batch, training=True):\n # edge_attr = tf.cast(edge_attr, dtype=tf.int32)\n # edge_weight = tf.gather(self.weight, edge_attr)\n\n # predict = self.model(feature, adj, batch, edge_weight, training=training)\n predict = self.model(feature, adj, batch, training=training)\n return predict\n\n def predict(self, feature, adj, edge_attr, batch, training=False):\n return self(feature, adj, edge_attr, batch, training)\n\n\naccs_all = []\nfor i in range(10):\n model = TextGCNDynamicWeight(dim, num_class, drop_rate)\n optimize = tf.optimizers.Adam(lr)\n\n cross_entropy = Losess.MaskSparseCategoricalCrossentropy()\n acc_score = Metric.SparseAccuracy()\n\n stop_monitor = EarlyStopping(monitor=\"loss\", patience=10,)\n for i in range(epoch):\n loss_train = []\n acc_train = []\n t = time.time()\n\n for feature, label, adj, edge_attr, batch in data.load(features[:-500], adjs[:-500],\n labels[:-500], edge_attrs[:-500],\n batchs[:-500], batch_size=32):\n feature, label, adj, edge_attr, batch = merge_batch_graph(feature, label, adj,\n edge_attr, batch)\n with tf.GradientTape() as tape:\n predict = model(feature, adj, edge_attr, batch, training=True)\n loss = cross_entropy(label, predict)\n loss += tf.add_n([tf.nn.l2_loss(v) for v in model.variables\n if \"bias\" not in v.name]) * penalty\n acc = acc_score(label, predict)\n loss_train.append(loss)\n acc_train.append(acc)\n\n grads = tape.gradient(loss, model.trainable_variables)\n optimize.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))\n\n loss_valid = []\n acc_valid = []\n for feature, label, adj, edge_attr, batch in data.load(features[-500:], adjs[-500:],\n labels[-500:], edge_attrs[-500:],\n batchs[-500:],\n batch_size=32):\n feature, label, adj, edge_attr, batch = merge_batch_graph(feature, label, adj,\n edge_attr, batch)\n t_predict = model.predict(feature, adj, edge_attr, batch, training=False)\n t_loss = cross_entropy(label, t_predict)\n t_acc = acc_score(label, t_predict)\n acc_valid.append(t_acc)\n loss_valid.append(t_loss.numpy())\n print(\"Valid: Epoch {} | Loss {:.4f} | Acc {:.4f} | Time {:.4f}\".format(i, np.mean(loss_valid),\n np.mean(acc_valid),\n time.time() - t))\n # if stop_monitor(np.mean(loss_valid), model):\n # break\n\n # test\n loss_test = []\n acc_test = []\n for feature, label, adj, edge_attr, batch in data.load(features1, adjs1,\n labels1, edge_attrs1,\n batchs1, batch_size=32):\n feature, label, 
adj, edge_attr, batch = merge_batch_graph(feature, label, adj,\n edge_attr, batch)\n t_predict = model.predict(feature, adj, edge_attr, batch, training=False)\n t_loss = cross_entropy(label, t_predict)\n t_acc = acc_score(label, t_predict)\n acc_test.append(t_acc)\n loss_test.append(t_loss.numpy())\n print(\"Test: Loss {:.4f} | Acc {:.4f}\".format(np.mean(loss_test), np.mean(acc_test)))\n accs_all.append(np.mean(acc_test))\nprint(\"ACC: {:.4f}±{:.4f}\".format(np.mean(accs_all), np.std(accs_all)))\n","sub_path":"tests/GNN/nlp/text_sage.py","file_name":"text_sage.py","file_ext":"py","file_size_in_byte":5204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"346015344","text":"# coding: utf-8\nimport uuid\n\nfrom django.contrib.auth.models import User\n\ntry:\n from django.apps import apps\n get_model = apps.get_model\nexcept ImportError:\n pass\n\nfrom django_th.services.services import ServicesMgr\nfrom django_th.tests.test_main import MainTest\n\nfrom th_rss.models import Rss\n\n\nclass ServicesMgrTestCase(MainTest):\n\n def setUp(self):\n try:\n self.user = User.objects.get(username='john')\n except User.DoesNotExist:\n self.user = User.objects.create_user(\n username='john', email='john@doe.info', password='doe')\n\n def create_rss(self):\n trigger = self.create_triggerservice()\n name = 'Foobar RSS'\n url = 'https://blog.trigger-happy.eu/feeds/all.rss.xml'\n status = True\n return Rss.objects.create(uuid=uuid.uuid4(),\n url=url,\n name=name,\n trigger=trigger,\n status=status)\n\n def test_set_title(self):\n data = {'title': 'foobar'}\n self.assertTrue('title' in data)\n data = {'link': 'http://localhost/url/to/news'}\n self.assertTrue('title' not in data)\n self.assertTrue('link' in data)\n\n def test_set_content(self):\n data = {'summary_detail': 'some summary'}\n self.assertTrue('summary_detail' in data)\n data = {'description': 'foobar'}\n self.assertTrue('description' in data)\n self.assertTrue('summary_detail' not in data)\n\n def test_save_data(self):\n data = {'title': 'a title', 'summary_detail': 'a content'}\n s = ServicesMgr('')\n title = s.set_title(data)\n content = s.set_content(data)\n self.assertTrue(title)\n self.assertTrue(content)\n","sub_path":"django_th/tests/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"236761376","text":"#!/usr/bin/python\n\n\nif __name__ == \"__main__\":\n print ('It is about time I started programming.')\n\ndef getRouter(rtr):\n router1 = {'os_version':'3.1.1', 'hostname':'nyc_router1', 'model':'nexus 9396', 'domain':'cisco.com', 'mgmt_ip':'10.1.50.11'}\n router2 = dict( os_version='3.2.1', hostname='rtp_router2', model='nexus 9396', domain='cisco.com', mgmt_ip='10.1.50.12' )\n router3 = { 'os_version':'3.1.1',\n 'hostname':'ROUTER3',\n 'model':'nexus 9396',\n 'domain':'lab.cisco.com',\n 'mgmt_ip':'10.1.50.13' }\n \n router_list = [router1,router2,router3]\n \n if rtr in router_list:\n return router1\n return 'No router found.'","sub_path":"ACI_Course_wk1/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"388623897","text":"\nimport nineml.abstraction_layer as al\n\n\ndef get_component():\n inter_event_regime = al.Regime(\n name=\"intereventregime\",\n time_derivatives=[\"dA/dt = -A/taur\", \"dB/dt = -B/taud\"],\n 
transitions=[al.On('spikeinput',\n do=[\"A = A + weight*factor\",\n \"B = B + weight*factor\"])]\n )\n\n dynamicsblock = al.DynamicsBlock(\n aliases=[\n \"taupeak := taur*taud/(taud - taur)*log(taud/taur)\",\n \"factor := 1/(exp(-taupeak/taud) - exp(-taupeak/taur))\",\n \"gB := 1/(1 + mgconc*exp(-1*gamma*V)/beta)\",\n \"g := gB*gmax*(B-A)\",\n \"I := g * df\",\n \"df := (E-V)\",\n ],\n state_variables=[al.StateVariable(o) for o in ('A', 'B')],\n regimes=[inter_event_regime],\n )\n\n nmda = al.ComponentClass(name=\"NMDAPSR\",\n dynamicsblock=dynamicsblock,\n analog_ports=[al.RecvPort(\"V\"), al.SendPort(\"I\"), ],\n event_ports=[al.RecvEventPort('spikeinput')],\n parameters=[\n 'taur', 'taud', 'gmax', 'mgconc', 'gamma', 'beta', 'E', 'weight']\n )\n\n return nmda\n","sub_path":"lib9ml/python/nineml/examples/AL/sample_components/nmda.py","file_name":"nmda.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"296785601","text":"import math\r\nimport random\r\nimport re\r\n\r\nimport requests\r\n\r\n\r\ndef get_proxy():\r\n return requests.get(\"http://127.0.0.1:5010/get/\").content\r\n\r\ndef delete_proxy(proxy):\r\n requests.get(\"http://127.0.0.1:5010/delete/?proxy={}\".format(proxy))\r\n\r\ndef changeTime(allTime):\r\n day = 24 * 60 * 60\r\n hour = 60 * 60\r\n min = 60\r\n if allTime < 60:\r\n return \"%d sec\" % math.ceil(allTime)\r\n elif allTime > day:\r\n days = divmod(allTime, day)\r\n return \"%d days, %s\" % (int(days[0]), changeTime(days[1]))\r\n elif allTime > hour:\r\n hours = divmod(allTime, hour)\r\n return '%d hours, %s' % (int(hours[0]), changeTime(hours[1]))\r\n else:\r\n mins = divmod(allTime, min)\r\n return \"%d mins, %d sec\" % (int(mins[0]), math.ceil(mins[1]))\r\n\r\ndef str_replace_new(str):\r\n reg = r'[促销|wish|ebay|速卖通|敦煌|亚马逊|跨境|专供||欧美|夜店|性感|2017|2018|货源|爆款|不支持退货| !|洲站|特价商品,不支持退换!介意勿拍!|+|,| ]'\r\n pattern = re.compile(reg, re.IGNORECASE)\r\n out = re.sub(pattern, '', str)\r\n return out\r\n\r\ndef str_replace_en(str):\r\n reg = r'[+|\\/|]'\r\n pattern = re.compile(reg, re.IGNORECASE)\r\n out = re.sub(pattern, '', str)\r\n return out\r\n\r\ndef keywords_woman_dress():\r\n arr = [\r\n 'dresses',\r\n 'maxi dresses',\r\n 'formal dresses',\r\n 'prom dresses',\r\n 'evening dresses',\r\n 'summer dresses',\r\n 'evening gowns',\r\n 'white dress',\r\n 'dresses online',\r\n 'long dresses',\r\n 'gown',\r\n 'cocktail dresses',\r\n 'womens clothes',\r\n 'homecoming dresses',\r\n 'long sleeve dress',\r\n 'party dresses',\r\n 'casual dresses',\r\n 'floral dresses',\r\n 'ladies dress',\r\n 'white maxi dress',\r\n 'black dress',\r\n 'red dress',\r\n 'little black dress',\r\n \"women dress\",\r\n \"women dresses size 14\",\r\n \"women dresses size 12\",\r\n \"women dress free shipping\",\r\n \"women dress plus size\",\r\n \"women dress a-line\",\r\n \"women a line dress\",\r\n \"women dress bodycon\",\r\n \"women dress boho\",\r\n \"women dress cocktail party formal evening dress\",\r\n \"women dress casual\",\r\n \"women dress cocktail\",\r\n \"women dress draped lace\",\r\n \"dress women\",\r\n \"women summer dress\",\r\n \"dress women party\",\r\n \"long dress women\",\r\n \"women dresses\",\r\n \"women dresses size 16\",\r\n \"women dresses plus size\",\r\n \"women dress formal\",\r\n \"women dress for party\",\r\n \"women dress green\",\r\n \"women dress gold\",\r\n \"women dressing gown\",\r\n \"women jumpsuit dress\",\r\n \"women dress large\",\r\n \"women dress long sleeve\",\r\n 
\"women dress long\",\r\n \"women dress lace\",\r\n \"women dress size m\",\r\n \"new fashion casual dress for women\",\r\n \"women dress off shoulder\",\r\n \"women dress office\",\r\n \"women dress oversize\",\r\n \"elegant women office dress\",\r\n \"women dress party\",\r\n \"women dress red\",\r\n \"women dress rockabilly\",\r\n \"women dress size 8\",\r\n \"women dress summer\",\r\n \"women dress size 6\",\r\n \"women dress size 10\",\r\n \"women dress tops\",\r\n \"women dress the party\",\r\n \"women fashion long t shirt dress\",\r\n \"women print t shirt dress\",\r\n \"women long sleeve t shirt dress\",\r\n \"women swimwear bikini cover up beach dress\",\r\n \"women dress vintage\",\r\n \"women dress v neck\",\r\n \"women v neck dress\",\r\n \"yellow v neck dresses for women\",\r\n \"women vintage dress v neck\",\r\n \"women v neck mini dress\",\r\n \"women dress xl\",\r\n \"women dress xs\",\r\n \"women dress xxl\",\r\n \"women dress xxs\",\r\n \"women dress xxxl\",\r\n \"women dressy dress\",\r\n \"women dress 16\",\r\n \"women dress 14w\",\r\n \"women dress 14\",\r\n \"women dress 10\",\r\n \"women dress 12\",\r\n \"women dress 2018\",\r\n \"women's dress suits australia\",\r\n \"women's dress suits\",\r\n \"best women's dress suits\",\r\n \"women's suits dress barn\",\r\n \"women's business dress suits\",\r\n \"women's dress suits\",\r\n \"women's dress suits canada\",\r\n \"women's dress and coat suits\",\r\n \"women's corporate dress suits\",\r\n \"cream colored women's dress suits\",\r\n \"classic women's dress suits\",\r\n \"dress code women's suits\",\r\n \"women's elegant dress suits\",\r\n \"women's evening dress suits\",\r\n \"women's dress suits for weddings\",\r\n \"women's dress suits for work\",\r\n \"women's dress pant suits for weddings\",\r\n \"women's formal dress suits\",\r\n \"women's fall dress suits\",\r\n \"women's gray dress suits\",\r\n \"women's dress and jacket suits\",\r\n \"women's dress knit suits\",\r\n \"kasper women's dress suits\",\r\n \"women's lavender dress suits\",\r\n \"women's linen dress suits\",\r\n \"women's long dress suits\",\r\n \"women's lace dress suits\",\r\n \"women's dress leather suits\",\r\n \"neiman marcus women's dress suits\",\r\n \"navy women's dress suits\",\r\n \"women's dress suits plus size\",\r\n \"women's dress suits petite\",\r\n \"women's dress pant suits\",\r\n \"women's two piece dress suits\",\r\n \"women's plus dress suits\",\r\n \"women's professional dress suits\",\r\n \"women's pink dress suits\",\r\n \"women's dress suits sale\",\r\n \"women's plus size dress suits\",\r\n \"women's dress suits with skirts\",\r\n \"women's spring dress suits\",\r\n \"women's summer dress suits\",\r\n \"women's sheath dress suits\",\r\n \"women's silk dress suits\",\r\n \"women's pant suits dress suits\",\r\n \"sears women's dress suits\",\r\n \"women's tuxedo dress suits\",\r\n \"women's tailored dress suits\",\r\n \"tahari women's dress suits\",\r\n \"women's dress suits under $50\",\r\n \"women's dress suits uk\",\r\n \"women's dress suits work\",\r\n \"www.women dress suits\",\r\n \"womens dress suits australia\",\r\n \"womens dress suits for weddings australia\",\r\n \"dress barn womens bathing suits\",\r\n \"dress barn womens pant suits\",\r\n \"womens business dress suits\",\r\n \"womens dress suits canada\",\r\n \"women dress suits\",\r\n \"classic womens dress suits\",\r\n \"lounge suits dress code women's\",\r\n \"womens evening dress suits\",\r\n \"womens dress suits for weddings\",\r\n \"womens dress suits for 
weddings uk\",\r\n \"womens dress pants suits for weddings\",\r\n \"womens dress suits for work\",\r\n \"womens formal dress suits\",\r\n \"womens gray dress suits\",\r\n \"women's dress and jacket work suits\",\r\n \"womens dress knit suits\",\r\n \"womens dress suits with long jackets\",\r\n \"womens lace dress suits\",\r\n \"womens dress leather suits\",\r\n \"navy blue womens dress suits\",\r\n \"womens dress suits plus size\",\r\n \"plus size womens dress pant suits\",\r\n \"petite womens dress suits\",\r\n \"womens white dress pant suits\",\r\n \"women's pant suits dress suits jacket\",\r\n \"women's petite dress pant suits\",\r\n \"women's formal dress pant suits\",\r\n \"womens dress pant suits\",\r\n \"cheap womens dress pant suits\",\r\n \"macys womens dress pant suits\",\r\n \"womens dress pant suits for weddings\",\r\n \"womens two piece dress suits\",\r\n \"womens plus dress suits\",\r\n \"womens plus size dress pant suits\",\r\n \"plus size womens dress suits\",\r\n \"professional women's dress suits\",\r\n \"womens pink dress suits\",\r\n \"womens dress suits for sale\",\r\n \"wholesale womens dress suits\",\r\n \"womens dress suits on sale\",\r\n \"womens spring dress suits\",\r\n \"dressing gowns\"\r\n ]\r\n return random.sample(arr,5)","sub_path":"ali1688/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":8057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"362578852","text":"from beat_the_room import Puzzle\nimport RPi.GPIO as GPIO\nimport time\n\nclass KameraRaetsel(Puzzle):\n def init(self):\n print('init')\n self.id = 1\n \n\n GPIO.setmode(GPIO.BOARD)\n\n self.pin_to_circuit = 7\n GPIO.setup(pin_to_circuit, GPIO.IN)\n\n\n#währenddessen soll livecam mitlaufen\n\n def interact(self): \n while self.rc_time>10: \n pass \n self.solved=true\n \n def rc_time(self):\n count = 0\n\n while (GPIO.input(self.pin_to_circuit) == GPIO.LOW):\n time.sleep(0.1)\n count += 1\n\n return count\n\n def deinit(self):\n GPIO.cleanup()\n #Port speziefizieren???\n ","sub_path":"KameraRaetsel.py","file_name":"KameraRaetsel.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"361221640","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mlxtend.plotting import plot_decision_regions\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree\n\ndf = pd.read_csv('dankmemes_task1_train.csv')\nX, y = df[['Engagement', 'Manipulation']].values, df[['Meme']].values\nX_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=0.2,\n stratify=y)\ndtc = DecisionTreeClassifier(criterion='gini',\n max_depth=4)\ndtc.fit(X_train, y_train)\n\nX_combined = np.vstack((X_train, X_test))\ny_combined = np.vstack((y_train, y_test))\n\nplot_decision_regions(X_combined, y_combined.reshape(1600,),\n clf=dtc)\nplt.legend(loc='best')\nplt.tight_layout()\nplt.show()\nplot_tree(dtc)\nplt.show()\n","sub_path":"preliminary/decisiontreeclassifiication.py","file_name":"decisiontreeclassifiication.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"317629299","text":"from django.urls import path, re_path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n re_path(r'^order/(?P.*)/(?P.*)/(?P.*)/$', views.order, name=\"order\"),\n path(\"customer_order\", views.customer_order, name=\"customer_order\"),\n path(\"confirm_order\", views.confirm_order, name=\"confirm_order\"),\n path(\"staff\", views.staff, name=\"staff\"), \n path(\"/approve_order\", views.approve_order, name=\"approve_order\")\n]\n","sub_path":"orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"413899038","text":"class redlight:\n\n\tdef __init__(self):\n\t\t#light config\n\t\tself.n='r'\n\t\tself.e='r'\n\t\tself.s='r'\n\t\tself.w='r'\n\t\tself.turn=-1\n\t\t#cars passing\n\t\tself.cpn=0\n\t\tself.cpe=0\n\t\tself.cps=0\n\t\tself.cpw=0\n\t\t#misc data elements\n\t\tself.edelta=0\n\t\tself.yellowtime=5\n\t\tself.defaulttime={'ns':90,'ew':90}\n\t\tself.record=[[],[],[],[]]\n\t\tself.count=0\n\t\tself.prev=[]\n\n\t#-------functions for light light configuration-------------------------------------------------------\n\tdef setlight(self,turn,yel=False):\n\n\t\tself.n='r'\n\t\tself.e='r'\n\t\tself.s='r'\n\t\tself.w='r'\n\t\tif not yel:\n\t\t\tif turn=='n' or turn=='s':\n\t\t\t\tself.n='G'\n\t\t\t\tself.s='G'\n\t\t\telif turn=='e' or turn=='w':\n\t\t\t\tself.e='G'\n\t\t\t\tself.w='G'\n\t\t\telse:\n\t\t\t\treturn -10\n\n\t\telif yel: \n\t\t\tif turn=='n' or turn=='s':\n\t\t\t\tself.n='y'\n\t\t\t\tself.s='y'\n\t\t\telif turn=='e' or turn=='w':\n\t\t\t\tself.e='y'\n\t\t\t\tself.w='y' \n\t\t\telse :\n\t\t\t\treturn -1\n\t\tself.turn=turn\n\t#to return the final config of lights-------------------------------------------------------------------\n\n\tdef getlight(self):\n\n\t\treturn str(self.n+self.e+self.s+self.w)\n\n\t#to set the values of car passed--------------------------------------------------------------------------\n\tdef setcp(self):\n\t\tvalue=sum(self.record[0])\n\t\tself.cpn=value\n\t\tvalue=sum(self.record[1])\n\t\tself.cpe=value\n\t\tvalue=sum(self.record[2])\n\t\tself.cps=value\n\t\tvalue=sum(self.record[3])\n\t\tself.cpw=value\n\n\t#getter--------------------------------------------------------------------------------------------------\n\n\tdef talk(self,rec):\n\t\tif self.count<(self.defaulttime['ns']+self.defaulttime['ew']):\n\t\t\tif self.count<(self.defaulttime['ns']-self.edelta):\n\t\t\t\tself.setlight('n')\n\t\t\telse:\n\t\t\t\tself.setlight('e')\n\t\t\tself.record[0].append(rec[0])\n\t\t\tself.record[1].append(rec[1])\n\t\t\tself.record[2].append(rec[2])\n\t\t\tself.record[3].append(rec[3])\n\t\t\tself.count+=1\n\n\t\telse:\n\n\t\t\tself.setcp()\n\t\t\tself.calcdelta()\n\t\t\t#------------------reset everything--------------------------------------------------------------\n\t\t\tself.record=[[],[],[],[]]\n\t\t\tself.count=0\n\t\t\tself.setlight('n')\n\n\n\n\n\t#calculate the delta-------------------------------------------------------------------------------------\n\n\tdef calcdelta(self):\n\t\tscore1=(self.cpn+self.cps)\n\t\tscore2=(self.cpe+self.cpw)\n\t\tself.delta=score1-score2\n\t\tself.delta=self.delta*1.25\n\t\tif score1==0:\n\t\t\tself.delta=70\n\t\telif score2==0:\n\t\t\tself.delta=-70\n\t\tprint(score1,score2)\n\t\tprint(self.delta,'the delta')\n\t\tself.calc_edelta()\n\n\n\t#calculate the 
edelta------------------------------------------------------------------------------------\n\n\tdef calc_edelta(self):\n\t\tsum1=0\n\t\tcount = 0\n\t\tself.prev.append(self.delta)\n\t\t#print(len(self.prev))\n\t\tif len(self.prev)<5:\n\t\t\tif len(self.prev)<3:\n\t\t\t\te=0\n\t\t\telif len(self.prev)==3:\n\t\t\t\te=((25*self.prev[0])+(25*self.prev[1])+(50*self.prev[2]))\n\t\t\telif len(self.prev)==4:\n\t\t\t\te=(5*self.prev[0]+25*self.prev[1]+25*self.prev[2]+45*self.prev[3])//100\n\t\telse:\n\t\t\t#self.cong()\n\t\t\tw=[5,5,25,25,40]\n\t\t\tself.prev.pop(0)\n\t\t\tfor x in self.prev: \n\t\t\t\tsum1=sum1+x*w[count]\n\t\t\t\tcount+=1\n\t\t\te=sum1//100\n\t\tif e>self.defaulttime['ns']//2:\n\t\t\te=self.defaulttime['ns']//2\n\t\telif e<-self.defaulttime['ns']//2:\n\t\t\te=self.defaulttime['ns']//2\n\n\t\tself.edelta=e\n\n\t\t#print(e,'edelta')","sub_path":"done.py","file_name":"done.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"169157997","text":"# pngdiscover.py\n\"\"\" This module defines functions that discover all the .png files\n under the sepcific parent directory.\n\"\"\"\nimport os\nimport logging\n\n\noutput_list = []\n\n# set logging configuration\nLOG_FORMAT = \"%(levelname)s %(filename)s %(message)s\"\nlogging.basicConfig(format=LOG_FORMAT, level=logging.INFO)\nLOGGER = logging.getLogger(__name__)\n\n\ndef discover_file(directory):\n for cur_path, cur_dir, files in os.walk(directory):\n LOGGER.debug(f'cur_subdir:{cur_dir}, cur_files:{files}')\n for file in files:\n if file.endswith('.png'):\n if cur_path not in output_list:\n output_list.append(cur_path)\n files_list = []\n output_list.append(files_list)\n files_list.append(file)\n LOGGER.debug(f'adding file:{file} to dir:{cur_path}')\n\n if cur_dir:\n for the_dir in cur_dir:\n # recursively calling discover_file() to find .png files under\n # sub-directory.\n LOGGER.debug('\\n----------------')\n LOGGER.debug(f'-- Start to dig in dir:{the_dir} -- ')\n new_dir = os.path.join(cur_path, the_dir)\n discover_file(new_dir)\n LOGGER.debug(f'-- End of dig in dir:{the_dir} --')\n # if the current directory doesn't have any sub-directory\n # then terminate the recursion.\n else:\n LOGGER.debug(f'Return from dir:{directory} due to no sub-dir')\n break\n","sub_path":"students/zhen_yang/lesson09/assignment/pngdiscover.py","file_name":"pngdiscover.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"58421139","text":"# tools.py\n\nimport csv\nimport codecs\nimport operator\nfrom itertools import permutations, groupby, starmap\n\nfrom ._compat import StringIO\n\n__all__ = ['maximal', 'lazyproperty', 'unicode_csv_reader', 'UnicodeWriter']\n\n\ndef maximal(iterable, comparison=operator.lt, _groupkey=operator.itemgetter(0)):\n \"\"\"Yield the unique maximal elements from iterable using comparison.\n\n >>> list(maximal([1, 2, 3, 3]))\n [3]\n\n >>> list(maximal([1]))\n [1]\n \"\"\"\n iterable = set(iterable)\n if len(iterable) < 2:\n return iterable\n return (item for item, pairs\n in groupby(permutations(iterable, 2), key=_groupkey)\n if not any(starmap(comparison, pairs)))\n\n\nclass lazyproperty(object):\n \"\"\"Non-data descriptor caching the computed result as instance attribute.\n\n >>> class Spam(object):\n ... @lazyproperty\n ... def eggs(self):\n ... 
return 'spamspamspam'\n\n >>> spam=Spam(); spam.eggs\n 'spamspamspam'\n\n >>> spam.eggs='eggseggseggs'; spam.eggs\n 'eggseggseggs'\n\n >>> Spam().eggs\n 'spamspamspam'\n\n >>> Spam.eggs # doctest: +ELLIPSIS\n <...lazyproperty object at 0x...>\n \"\"\"\n\n def __init__(self, fget):\n self.fget = fget\n for attr in ('__module__', '__name__', '__doc__'):\n setattr(self, attr, getattr(fget, attr))\n\n def __get__(self, instance, owner):\n if instance is None:\n return self\n result = instance.__dict__[self.__name__] = self.fget(instance)\n return result\n\n\n# from stdlib recipe\n\n\ndef unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):\n csv_reader = csv.reader(utf_8_encoder(unicode_csv_data), dialect=dialect, **kwargs)\n for row in csv_reader:\n yield [unicode(cell, 'utf-8') for cell in row]\n\n\ndef utf_8_encoder(unicode_csv_data):\n for line in unicode_csv_data:\n yield line.encode('utf-8')\n\n\nclass UnicodeWriter(object):\n\n def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwargs):\n self.queue = StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwargs)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def writerow(self, row):\n self.writer.writerow([s.encode('utf-8') for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode('utf-8')\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n","sub_path":"concepts/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"23350732","text":"# pylint: disable=unused-argument\nfrom __future__ import print_function\n\nfrom api.evolution_result import EvolutionResult\nfrom .player import Player\nfrom .inventory_parser import InventoryParser\nfrom .worldmap import WorldMap, Gym, PokeStop\nfrom .encounter import Encounter\nfrom .item import Incubator\nfrom .exceptions import AccountBannedException\n\nimport logging\n\nclass StateManager(object):\n def __init__(self):\n\n # Transforms response data from the server to objects.\n # Use self._noop if there is no response data.\n self.response_map = {\n \"CHECK_CHALLENGE\": self._verify_challenge,\n \"GET_PLAYER\": self._parse_player,\n \"GET_INVENTORY\": self._parse_inventory,\n \"GET_MAP_OBJECTS\": self._parse_map,\n \"ENCOUNTER\": self._parse_encounter,\n \"DISK_ENCOUNTER\": self._parse_disk_encounter,\n \"RELEASE_POKEMON\": self._noop,\n \"CATCH_POKEMON\": self._parse_catch_pokemon,\n \"PLAYER_UPDATE\": self._noop,\n \"CHECK_AWARDED_BADGES\": self._identity,\n \"FORT_DETAILS\": self._parse_fort,\n \"FORT_SEARCH\": self._identity,\n \"RECYCLE_INVENTORY_ITEM\": self._noop,\n \"USE_ITEM_EGG_INCUBATOR\": self._parse_use_incubator,\n \"GET_HATCHED_EGGS\": self._parse_get_hatched_eggs,\n \"EVOLVE_POKEMON\": self._parse_evolution,\n \"DOWNLOAD_SETTINGS\": self._dl_settings,\n \"DOWNLOAD_ITEM_TEMPLATES\": self._identity,\n \"DOWNLOAD_REMOTE_CONFIG_VERSION\": self._identity,\n \"GET_ASSET_DIGEST\": self._identity,\n \"SET_FAVORITE_POKEMON\": self._identity,\n \"LEVEL_UP_REWARDS\": self._identity\n }\n\n self.current_state = {}\n\n self.staleness = {}\n\n\n def _noop(self, *args, **kwargs):\n pass\n\n # Update a state object and mark it as valid.\n def 
_update_state(self, data):\n for key in data:\n value = data.get(key, None)\n if value is None:\n continue\n self.current_state[key] = data[key]\n self.staleness[key] = False\n\n def get_state(self):\n return self.current_state\n\n # Get only the following state objects from the current state.\n def get_state_filtered(self, keys):\n return_object = {}\n for key in keys:\n return_object[key] = self.current_state.get(key, None)\n return self.current_state\n\n # Transform the returned data from the server into data objects and\n # then update the current state.\n def update_with_response(self, key, response):\n if key not in self.response_map:\n print(response)\n print(\"Unimplemented response \" + key)\n logging.error(\"Unimplemented response \" + key)\n self.response_map[key](key, response)\n\n def _verify_challenge(self, key, response):\n if response[\"challenge_url\"] != \" \":\n logging.error(\"Challenge URL: %s\", response[\"challenge_url\"])\n print(\"ERROR Challenge:\" + response[\"challenge_url\"])\n raise AccountBannedException()\n\n def _parse_player(self, key, response):\n current_player = self.current_state.get(\"player\", None)\n if current_player is None:\n current_player = Player()\n current_player.update_get_player(response)\n self._update_state({\"player\": current_player})\n\n def _parse_inventory(self, key, response):\n full_inventory = self.current_state.get(\"full_inventory\", InventoryParser())\n full_inventory.update(response)\n\n logging.debug(response)\n\n new_state = {\n \"full_inventory\": full_inventory,\n \"inventory\": full_inventory.items,\n \"pokedex\": full_inventory.pokedex_entries,\n \"candy\": full_inventory.candy,\n \"pokemon\": full_inventory.pokemon,\n \"eggs\": full_inventory.eggs,\n \"egg_incubators\": full_inventory.egg_incubators,\n \"inventory_timestamp\": full_inventory.last_updated\n }\n\n current_player = self.current_state.get(\"player\", None)\n if current_player is None:\n current_player = Player()\n current_player.update_get_inventory_stats(response)\n new_state[\"player\"] = current_player\n\n self._update_state(new_state)\n\n def _parse_map(self, key, response):\n # TODO: Figure out how I want to do WorldMap. 
Lazy loading might be a better idea\n \"\"\"\n current_map = self.current_state.get(\"worldmap\", None)\n if current_map is None:\n current_map = WorldMap()\n current_map.update_map_objects(response)\n \"\"\"\n current_map = WorldMap()\n current_map.update_map_objects(response)\n\n self._update_state({\"worldmap\": current_map})\n\n def _parse_encounter(self, key, response):\n current_encounter = Encounter()\n current_encounter.update_encounter(response)\n self._update_state({\"encounter\": current_encounter})\n\n def _parse_disk_encounter(self, key, response):\n current_encounter = Encounter()\n current_encounter.update_disk_encounter(response)\n self._update_state({\"disk_encounter\": current_encounter})\n\n def _parse_catch_pokemon(self, key, response):\n current_encounter = self.current_state.get(\"encounter\", None)\n if current_encounter is None:\n current_encounter = Encounter()\n current_encounter.update_catch_pokemon(response)\n self._update_state({\"encounter\": current_encounter})\n\n def _parse_fort(self, key, response):\n fort_type = response.get(\"type\", 2)\n if fort_type == 2:\n self._update_state({\"fort\": Gym(response)})\n else:\n self._update_state({\"fort\": PokeStop(response)})\n\n def _parse_get_hatched_eggs(self, key, response):\n if response.get(\"success\", False):\n current_player = self.current_state.get(\"player\", None)\n if current_player is None:\n current_player = Player()\n\n current_player.update_hatched_eggs(response)\n self._update_state({\"player\": current_player})\n\n if len(response.get(\"pokemon_id\", [])) > 0:\n self.mark_returned_stale(\"GET_INVENTORY\")\n\n def _parse_use_incubator(self, key, response):\n if response.get(\"result\", 0) == 1:\n\n current_egg_incubators = self.current_state.get(\"egg_incubators\", [])\n new_egg_incubators = []\n\n for curr_incu in current_egg_incubators:\n if curr_incu.unique_id == response[\"egg_incubator\"].get(\"id\"):\n new_egg_incubators.append(Incubator(response[\"egg_incubator\"]))\n else:\n new_egg_incubators.append(curr_incu)\n\n self._update_state({\"egg_incubators\": new_egg_incubators})\n\n def _parse_evolution(self, key, response):\n self._update_state({\"evolution\": EvolutionResult(response)})\n\n def _identity(self, key, response):\n self._update_state({key: response})\n\n def _dl_settings(self, key, response):\n self._update_state({\"download_settings\": response})\n","sub_path":"api/state_manager.py","file_name":"state_manager.py","file_ext":"py","file_size_in_byte":7143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"499906783","text":"#!/usr/bin/env python3\n\nprint(\"\\nЗадача 1\\n\")\n\n###############################################################################\n# Задача-1:\n# Дан список фруктов.\n# Напишите программу, выводящую фрукты в виде нумерованного списка,\n# выровненного по правой стороне.\n###############################################################################\n\n# Пример:\n# Дано: [\"яблоко\", \"банан\", \"киви\", \"арбуз\"]\n# Вывод:\n# 1. яблоко\n# 2. банан\n# 3. киви\n# 4. 
арбуз\n\n# Подсказка: воспользоваться методом .format()\n\nfruits = [\"яблоко\", \"банан\", \"киви\", \"арбуз\"]\n\n# Вычисляем максимальную длину элемента, для использования в качестве отступа\n\nmax_len = 0\nfor fruit in fruits:\n if len(fruit) > max_len:\n max_len = len(fruit)\n\n# Спецификация строки форматирования:\n# https://docs.python.org/3/library/string.html#formatstrings\n# \"{\" [field_name] [\":\" format_spec] \"}\"\n# format_spec ::= [align][width]\n# Итого, для выравнивания по правому краю { :>max_len }\n\n# Вариант через while + list[index]\n# index = 0\n# while index < len(fruits):\n# print(\"{0}. {1:>{2}}\".format(index+1, fruits[index], max_len))\n# index += 1\n\nindex = 1\nfor fruit in fruits:\n print(\"{0}. {1:>{2}}\".format(index, fruit, max_len))\n index += 1\n\n# Адаптивная ZeroPadding нумерация\n# index = 0\n# while index < len(fruits):\n# print(\"{0:0{3}}. {1:>{2}}\".format(index+1, fruits[index], max_len, len(\n# str(len(fruits)))))\n# index += 1\n\nprint(\"\\nЗадача 2\\n\")\n\n###############################################################################\n# Задача-2:\n# Даны два произвольные списка.\n# Удалите из первого списка элементы, присутствующие во втором списке.\n###############################################################################\n\nfruits = [\"яблоко\", \"яблоко\", \"банан\", \"киви\", \"арбуз\", \"мандарин\", \"яблоко\"]\nbad_fruits = [\"яблоко\", \"ананас\", \"апельсин\", \"арбуз\"]\n\nprint(\"Список фруктов:\", fruits, sep='\\t\\t\\t')\nprint(\"Список санкционных фруктов:\", bad_fruits, sep='\\t')\n\n# Быстрое решение. Теряем порядок в списке, но задание выполняем\nfruits = list(set(fruits) - set(bad_fruits))\n\n# Второй вариант, с сохранением порядка\n# index = 0\n# while index < len(fruits):\n# if fruits[index] in bad_fruits:\n# fruits.remove(fruits[index])\n# continue\n# index += 1\n\nprint()\nprint(\"Итоговый список:\", fruits, sep='\\t\\t', end='\\n\\n')\n\nprint(\"Задача 3\", end='\\n\\n')\n\n###############################################################################\n# Задача-3:\n# Дан произвольный список из целых чисел.\n# Получите НОВЫЙ список из элементов исходного, выполнив следующие условия:\n# если элемент кратен двум, то разделить его на 4, если не кратен,\n# то умножить на два.\n###############################################################################\n\nnumbers = [1, 2, 3, 4, 5, 6, 10, 12, 13, 15, 5, 21, 1, 0]\n\nprint(\"Исходный список:\", numbers, sep='\\t')\n\nnew_numbers = []\nfor number in numbers:\n # Когда число кратно 2, получаем 0. 
А 0 считается за False\n # Учитываем этот момент\n if number % 2:\n new_numbers.append(number*2)\n else:\n new_numbers.append(number/4)\n\n# Next-level вариант с использованием (ifTrue, ifFalse)[bool]\n# new_numbers = list(map(lambda x: (x / 4, x * 2)[x % 2], numbers))\n\nprint(\"Результирующий список:\", new_numbers, sep='\\t')\n","sub_path":"lesson02/home_work/hw02_easy.py","file_name":"hw02_easy.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"457654086","text":"from django.shortcuts import render\n\nimport simplejson as json\n\nfrom portal.models import *\nfrom portal.forms import GetUsageForm\n\ndef usage_request_view(request):\n\n\tform = GetUsageForm()\n\n\t#dic containers\n\tlaptopArr = []\n\troomArr = []\n\tprojectorArr = []\n\n\tif request.method == 'POST':\n\t\tform = GetUsageForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tcategory = form.cleaned_data['category']\n\n\t\t\t# shows the resource and the employee\n\t\t\tif category == 'Laptop':\n\t\t\t\tfor laptop in Laptop.objects.filter(is_checked_out=True):\n\t\t\t\t\temployee = laptop.employee.username\n\t\t\t\t\tresource_name = laptop.name\n\t\t\t\t\tusage_dict = {\n\t\t\t\t\t\t'employee' : employee,\n\t\t\t\t\t\t'resname' : resource_name,\n\t\t\t\t\t\t}\n\n\t\t\t\tlaptopArr.append(usage_dict)\n\n\t\t\telif category == 'Projector':\n\t\t\t\tprojectorArr = []\n\n\t\t\telif category == 'Room':\n\t\t\t\troomArr = []\n\n\t\t\telif category == 'All':\n\t\t\t\troomArr = []\n\t\t\t\tprojectorArr = []\n\t\t\t\tlaptopArr = []\n\n\t\t\treturn render(request, 'usage/usage_results.html', {'laptopArr': laptopArr, 'projectorArr': projectorArr, 'roomArr': roomArr})\n\t\telse:\n\t\t\tform = GetResourceForm()\n\n\treturn render(request, \"usage/usage_request.html\", {'form': form})","sub_path":"Schedulize/portal/views/usage_view.py","file_name":"usage_view.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"416936443","text":"with open(\"resources/p022_names.txt\") as names:\n name_list = sorted(names.read().replace('\"', \"\").split(\",\"))\n\nalphabet_values = {char: index + 1 for index, char in enumerate(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")}\n\ntotal_score = 0\n\nfor index, name in enumerate(name_list):\n total_score += sum(alphabet_values[char] for char in name) * (index + 1)\n\nprint(total_score)\n","sub_path":"Problems 001 - 050/Problem 022.py","file_name":"Problem 022.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"186741228","text":"import math\nvelocidade = float(input())\nangulo = float(input())\n\nd = ((velocidade**2)*math.sin(2*math.radians(angulo)))/9.8\n\nif d<2:\n print('Muito perto')\nelif d>4:\n print('Muito longe')\nelse:\n print(\"Acertou!\")","sub_path":"backup/user_210/ch25_2020_03_02_16_23_51_965153.py","file_name":"ch25_2020_03_02_16_23_51_965153.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"219856246","text":"from django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom hello.models import Requests\nfrom django.contrib.auth.models import User, AnonymousUser\n\n\nclass MiddlewareTest(TestCase):\n\n def setUp(self):\n self.user = User.objects.create_user(username='Artem',\n password='okguy')\n\n def 
test_requests(self):\n \"\"\"middleware saves requests in db after request\"\"\"\n self.assertEquals(Requests.objects.count(), 0)\n\n self.client.get(reverse('bio_detail'))\n\n self.assertEquals(Requests.objects.count(), 1)\n\n def test_requests_registered_user(self):\n \"\"\"middleware saves logged user after request\"\"\"\n self.client.login(username='Artem', password='okguy')\n self.client.get(reverse('bio_detail'))\n\n logged_user_name = self.user.username\n\n request_object_1 = Requests.objects.get(pk=1)\n\n self.assertEquals(request_object_1.user, logged_user_name)\n\n def test_requests_anonymus_user(self):\n \"\"\"middleware saves unlogged user after request\"\"\"\n self.client.get(reverse('bio_detail'))\n\n anonymous_user = AnonymousUser().__str__()\n\n request_object_1 = Requests.objects.get(pk=1)\n\n self.assertEquals(anonymous_user, request_object_1.user)\n\n def test_request_method(self):\n \"\"\"middleware saves method after request\"\"\"\n requests_method_get = self.client\\\n .get(reverse('bio_detail')).request['REQUEST_METHOD']\n requests_instances_method_get = Requests.objects.get(pk=1).method\n\n self.assertEquals(requests_method_get, requests_instances_method_get)\n\n requests_method_post = self.client\\\n .post(reverse('bio_detail')).request['REQUEST_METHOD']\n\n requests_instances_method_post = Requests.objects.get(pk=2).method\n\n self.assertEquals(requests_method_post, requests_instances_method_post)\n\n def test_requests_url(self):\n \"\"\"middleware saves url after request\"\"\"\n requests_url = (self.client.get(reverse('bio_detail')).\n request['PATH_INFO'])\n requests_instances_url = Requests.objects.get(pk=1).path\n\n self.assertEquals(requests_url, requests_instances_url)\n\n def test_ajax_request(self):\n \"\"\"middleware don't saves requests on url 'requests'\"\"\"\n \"\"\"make test request for checking middleware is catching requests\"\"\"\n self.assertEquals(Requests.objects.count(), 0)\n\n self.client.get(reverse('bio_detail'))\n\n self.assertEquals(Requests.objects.count(), 1)\n\n self.client.get(\n reverse('requests_list'),\n **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}\n )\n\n self.assertEquals(Requests.objects.count(), 1)\n","sub_path":"apps/hello/tests/test_hello_middleware.py","file_name":"test_hello_middleware.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"392311824","text":"# Runs all fuzz tests in a loop\n\nimport sys, time\n_is_subprocess = __name__ == '__main__' and len(sys.argv) > 1 and sys.argv[1] == 'subprocess'\n\nif not _is_subprocess:\n import os, test, subprocess\n\n tests = []\n for tpath in test.__path__:\n for test in os.listdir(tpath):\n if test.startswith('fuzz_') and test != os.path.basename(__file__):\n tests.append(getattr(__import__('test.'+test[:-3]), test[:-3]))\n \n if __name__ == '__main__':\n print('Start Time:', time.ctime())\n subprocess.Popen(sys.executable + ' -m test.fuzz_all subprocess')\n for i in range(1,99999):\n for t in tests:\n t.test()\n print('End of', t.__name__, 'for Iteration', i, '-', time.ctime())\n\nif _is_subprocess:\n import random, struct, lockdownlib\n rnd = random.SystemRandom()\n safe_modules = []\n for m in lockdownlib.SAFE_MODULES:\n try: safe_modules.append(__import__(m))\n except ModuleNotFoundError: pass\n\n def fargs(depth=0):\n args = []\n for _ in range(rnd.randint(0, 8)):\n c = rnd.randint(1, 8 if depth<1 else 5)\n if c == 1: args.append(rnd.randint(-999999,999999))\n elif c == 2: 
args.append(os.urandom(rnd.randint(0,10)))\n elif c == 3: args.append(fargs)\n elif c == 4: args.append(struct.unpack('i', os.urandom(4))[0])\n elif c == 5: args.append(struct.unpack('d', os.urandom(8))[0])\n elif c == 6: args.append(fargs(depth+1))\n elif c == 7: args.append(tuple(fargs(depth+1)))\n elif c == 8: args.append({k:v for k, v in zip(fargs(), fargs())})\n return args\n\n if __name__ == '__main__':\n lockdownlib.lockdown()\n for i in range(1,99999): \n for _ in range(999999):\n m = rnd.choice(list(sys.modules.values()))\n a = getattr(m, rnd.choice(dir(m)))\n try:\n a(*fargs) if callable(a) else a\n except:\n pass\n print('Finished testing several random functions and attributes for Iteration', i, '-', time.ctime())\n","sub_path":"Lib/test/fuzz_all.py","file_name":"fuzz_all.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"404151933","text":"'''\n ╔╦╗╔═╗ ╔═╗┌─┐┬ ┬ ┌─┐┌─┐┌┬┐┌─┐┬─┐\n ║║╠═╝ ║ │ ││ │ ├┤ │ │ │ │├┬┘\n ═╩╝╩ ╚═╝└─┘┴─┘┴─┘└─┘└─┘ ┴ └─┘┴└─\n'''\nfrom django.views.generic.edit import UpdateView\nfrom extra_views import UpdateWithInlinesView\nfrom django.views.generic.detail import DetailView\nfrom django.contrib import messages\nfrom collector.forms.basic import CharacterForm, SkillFormSet, TalentFormSet, BlessingCurseFormSet, BeneficeAfflictionFormSet, ArmorFormSet, WeaponFormSet, ShieldFormSet\nfrom collector.models.characters import Character\nfrom scenarist.mixins.ajaxfromresponse import AjaxFromResponseMixin\nfrom django.urls import reverse_lazy\n\nclass CharacterDetailView(DetailView):\n model = Character\n context_object_name = 'c' \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\nclass CharacterUpdateView(AjaxFromResponseMixin,UpdateView):\n model = Character\n form_class = CharacterForm\n context_object_name = 'c'\n template_name_suffix = '_update_form' \n #success_url=reverse_lazy('collector:view_character')\n\n def form_valid(self, form):\n context = self.get_context_data(form=form)\n skills_formset = context['skills']\n talents_formset = context['talents']\n blessingcurses_formset = context['blessingcurses']\n beneficeafflictions_formset = context['beneficeafflictions']\n armors_formset = context['armors']\n weapons_formset = context['weapons']\n shields_formset = context['shields']\n if skills_formset.is_valid() and talents_formset.is_valid() and blessingcurses_formset.is_valid() and beneficeafflictions_formset.is_valid() and armors_formset.is_valid() and weapons_formset.is_valid() and shields_formset.is_valid():\n response = super().form_valid(form)\n skills_formset.instance = self.object\n talents_formset.instance = self.object\n blessingcurses_formset.instance = self.object\n beneficeafflictions_formset.instance = self.object\n armors_formset.instance = self.object\n weapons_formset.instance = self.object\n shields_formset.instance = self.object\n skills_formset.save()\n talents_formset.save()\n blessingcurses_formset.save()\n beneficeafflictions_formset.save()\n armors_formset.save()\n weapons_formset.save()\n shields_formset.save()\n return response\n else:\n return super().form_invalid(form)\n \n def get_context_data(self, **kwargs):\n context = super(CharacterUpdateView, self).get_context_data(**kwargs)\n if self.request.POST: \n context['form'] = CharacterForm(self.request.POST, instance=self.object)\n context['skills'] = SkillFormSet(self.request.POST, instance=self.object)\n context['talents'] = 
TalentFormSet(self.request.POST, instance=self.object)\n context['blessingcurses'] = BlessingCurseFormSet(self.request.POST, instance=self.object)\n context['beneficeafflictions'] = BeneficeAfflictionFormSet(self.request.POST, instance=self.object)\n context['armors'] = ArmorFormSet(self.request.POST, instance=self.object)\n context['weapons'] = WeaponFormSet(self.request.POST, instance=self.object)\n context['shields'] = ShieldFormSet(self.request.POST, instance=self.object)\n \n context['skills'].full_clean()\n context['talents'].full_clean()\n context['blessingcurses'].full_clean()\n context['beneficeafflictions'].full_clean()\n context['armors'].full_clean()\n context['weapons'].full_clean()\n context['shields'].full_clean()\n messages.add_message(self.request, messages.INFO, 'Updating character %s'%(context['form']['full_name'].value()))\n else:\n context['form'] = CharacterForm(instance=self.object)\n context['skills'] = SkillFormSet(instance=self.object)\n context['talents'] = TalentFormSet(instance=self.object)\n context['blessingcurses'] = BlessingCurseFormSet(instance=self.object)\n context['beneficeafflictions'] = BeneficeAfflictionFormSet(instance=self.object)\n context['armors'] = ArmorFormSet(instance=self.object)\n context['weapons'] = WeaponFormSet(instance=self.object)\n context['shields'] = ShieldFormSet(instance=self.object)\n messages.add_message(self.request, messages.INFO, 'Editing character %s'%(context['form']['full_name'].value())) \n return context\n\n","sub_path":"collector/views/characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"138351519","text":"import utils\nfrom utils import Exit\n\n\ndef get_edges():\n '''\n Return the exit connections for triangle_maze.jpg.\n '''\n edge_list = [\n [Exit(0, 0), Exit(1, 0)],\n [Exit(1, 1), Exit(8, 1)],\n [Exit(1, 2), Exit(4, 0)],\n [Exit(1, 3), Exit(3, 1)],\n [Exit(2, 0), Exit(3, 0)],\n [Exit(3, 2), Exit(6, 0)],\n [Exit(4, 1), Exit(5, 2)],\n [Exit(4, 2), Exit(5, 1)],\n [Exit(4, 3), Exit(5, 0)],\n [Exit(4, 4), Exit(6, 1)],\n [Exit(5, 3), Exit(7, 1)],\n [Exit(6, 2), Exit(7, 0)],\n [Exit(6, 3), Exit(7, 3)],\n [Exit(6, 4), Exit(8, 2)],\n [Exit(7, 2), Exit(8, 0)]\n ]\n\n return utils.create_edge_hash(edge_list)\n\n\ndef get_junctions():\n '''\n Return the junction/valve info for complex_maze.jpg.\n '''\n dead_end = {'connection_lists': [[]]}\n triangle = {'connection_lists': [[+1], [-1], []]}\n\n return [\n dict(dead_end), # shallow clones\n {'connection_lists': [[+2], [+2], [-2], [-2]], 'max_spin': 1},\n dict(dead_end),\n dict(triangle),\n {'connection_lists': [[+1], [-1], [+1, +2], [+1, -1], [-1, -2]]},\n {'connection_lists': [[+1], [-1], [+1], [-1]], 'max_spin': 2},\n {'connection_lists': [[], [], [+2], [], [-2]]},\n {'connection_lists': [[+2], [], [-2], []], 'max_spin': 2},\n dict(triangle)\n ]\n\n\ndef get_source():\n return Exit(2, 0)\n\n\ndef get_sprinklers():\n return [\n [Exit(0, 0), Exit(1, 0)],\n [Exit(1, 1), Exit(8, 1)],\n [Exit(1, 2), Exit(4, 0)],\n [Exit(1, 3), Exit(3, 1)],\n [Exit(4, 1), Exit(5, 2)],\n [Exit(4, 2), Exit(5, 1)],\n [Exit(4, 3), Exit(5, 0)],\n [Exit(4, 4), Exit(6, 1)],\n [Exit(5, 3), Exit(7, 1)],\n [Exit(6, 3), Exit(7, 3)],\n [Exit(6, 4), Exit(8, 2)]\n ]\n","sub_path":"complex_maze.py","file_name":"complex_maze.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} 
{"seq_id":"305393501","text":"from tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter.constants import LEFT, RIGHT, BOTTOM, RAISED, HORIZONTAL, PAGES\nimport json\nfrom tkinter import messagebox\nimport sqlite3\nimport datetime\nfrom functools import partial\nimport xml.dom.minidom as xml\n\n# log in screen\n\ndef login_window():\n global login\n login = Tk()\n login.title('Đăng nhập')\n login.geometry('320x170')\n login.resizable(0, 0)\n login.iconbitmap('data/bug.ico')\n\n global user_var\n user_var = StringVar()\n global pass_var\n pass_var = StringVar()\n\n label_user = Label(login, text='Tên đăng nhập:')\n label_user.grid(row=1, column=1, padx=(30, 10), pady=(30, 10))\n entry_user = Entry(login, textvariable=user_var)\n entry_user.grid(row=1, column=2, padx=(30, 10), pady=(30, 10))\n\n label_pass = Label(login, text='Mật khẩu:')\n label_pass.grid(row=2, column=1, padx=(30, 10), pady=10)\n entry_pass = Entry(login, textvariable=pass_var, show=\"*\")\n entry_pass.grid(row=2, column=2, padx=(30, 10), pady=10)\n\n login_button = Button(login, text='Đăng nhập', command=login_func)\n login_button.grid(row=3, column=2, padx=10, pady=10)\n login.mainloop()\n\n\ndef login_func():\n global username\n username = user_var.get()\n password = pass_var.get()\n # connect to json\n f = open('data/login.json', 'r', encoding='utf-8')\n data = json.load(f)\n if username in data.keys():\n if password == data[username]:\n messagebox.showinfo('Đăng nhập thành công',\n 'Chào mừng %s vào phần mềm' % username)\n login.destroy()\n\n main_window()\n\n else:\n messagebox.showwarning('Sai mật khẩu', 'Mật khẩu không đúng')\n\n else:\n messagebox.showerror('Sai tên đăng nhập', 'User không tồn tại')\n\n f.close()\n\n\ndef main_window():\n global main\n main = Tk()\n main.title('Phần mềm quản lý kết quả vi sinh')\n main.geometry('500x500')\n main.resizable(0, 0)\n main.config(menu=menu_bar())\n main.iconbitmap('data/bug.ico')\n main.mainloop()\n return main\n\n\ndef menu_bar():\n menubar = Menu(main)\n funcmenu = Menu(menubar, tearoff=0)\n\n funcmenu.add_command(label='Nhập bệnh phẩm', command=new_specimen_window)\n\n funcmenu.add_command(label='Cập nhật kết quả nuôi cấy',\n command=update_result_window)\n funcmenu.add_command(label='Tìm kiếm thông tin bệnh phẩm')\n funcmenu.add_command(label='Xóa bệnh phẩm')\n funcmenu.add_separator()\n funcmenu.add_command(label = 'Xuất dữ liệu XML')\n funcmenu.add_separator()\n funcmenu.add_command(label='Thoát', command=main.destroy)\n menubar.add_cascade(label='Chức năng', menu=funcmenu)\n return menubar\n\n\ndef connect_to_database():\n conn = sqlite3.connect(\"data/database.db\")\n return conn\n\n\ndef new_specimen_window():\n main.withdraw()\n new_specimen = Toplevel()\n new_specimen.title('Nhập bệnh phẩm mới')\n new_specimen.geometry('500x500')\n new_specimen.iconbitmap('data/bug.ico')\n\n conn = connect_to_database()\n\n label1 = Label(new_specimen, text='NHẬP BỆNH PHẨM MỚI').grid(\n row=0, column=1, padx=(30, 10), pady=(30, 10))\n label_SID = Label(new_specimen, text='Mã số bệnh phẩm (SID):').grid(\n sticky='W', row=1, column=0, padx=10, pady=10)\n sid_var = StringVar()\n\n entry_SID = Entry(new_specimen, textvariable=sid_var)\n entry_SID.grid(row=1, column=1)\n name_var = StringVar()\n age_var = StringVar()\n sex_var = IntVar()\n ward_var = StringVar()\n address_var = StringVar()\n diagnosis_var = StringVar()\n physician_var = StringVar()\n sample_type_var = StringVar()\n\n def check_SID(self):\n read_SID = conn.execute('''SELECT SID FROM SAMPLE_INFO''')\n sid_list 
= []\n for row in read_SID:\n sid_list.append(int(row[0]))\n if len(sid_var.get()) != 0:\n try:\n if int(sid_var.get()) in sid_list:\n messagebox.showerror(\n 'SID đã tồn tại', 'SID ' + sid_var.get() + ' đã tồn tại')\n entry_SID.focus_force()\n sid_var.set(\"\")\n except ValueError:\n messagebox.showerror('Lỗi nhập liệu', 'SID chỉ bao gồm số')\n entry_SID.focus_force()\n sid_var.set(\"\")\n \n \n #entry_SID.bind(\"\", check_SID)\n \n entry_SID.bind(\"\", check_SID)\n\n pid_var = StringVar()\n entry_PID = Entry(new_specimen, textvariable=pid_var)\n label_PID = Label(new_specimen, text='Số bệnh án (PID):').grid(\n sticky='W', row=2, column=0, padx=(10, 0), pady=10)\n entry_PID.grid(row=2, column=1)\n\n def check_PID(self):\n\n if len(pid_var.get()) != 0:\n try:\n pid_get = int(pid_var.get())\n read_pid = conn.execute(\"SELECT PID FROM PATIENT_INFO\")\n list_pid = []\n for id_ in read_pid:\n list_pid.append(id_[0])\n if pid_get in list_pid:\n\n data = conn.execute(\n 'SELECT * FROM PATIENT_INFO WHERE PID = ? ', (pid_get,))\n for row in data:\n name_var.set(row[2])\n age_var.set(row[3])\n sex_var.set(row[4])\n ward_var.set(row[5])\n address_var.set(row[6])\n diagnosis_var.set(row[7])\n\n except ValueError:\n messagebox.showerror('Lỗi nhập liệu', \"PID chỉ bao gồm số\")\n entry_PID.focus_force()\n pid_var.set(\"\")\n \n\n #entry_PID.bind(\"\", check_PID)\n entry_PID.bind(\"\", check_PID)\n\n # name\n entry_name = Entry(new_specimen, textvariable=name_var)\n label_name = Label(new_specimen, text='Họ và tên:').grid(\n sticky='W', row=3, column=0, padx=(10, 0), pady=10)\n entry_name.grid(row=3, column=1)\n\n # age\n\n def validate_age(self):\n if len(age_var.get()) != 0:\n\n try:\n int(age_var.get())\n if len(age_var.get()) != 4:\n messagebox.showerror('Năm sinh', 'Nhập năm sinh sai')\n age_var.set(\"\")\n entry_age.focus_force()\n except ValueError:\n messagebox.showerror('Lỗi nhập liệu', 'Năm sinh phải là số')\n entry_age.focus_force()\n age_var.set(\"\")\n\n entry_age = Entry(new_specimen, textvariable=age_var, width=5)\n entry_age.bind(\"\", validate_age)\n label_age = Label(new_specimen, text='Năm sinh').grid(\n sticky='W', row=3, column=2)\n entry_age.grid(row=3, column=3)\n # sex\n label_age = Label(new_specimen, text='Giới tính:').grid(\n sticky='W', row=4, column=0, padx=(10, 0), pady=10)\n sex_radio_button_1 = Radiobutton(\n new_specimen, text='Nam', variable=sex_var, value=1)\n sex_radio_button_1.grid(sticky='W', row=4, column=1)\n sex_radio_button_2 = Radiobutton(\n new_specimen, text='Nữ', variable=sex_var, value=0)\n sex_radio_button_2.grid(row=4, column=1)\n sex_radio_button_3 = Radiobutton(\n new_specimen, text='Khác', variable=sex_var, value=3)\n sex_radio_button_3.grid(sticky='E', row=4, column=1)\n # department\n add = 'data/ward.json'\n file = open(add)\n data = json.load(file)\n list_dep = []\n for key in data.keys():\n list_dep.append(data.get(key))\n\n label_ward = Label(new_specimen, text='Khoa:').grid(\n sticky='W', row=5, column=0, padx=(10, 0), pady=10)\n ward_var.set(None)\n #ward_var.set(list_dep[0])\n ward_menu = Combobox(new_specimen, textvariable = ward_var, values = list_dep, state = 'readonly')\n ward_menu['width'] = 10\n ward_menu.grid(row=5, column=0, padx=(50, 10))\n # address\n label_address = Label(new_specimen, text='Địa chỉ:').grid(\n sticky='W', row=5, column=1, padx=(30, 0), pady=10)\n entry_address = Entry(new_specimen, textvariable=address_var)\n scroll_address = Scrollbar(new_specimen, orient=HORIZONTAL)\n 
entry_address.config(xscrollcommand=scroll_address.set)\n scroll_address.config(command=entry_address.xview)\n scroll_address.grid(sticky=E+W, row=5, column=1,\n padx=(80, 10), pady=(36.5, 0))\n entry_address.grid(row=5, column=1, padx=(80, 10))\n #scroll_address.pack(side = BOTTOM, fill = 'x')\n\n # dx\n label_dx = Label(new_specimen, text='Chẩn đoán:').grid(\n sticky='W', row=6, column=0, padx=(10, 0), pady=10)\n entry_dx = Entry(new_specimen, textvariable=diagnosis_var)\n entry_dx.grid(sticky=E+W, row=6, column=1)\n scroll_dx = Scrollbar(new_specimen, orient=HORIZONTAL)\n entry_dx.config(xscrollcommand=scroll_dx)\n scroll_dx.config(command=entry_dx.xview)\n scroll_dx.grid(sticky=E+W, row=6, column=1, padx=(0, 10), pady=(36.5, 0))\n\n # physician\n label_physician = Label(new_specimen, text='Bác Sĩ chỉ định:').grid(\n sticky='W', row=7, column=0, padx=(10, 0), pady=10)\n entry_physician = Entry(new_specimen, textvariable=physician_var)\n entry_physician.grid(sticky='W', row=7, column=1)\n # date/time\n label_datetime = Label(new_specimen, text='Ngày giờ nhập mẫu:').grid(\n sticky='W', row=8, column=0, padx=(10, 0), pady=10)\n var_datetime = StringVar()\n date = datetime.datetime.now()\n date_string = date.strftime('%Y/%m/%d %H:%M:%S')\n var_datetime.set(date_string)\n entry_datetime = Entry(\n new_specimen, textvariable=var_datetime, state=\"readonly\")\n entry_datetime.grid(sticky='W', row=8, column=1)\n # sample type\n sample_type = json.load(open('data/SAMPLE_TYPE.json'))\n list_sample_type = []\n for key in sample_type.keys():\n list_sample_type.append(sample_type.get(key))\n\n label_sampletype = Label(new_specimen, text='Loại mẫu:').grid(\n sticky='W', row=9, column=0, padx=(10, 0), pady=10)\n sample_type_var.set(list_sample_type[0])\n sample_type_menu = Combobox(\n new_specimen, textvariable = sample_type_var, values = list_sample_type, state = 'readonly')\n sample_type_menu['width'] = 15\n sample_type_menu.grid(sticky=E+W, row=9, column=1)\n\n # clear everything function and button\n def clear_everything():\n sid_var.set(\"\")\n pid_var.set(\"\")\n name_var.set(\"\")\n age_var.set(\"\")\n sex_var.set(1)\n ward_var.set(list_dep[0])\n address_var.set(\"\")\n diagnosis_var.set(\"\")\n physician_var.set(\"\")\n sample_type_var.set(list_sample_type[0])\n\n def reset_everything():\n if messagebox.askokcancel('Xóa tất cả', 'Xóa tất cả và nhập lại?'):\n clear_everything()\n\n reset_button = Button(new_specimen, text='Nhập lại',\n command=reset_everything)\n reset_button.grid(row=10, column=0, padx=(10, 0), pady=10)\n # save button\n\n def save_button_command():\n if len(sid_var.get()) == 0:\n messagebox.showerror('Chưa nhập SID', 'Chưa nhập SID')\n entry_SID.focus_force() \n else: \n sid_get = int(sid_var.get())\n if len(pid_var.get()) == 0:\n messagebox.showerror('Chưa nhập PID', 'Chưa nhập PID')\n entry_PID.focus_force() \n else: \n pid_get = int(pid_var.get())\n \n name_get = name_var.get()\n age_get = None\n sex_get = sex_var.get()\n try:\n age_get = int(age_var.get())\n\n except ValueError:\n pass\n ward_get = ward_var.get()\n address_get = address_var.get()\n dx_get = diagnosis_var.get()\n physician_get = physician_var.get()\n date_get = date_string\n sample_type_get = sample_type_var.get()\n try:\n conn = connect_to_database()\n query_patient = 'INSERT OR REPLACE INTO PATIENT_INFO\\\n (PID, FULL_NAME, AGE, SEX, WARD, ADDRESS, DIAGNOSIS)\\\n VALUES (?, ?, ?, ?, ?, ?, ?)'\n conn.execute(query_patient, (pid_get, name_get, age_get,\n sex_get, ward_get, address_get, dx_get))\n 
query_sample = 'INSERT INTO SAMPLE_INFO\\\n (SID, PID, DATE_TIME, PHYSICIAN, SAMPLE_TYPE)\\\n VALUES (?, ?, ?, ?, ?)'\n conn.execute(query_sample, (sid_get, pid_get,\n date_get, physician_get, sample_type_get))\n conn.commit()\n except sqlite3.OperationalError:\n messagebox.showerror(\n 'Lỗi CSDL', 'Cơ sở cập nhật hiện đang bị khóa')\n messagebox.showinfo('Thêm mới thành công',\n 'Thêm mới SID ' + str(sid_get) + ' thành công')\n clear_everything()\n save_button = Button(new_specimen, text='Lưu',\n width=15, command=save_button_command)\n save_button.grid(row=10, column=1, pady=10)\n\n #bind Return to move focus\n list_entry = []\n for child in new_specimen.winfo_children():\n if isinstance(child, Entry):\n list_entry.append(child)\n \n def focus_on_next_entry(event, entry_list, this_index):\n next_index = (this_index + 1) % len(list_entry)\n entry_list[next_index].focus_set()\n\n for idx, entry in enumerate(list_entry):\n entry.bind('', lambda e, idx = idx: focus_on_next_entry(e, list_entry, idx)) \n # exit button = return to main\n\n def on_exit():\n if messagebox.askokcancel('Thoát', 'Bạn muốn ngưng nhập mới?'):\n main.deiconify()\n new_specimen.destroy()\n conn.commit()\n conn.close()\n exit_button = Button(new_specimen, text='Quay lại', command=on_exit)\n exit_button.grid(row=10, column=2, pady=10)\n new_specimen.protocol(\"WM_DELETE_WINDOW\", on_exit)\n\n new_specimen.mainloop()\n\n\ndef update_result_window():\n main.withdraw()\n update_result = Toplevel()\n update_result.title('Cập nhật kết quả nuôi cấy')\n update_result.geometry('550x700')\n update_result.iconbitmap('data/bug.ico')\n\n conn = connect_to_database()\n\n top_frame = LabelFrame(update_result, text = 'Thông tin mẫu')\n top_frame.grid(row = 0, column = 0, padx= (10, 10))\n sid_entry_var = StringVar()\n sid_label = Label(top_frame, text='Nhập SID cần cập nhật:')\n sid_label.grid(row=1, column=0, padx=(10, 10), pady=10)\n sid_entry = Entry(top_frame, textvariable=sid_entry_var)\n\n pid_var = StringVar()\n date_var = StringVar()\n physician_var = StringVar()\n sample_type_var = StringVar()\n name_var = StringVar()\n age_var = StringVar()\n sex_var = IntVar()\n ward_var = StringVar()\n address_var = StringVar()\n diagnosis_var = StringVar()\n result_var = StringVar()\n result_var.set(\"\")\n organism_var =StringVar()\n organism_var.set(\"\")\n \n antibiogram_var = StringVar()\n \n global flag_1\n flag_1 = 0\n def check_sid(self):\n if len(sid_entry_var.get()) != 0:\n try:\n #global sid_get\n global sid_get\n sid_get = int(sid_entry_var.get())\n sql_query = '''SELECT SID FROM SAMPLE_INFO'''\n data = conn.execute(sql_query)\n list_sid = []\n for row in data:\n list_sid.append(row[0])\n\n if sid_get in list_sid:\n sql_query_2 = '''SELECT PID, DATE_TIME, PHYSICIAN, SAMPLE_TYPE, RESULT, ORGANISM, ANTIBIOGRAM FROM SAMPLE_INFO WHERE SID = ?'''\n data_1 = conn.execute(sql_query_2, (sid_get,))\n for row in data_1:\n pid_var.set(row[0])\n pid = int(pid_var.get())\n date_var.set(row[1])\n physician_var.set(row[2])\n sample_type_var.set(row[3])\n result_var.set(row[4])\n organism_var.set(row[5])\n antibiogram_var.set(row[6])\n \n sql_query_3 = '''SELECT FULL_NAME, AGE, SEX, WARD, ADDRESS, DIAGNOSIS FROM PATIENT_INFO WHERE PID = ?'''\n data_2 = conn.execute(sql_query_3, (pid,))\n for row in data_2:\n name_var.set(row[0])\n age_var.set(row[1])\n sex_var.set(row[2])\n ward_var.set(row[3])\n address_var.set(row[4])\n diagnosis_var.set(row[5])\n \n if sex_var.get() == 1:\n sex_var_char.set('Nam')\n elif sex_var.get() == 0:\n 
sex_var_char.set('Nữ')\n else:\n sex_var_char.set('Khác')\n \n else:\n messagebox.showwarning(\n 'Không tìm thấy SID', 'Không tìm thấy SID ' + str(sid_get))\n\n sid_entry.focus_force()\n sid_entry_var.set(\"\")\n except ValueError:\n messagebox.showerror('Lỗi nhập liệu', 'SID chỉ bao gồm số')\n sid_entry.focus_force()\n sid_entry_var.set(\"\")\n\n \n sid_entry.bind(\"\", check_sid)\n sid_entry.bind(\"\", check_sid)\n sid_entry.grid(row=1, column=1, padx=10, pady=10)\n \n entry_PID = Entry(top_frame, textvariable=pid_var, state = 'readonly')\n label_PID = Label(top_frame, text='Số bệnh án (PID):').grid(\n sticky='W', row=2, column=0, padx=(10, 0), pady=10)\n entry_PID.grid(row=2, column=1)\n\n entry_name = Entry(top_frame, textvariable=name_var, state = 'readonly')\n label_name = Label(top_frame, text='Họ và tên:').grid(\n sticky='W', row=3, column=0, padx=(10, 0), pady=10)\n entry_name.grid(row=3, column=1)\n\n entry_age = Entry(top_frame, textvariable=age_var, state = 'readonly', width = 5)\n label_age = Label(top_frame, text='Tuổi:').grid(\n sticky='W', row=3, column=2, padx=(10, 0), pady=10)\n entry_age.grid(sticky = 'W', row=3, column=3)\n sex_var_char = StringVar()\n \n if sex_var.get() == 1:\n sex_var_char.set('Nam')\n elif sex_var.get() == 0:\n sex_var_char.set('Nữ')\n else:\n sex_var_char.set('Khác')\n label_sex = Label(top_frame, text = 'Giới tính').grid(\n row = 3, column = 3)\n entry_sex = Entry(top_frame, textvariable = sex_var_char, state = 'readonly', width = 5)\n entry_sex.grid(sticky = 'E', row = 3, column = 3) \n\n entry_address = Entry(top_frame, textvariable=address_var, state = 'readonly')\n scroll_address = Scrollbar(top_frame, orient = HORIZONTAL)\n entry_address.config(xscrollcommand=scroll_address.set)\n scroll_address.config(command=entry_address.xview)\n scroll_address.grid(sticky=E+W, row=4, column=1,\n padx=(10, 10), pady=(36.5, 0))\n label_address = Label(top_frame, text='Địa chỉ:').grid(\n sticky='W', row=4, column=0, padx=(10, 0), pady=10)\n entry_address.grid(row=4, column=1)\n label_dep = Label(top_frame, text = 'Khoa:').grid(\n sticky='W', row=4, column=2, padx=(10, 0), pady=10)\n entry_dep = Entry(top_frame, textvariable = ward_var, state = 'readonly') \n entry_dep.grid(row = 4, column= 3)\n\n label_physician = Label(top_frame, text = 'Bác sĩ chỉ định: ').grid(\n sticky = 'W', row = 5, column = 0, padx = (10, 0), pady = 10)\n entry_physician = Entry(top_frame, textvariable = physician_var, state = 'readonly')\n entry_physician.grid(row = 5, column = 1)\n\n label_dx = Label(top_frame, text = 'Chẩn đoán:').grid(\n sticky = 'W', row = 5, column = 2, padx = (10, 0), pady = 10)\n entry_dx = Entry(top_frame, textvariable = diagnosis_var, state = 'readonly')\n scroll_dx = Scrollbar(top_frame, orient = HORIZONTAL)\n entry_dx.config(xscrollcommand=scroll_dx.set)\n scroll_dx.config(command=entry_dx.xview)\n scroll_dx.grid(sticky=E+W, row=5, column=3,\n padx=(10, 10), pady=(36.5, 0))\n entry_dx.grid(row = 5, column = 3)\n\n label_time = Label(top_frame, text = 'Thời gian nhập mẫu:').grid(\n sticky = 'W', row = 6, column = 0, padx = (10, 0), pady = 10)\n entry_time = Entry(top_frame, textvariable = date_var, state = 'readonly')\n entry_time.grid(row = 6, column = 1)\n label_sample_type = Label(top_frame, text = 'Loại mẫu:').grid(\n sticky = W, row = 6, column = 2, padx = (10, 0), pady = 5)\n entry_sample_type = Entry(top_frame, textvariable = sample_type_var, state = 'readonly')\n entry_sample_type.grid(row = 6, column = 3)\n lable_result = Label(top_frame, text = 
'Kết quả:').grid(\n sticky = 'W', row = 7, column = 0, padx = (10, 0), pady = 10)\n list_result=['ÂM TÍNH', 'DƯƠNG TÍNH']\n\n entry_result = Combobox(top_frame, values = list_result, state = 'readonly', textvariable = result_var)\n \n entry_result.grid(row = 7, column = 1)\n result_frame = LabelFrame(update_result, text = 'Kết quả định danh')\n result_frame.grid(row = 1, column = 0, sticky = W, padx=(10,0), pady = 10)\n \n \n list_bacteria = []\n data_3 = conn.execute('SELECT NAME FROM BACTERIA_ID') \n for row in data_3:\n list_bacteria.append(row[0]) \n #entry_bacteria = Combobox(result_frame, textvariable = organism_var, values = list_bacteria, state = 'readonly')\n class_var = StringVar()\n ast_frame = LabelFrame(update_result,text = 'Kháng sinh đồ')\n update_result.update_idletasks()\n anti_list = []\n s_list = []\n r_list = []\n list_entry_antibio = []\n list_result_eval = []\n def show_antibiogram(self, *arg):\n def check_ast(radius, s, r, text_var, *arg):\n if len(radius.get()) != 0:\n if int(radius.get()) < 6:\n radius.set('6') \n if int(radius.get()) >= s:\n text_var.set('S')\n elif int(radius.get()) <= r:\n text_var.set('R')\n elif int(radius.get()) > r and int(radius.get()) < s:\n text_var.set('I') \n else:\n text_var.set(\" \")\n \n def enter_antibiogram(self, *arg):\n ast_frame.grid(row = 2, column = 0 , sticky = W, padx=(10,0), pady = 10)\n data_4 = conn.execute('SELECT CLASS_ FROM BACTERIA_ID WHERE NAME = ?', (organism_var.get(),))\n for row in data_4: \n class_var.set(row[0])\n data_5 = conn.execute('SELECT * FROM ANTIBIOTIC WHERE CLASS_ = ?', (class_var.get(),))\n \n anti_list.clear()\n \n s_list.clear()\n \n r_list.clear()\n \n list_entry_antibio.clear()\n \n list_result_eval.clear()\n for row in data_5:\n anti_list.append(row[2])\n s_list.append(row[3])\n r_list.append(row[4])\n for i in range(len(anti_list)):\n row = int(i / 3)\n col = i % 3\n label_row_i = Label(ast_frame, text = anti_list[i], width = 5)\n label_row_i.grid(row = row, column = col, sticky = W, padx = (10, 80), pady = 5) \n global entry_i_var\n entry_i_var = StringVar()\n result_ast_i = StringVar()\n result_ast_i.set(\"\")\n entry_i = Entry(ast_frame, textvariable = entry_i_var, width = 3)\n list_entry_antibio.append(entry_i_var)\n entry_i.grid(row = row, column = col, padx = (15, 5), pady = 5)\n result_str_i = Label(ast_frame, textvariable = result_ast_i, width = 2)\n result_str_i.grid(row = row, column = col, sticky = E, padx = (0, 10), pady = 5) \n entry_i.bind('', partial(check_ast, entry_i_var, s_list[i], r_list[i], result_ast_i, str(i)))\n list_result_eval.append(result_ast_i)\n data_6 = conn.execute('SELECT ANTIBIOGRAM FROM SAMPLE_INFO WHERE SID = ?', (sid_get,))\n for row in data_6:\n if row[0] != None:\n dic = eval(row[0])\n for key in dic.keys():\n if key in anti_list:\n index = anti_list.index(key)\n list_entry_antibio[index].set(dic[key][0])\n list_result_eval[index].set(dic[key][1])\n #enter to focus on next entry\n list_entry = []\n for child in ast_frame.winfo_children():\n if isinstance(child, Entry):\n list_entry.append(child)\n \n def focus_on_next_entry(event, entry_list, this_index):\n next_index = (this_index + 1) % len(list_entry)\n entry_list[next_index].focus_set()\n\n for idx, entry in enumerate(list_entry):\n entry.bind('', lambda e, idx = idx: focus_on_next_entry(e, list_entry, idx)) \n \n ast_frame.mainloop()\n\n if result_var.get() == list_result[1]:\n label_bacteria = Label(result_frame, text = 'Vi khuẩn:')\n label_bacteria.grid(sticky = 'W', row = 0, column = 0)\n 
entry_bacteria = Combobox(result_frame, textvariable = organism_var, values = list_bacteria, state = 'readonly') \n entry_bacteria.grid(row = 0, column = 1) \n \n entry_bacteria.bind(\"<>\", enter_antibiogram)\n organism_var.trace('w', enter_antibiogram)\n def re_show(self, *arg):\n if result_var.get == 'DƯƠNG TÍNH':\n enter_antibiogram(self)\n result_var.trace('w', re_show) \n label_class = Label(result_frame, text = 'Nhóm kháng sinh:')\n label_class.grid(sticky = 'W', row = 0, column = 2)\n entry_class = Entry(result_frame, textvariable = class_var, state = 'readonly')\n entry_class.grid(row = 0, column = 3)\n\n else:\n for wid in result_frame.winfo_children():\n wid.destroy()\n for wid in ast_frame.winfo_children():\n wid.destroy()\n #organism_var.trace('w', enter_antibiogram) \n result_var.trace('w', show_antibiogram) \n entry_result.bind(\"<>\", show_antibiogram)\n \n \n\n #save button\n def clear():\n sid_entry_var.set(\"\")\n pid_var.set(\"\")\n name_var.set(\"\")\n age_var.set(\"\")\n sex_var_char.set(\"\")\n physician_var.set(\"\")\n diagnosis_var.set(\"\")\n address_var.set(\"\")\n ward_var.set(\"\")\n date_var.set(\"\")\n sample_type_var.set(\"\")\n result_var.set(\"\")\n def save():\n if result_var.get() == 'DƯƠNG TÍNH':\n result = result_var.get()\n organism = organism_var.get()\n result_str = \"{\"\n for item in range(len(anti_list)):\n result_str += \"'\" + anti_list[item] + \"'\" + ':['+ list_entry_antibio[item].get() + ',' + \"'\"+ list_result_eval[item].get() +\"'\" + '],'\n result_str += \"}\"\n \n else:\n result = result_var.get()\n organism = None\n result_str = None\n query = '''UPDATE SAMPLE_INFO\\\n SET RESULT = ? ,\\\n ORGANISM = ?,\\\n ANTIBIOGRAM = ?\\\n WHERE SID = ?'''\n if messagebox.askokcancel('Cập nhật dữ liệu', 'Cập nhật kết quả SID ' + str(sid_get) + \"?\"): \n conn.execute(query, (result, organism, result_str, sid_get))\n conn.commit() \n messagebox.showinfo('Cập nhật thành công', 'Cập nhật SID %s thành công'%str(sid_get)) \n clear()\n \n save_button = Button(update_result, text = 'Lưu kết quả', command = save)\n save_button.grid(row = 4, column = 0, padx = (50, 10)) \n #reset button\n def reset_all():\n if messagebox.askokcancel('Nhập lại', 'Xóa tất cả và nhập lại?'):\n clear()\n \n reset_button = Button(update_result, text='Nhập lại', command = reset_all)\n reset_button.grid(row = 4, column = 0, sticky = W, padx= (80, 10))\n \n \n # exit button = return to main\n\n \n def on_exit():\n if messagebox.askokcancel('Thoát', 'Bạn muốn ngưng thay đổi?'):\n main.deiconify()\n update_result.destroy()\n conn.commit()\n conn.close()\n #exit_button = Button(update_result, text = 'Quay lại', command = on_exit)\n #exit_button.grid(row = 10, column = 2, pady = 10)\n update_result.protocol(\"WM_DELETE_WINDOW\", on_exit)\n\n update_result.mainloop()\n\n\ndef delete_specimen_window():\n pass\n\n\ndef find_specimen_window():\n pass\n\ndef export_to_xml():\n pass\n\nlogin_window()\n","sub_path":"app/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":29510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"150497933","text":"import sys\nsys.stdin = open(\"17825.txt\")\n\n\ndef play_game(idx, point):\n global answer\n\n if idx == 10:\n if answer < point:\n answer = point\n return\n\n for h in range(4):\n if horse[h] == (4, 4):\n continue\n temp = horse[h]\n new = temp[1] + turns[idx]\n if temp[0] == 0:\n if new == 5:\n comp = (1, 0)\n elif new == 10:\n comp = (2, 0)\n elif new == 15:\n comp = (3, 0)\n 
elif new == 20:\n comp = (4, 3)\n elif new < 20:\n comp = (0, new)\n else:\n comp = (4, 4)\n elif temp[0] == 1:\n if new < 4:\n comp = (1, new)\n else:\n comp = (4, new - 4)\n elif temp[0] == 2:\n if new < 3:\n comp = (2, new)\n else:\n comp = (4, new - 3)\n elif temp[0] == 3:\n if new < 4:\n comp = (3, new)\n else:\n comp = (4, new - 4)\n else:\n if new < 4:\n comp = (4, new)\n else:\n comp = (4, 4)\n\n if comp in horse and comp != (4, 4):\n continue\n\n if comp == (4, 4):\n horse[h] = (4, 4)\n play_game(idx + 1, point)\n horse[h] = temp\n continue\n\n horse[h] = comp\n if comp[0] == 0:\n play_game(idx + 1, point + comp[1] * 2)\n elif comp[0] == 1:\n play_game(idx + 1, point + (10 + comp[1] * 3))\n elif comp[0] == 2:\n play_game(idx + 1, point + (20 + comp[1] * 2))\n elif comp[0] == 3:\n if comp[1] == 0:\n play_game(idx + 1, point + 30)\n else:\n play_game(idx + 1, point + (29 - comp[1]))\n elif comp[0] == 4:\n play_game(idx + 1, point + (25 + comp[1] * 5))\n horse[h] = temp\n pass\n\n\nfor T in range(int(input())):\n answer = 0\n turns = list(map(int, input().split()))\n horse = [(0, 0) for _ in range(4)]\n play_game(0, 0)\n\n print(answer)","sub_path":"baekjoon/00 모의테스트/17825_주사위 윷놀이.py","file_name":"17825_주사위 윷놀이.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"12615212","text":"from os import path\nfrom pkg_resources import safe_version\nfrom setuptools import find_packages, setup\n\nversion = {}\nwith open(path.join(path.dirname(path.realpath(__file__)), 'cheval', 'version.py')) as fp:\n exec(fp.read(), {}, version)\n\nsetup(\n name='wsp-cheval',\n version=safe_version(version['__version__']),\n description='High-performance discrete-choice (logit) travel demand model evaluation',\n url='https://github.com/wsp-sag/cheval',\n author='WSP, Peter Kucirek',\n maintatiner='Brian Cheung',\n maintainer_email='brian.cheung@wsp.com',\n classifiers=[\n 'License :: OSI Approved :: MIT License'\n ],\n packages=find_packages(),\n install_requires=[\n 'pandas>=0.22',\n 'numpy>=1.14',\n 'astor',\n 'numba>=0.45',\n 'numexpr',\n 'deprecated',\n 'attrs>=19.3'\n ],\n python_requires='>=3.6'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"461089353","text":"from torch.utils import data\nimport numpy as np\nfrom labeler import remove_last_punct, get_model\nfrom tqdm.auto import tqdm\nimport click\nimport spacy\nimport pandas as pd\n\n\nclass OpenSubtitlesDataset(data.Dataset):\n def __init__(self, source_file, max_lines=None):\n self.sentences = []\n for i, line in tqdm(enumerate(open(source_file))):\n if max_lines is not None and i >= max_lines:\n break\n\n self.sentences.append(self.clean(line))\n\n self.sentences = [x for x in self.sentences if x is not None]\n\n def clean(self, line, min_length=2):\n clean = line.lstrip(\"-\").strip()\n return clean if len(clean) >= min_length else None\n\n def __len__(self):\n return len(self.sentences)\n\n def __getitem__(self, idx):\n return self.sentences[idx]\n\n\nclass Evaluator:\n def __init__(\n self,\n dataset,\n remove_end_punct_prob,\n lower_start_prob,\n punctuation,\n lengths=[2, 3, 4],\n seed=1234,\n ):\n gen = np.random.RandomState(seed)\n self.sentence_groups = []\n\n end = 0\n bar = tqdm(total=len(dataset))\n\n while end < len(dataset):\n start = end\n end = min(start + gen.choice(lengths), len(dataset))\n\n 
sentence_group = []\n\n for i in range(start, end):\n sentence = dataset[i]\n\n if gen.random() < remove_end_punct_prob:\n sentence = remove_last_punct(sentence, punctuation)\n\n if gen.random() < lower_start_prob:\n sentence = sentence[0].lower() + sentence[1:]\n\n # whitespace which joins sentences is expected to be part of the previous sentence\n if i < end - 1:\n sentence += \" \"\n\n sentence_group.append(sentence)\n\n self.sentence_groups.append(sentence_group)\n bar.update(len(sentence_group))\n\n self.texts = [\"\".join(group) for group in self.sentence_groups]\n\n def evaluate(self, split_fn):\n correct = np.full(len(self.texts), False, dtype=np.bool)\n\n predicted_groups = split_fn(self.texts)\n for i, (predicted_group, group) in enumerate(\n zip(predicted_groups, self.sentence_groups)\n ):\n if len(predicted_group) != len(group):\n continue\n\n for (a, b) in zip(predicted_group, group):\n if a != b:\n continue\n\n correct[i] = True\n\n return correct\n\n\nclass NNSplitInterface:\n def __init__(self, splitter):\n self.splitter = splitter\n\n def split(self, texts):\n out = []\n for split in self.splitter.split(texts):\n out.append([str(x) for x in split])\n\n return out\n\n\nclass SpacyInterface:\n def __init__(self, name, use_sentencizer, batch_size=1000):\n if use_sentencizer:\n nlp = get_model(name)\n nlp.add_pipe(\"sentencizer\")\n else:\n try:\n nlp = spacy.load(name, disable=[\"tagger\", \"ner\"])\n except OSError:\n nlp = None\n\n self.nlp = nlp\n self.batch_size = batch_size\n\n def split(self, texts):\n out = []\n\n if self.nlp is not None:\n for doc in self.nlp.pipe(texts, batch_size=self.batch_size):\n sentences = []\n\n for sent in doc.sents:\n sentences.append(\"\".join([x.text + x.whitespace_ for x in sent]))\n\n out.append(sentences)\n\n return out\n\n\n@click.command()\n@click.option(\"--subtitle_path\", help=\"Path to the OPUS OpenSubtitles raw text.\")\n@click.option(\"--spacy_model\", help=\"Name of the spacy model to compare against.\")\n@click.option(\"--nnsplit_path\", help=\"Path to the .onnx NNSplit model to use.\")\n@click.option(\"--punctuation\", help=\"Which characters to consider punctuation.\", default=\".?!\")\ndef evaluate(subtitle_path, spacy_model, nnsplit_path, punctuation):\n # nnsplit must be installed to evaluate\n from nnsplit import NNSplit\n\n print(\"Evaluating..\")\n\n dataset = data.Subset(\n OpenSubtitlesDataset(subtitle_path, 1_000_000), np.arange(100_000)\n )\n targets = {\n \"NNSplit\": NNSplitInterface(\n NNSplit(nnsplit_path, use_cuda=True, batch_size=2 ** 7)\n ),\n \"Spacy (Tagger)\": SpacyInterface(spacy_model, use_sentencizer=False),\n \"Spacy (Sentencizer)\": SpacyInterface(spacy_model, use_sentencizer=True),\n }\n\n eval_setups = {\n \"Clean\": (0.0, 0.0),\n \"Partial punctuation\": (0.5, 0.0),\n \"Partial case\": (0.0, 0.5),\n \"Partial punctuation and case\": (0.5, 0.5),\n \"No punctuation and case\": (1.0, 1.0),\n }\n\n result = {}\n preds = {}\n\n for eval_name, (remove_punct_prob, lower_start_prob) in eval_setups.items():\n result[eval_name] = {}\n evaluator = Evaluator(dataset, remove_punct_prob, lower_start_prob, punctuation)\n\n for target_name, interface in targets.items():\n correct = evaluator.evaluate(interface.split)\n preds[f\"{eval_name}_{target_name}\"] = {\n \"samples\": evaluator.texts,\n \"correct\": correct,\n }\n result[eval_name][target_name] = correct.mean()\n\n result = pd.DataFrame.from_dict(result).T\n print(result)\n print(result.to_markdown())\n\n\nif __name__ == \"__main__\":\n 
evaluate()\n","sub_path":"train/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"201399209","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager\nfrom django.conf import settings\nfrom web.extra import send_event_approved\nfrom collections import namedtuple\n\nimport googlemaps\nimport urllib\nimport sys\n\nclass UserManager(BaseUserManager):\n def create_user(self, email, first_name, last_name, password=None):\n if not email:\n raise ValueError('User must have an email address')\n if not first_name:\n raise ValueError('User must have a first name')\n if not last_name:\n raise ValueError('User must have a last name')\n\n user = self.model(\n email=UserManager.normalize_email(email),\n first_name=first_name,\n last_name=last_name,\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, email, first_name, last_name, password):\n user = self.create_user(email, first_name, last_name, password=password)\n user.is_admin = True\n user.save(using=self._db)\n return user\n\nclass User(AbstractBaseUser):\n email = models.EmailField(unique=True)\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n created = models.DateTimeField(auto_now_add=True)\n\n is_staff = models.BooleanField(default=False)\n is_admin = models.BooleanField(default=False)\n is_active = models.BooleanField(default=True)\n\n objects = UserManager()\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['first_name', 'last_name']\n\n class Meta:\n ordering = ('created',)\n\n def get_full_name(self):\n return \"%s %s\" % (self.first_name, self.last_name)\n\n def get_short_name(self):\n return \"%s\" % (self.first_name)\n\n @property\n def is_superuser(self):\n return self.is_admin\n\n @property\n def is_staff(self):\n return self.is_admin\n\n def has_perm(self, perm, obj=None):\n return self.is_admin\n\n def has_module_perms(self, app_label):\n return self.is_admin\n\n def __str__(self):\n return \"%s %s\" % (self.first_name, self.last_name)\n\n\nclass Event(models.Model):\n CATEGORY_CHOICES = (('ENSIASKELEET', 'Koodikoulun ensiaskeleet'), ('ILTIS', 'Koodikoulun iltis'), ('OTHER', 'Muu'))\n\n title = models.CharField(max_length=100)\n category = models.CharField(max_length=30, choices=CATEGORY_CHOICES, default=CATEGORY_CHOICES[0])\n start_date = models.DateField()\n end_date = models.DateField(blank=True, null=True)\n start_time = models.TimeField(blank=True, null=True)\n end_time = models.TimeField(blank=True, null=True)\n price = models.PositiveIntegerField(default=0)\n bring_along = models.CharField(max_length=255, blank=True, null=True)\n street_address = models.CharField(max_length=100)\n city = models.CharField(max_length=40)\n requirements = models.TextField()\n description = models.TextField()\n organization = models.CharField(max_length=100, blank=True, null=True)\n amount = models.PositiveIntegerField(blank=True, null=True)\n signup_link = models.CharField(max_length=255, blank=True, null=True)\n signup_open = models.DateTimeField(blank=True, null=True)\n created = models.DateTimeField(auto_now_add=True)\n booked = models.BooleanField(default=False)\n\n organizer = models.ForeignKey(User, blank=True, null=True, related_name=\"events\")\n decoded_location = models.CharField(max_length=255, blank=True, null=True)\n lat = 
models.CharField(max_length=255, blank=True, null=True)\n lng = models.CharField(max_length=255, blank=True, null=True)\n\n approved = models.BooleanField(default=False)\n\n class Meta:\n ordering = ('start_date',)\n\n def save(self, *args, **kwargs):\n if not self.lat or not self.lng or not self.decoded_location:\n try:\n location = getLocation(self.street_address, self.city)\n self.lat = location[0]\n self.lng = location[1]\n address = ','.join([self.street_address, self.city])\n self.decoded_location = urllib.parse.quote(address)\n except:\n pass\n\n # If an event is approved, send a confirmation email to the organizer.\n if self.pk is not None:\n # Check if the state of 'approved' has changed from the previous object.\n original_event = Event.objects.get(pk=self.pk)\n if self.approved and original_event.approved != self.approved:\n try:\n send_event_approved(self.organizer.email, self.title, self.organizer.first_name)\n except:\n for exc in sys.exc_info():\n print(exc)\n\n super(Event, self).save(*args, **kwargs)\n\n\n def __str__(self):\n return \"%s\" % self.title\n\nclass SignUp(models.Model):\n child = models.CharField(max_length=100)\n guardian = models.CharField(max_length=100)\n age = models.PositiveIntegerField()\n email = models.EmailField()\n phone = models.CharField(max_length=100, blank=True, null=True)\n other = models.TextField(null=True, blank=True)\n created = models.DateTimeField(auto_now_add=True)\n\n event = models.ForeignKey(Event, related_name=\"participants\")\n\n class Meta:\n ordering = ('created',)\n\n def save(self, *args, **kwargs):\n super(SignUp, self).save(*args, **kwargs)\n if self.event.amount and len(self.event.participants.all()) >= self.event.amount:\n self.event.booked = True\n self.event.save()\n\n def __str__(self):\n return \"%s\" % self.child\n\n\ndef getLocation(address, city):\n gmaps = googlemaps.Client(key=settings.GOOGLE_KEY)\n geocode_result = gmaps.geocode(\"%s, %s\" % (address, city))\n lat = geocode_result[0][\"geometry\"][\"location\"][\"lat\"]\n lng = geocode_result[0][\"geometry\"][\"location\"][\"lng\"]\n return [lat, lng]\n\n\nLearningResource = namedtuple('LearningResource', 'url age ageClass languages header description free')\nResourceCategory = namedtuple('ResourceCategory', 'title id resources')","sub_path":"web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"847817","text":"# -*- coding: utf-8 -*-\r\n# @Author : LG\r\n\r\nfrom Model import RetainNet, Evaler\r\nfrom Data import vocdataset\r\nfrom Configs import _C as cfg\r\nfrom Data import transfrom,targettransform\r\n\r\n\r\n# 训练数据集,VOC格式数据集, 训练数据取自 ImageSets/Main/train.txt'\r\ntrain_dataset=vocdataset(cfg, is_train=True, transform=transfrom(cfg,is_train=True),\r\n target_transform=targettransform(cfg))\r\n\r\n# 测试数据集,VOC格式数据集, 测试数据取自 ImageSets/Main/eval.txt'\r\ntest_dataset = vocdataset(cfg=cfg, is_train=False,\r\n transform=transfrom(cfg=cfg, is_train=False),\r\n target_transform=targettransform(cfg))\r\n\r\nif __name__ == '__main__':\r\n # 模型测试只支持GPU单卡或多卡,不支持cpu\r\n net = RetainNet(cfg)\r\n # 将模型移动到gpu上,cfg.DEVICE.MAINDEVICE定义了模型所使用的主GPU\r\n net.to(cfg.DEVICE.MAINDEVICE)\r\n # 模型从权重文件中加载权重\r\n net.load_pretrained_weight(cfg.FILE.MODEL_SAVE_ROOT+'/'+'model_20000.pkl')\r\n # 初始化验证器,验证器参数通过cfg进行配置;也可传入参数进行配置,但不建议\r\n evaler = Evaler(cfg, eval_devices=None)\r\n # 验证器开始在数据集上验证模型\r\n ap, map = evaler(model=net,\r\n test_dataset=test_dataset)\r\n 
print('ap: ',ap)\r\n print('map: ',map)","sub_path":"Retinanet-Pytorch/Demo_eval.py","file_name":"Demo_eval.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"187740058","text":"x=int(input(\"Enter number of elements to enter:\"))\r\nli=[]\r\nfor i in range(0,x):\r\n n=int(input())\r\n li.append(n)\r\ncs=[]\r\nfor i in range(0,x):\r\n sum=0\r\n for j in range(0,i+1):\r\n sum+=li[j]\r\n cs.append(sum)\r\n \r\nprint(\"The list is:\",li)\r\nprint(\"Required list is:\",cs)","sub_path":"Week1/3_10.py","file_name":"3_10.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"344186273","text":"n = int(input())\r\n\r\nsum = 0\r\n\r\nwhile n > 0:\r\n money = float(input())\r\n if money < 0:\r\n print(\"Invalid operation!\")\r\n break\r\n else:\r\n sum += money\r\n n -= 1\r\n print(\"Increase: %.2f\" % money)\r\n\r\nprint(\"Total: %.2f\" % sum)\r\n","sub_path":"AccountBalance.py","file_name":"AccountBalance.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"596530100","text":"# Version2.0\n# 非结构化编程---》 结构化编程 函数\n\n# 项目: 启动函数 初始化函数 管理器函数\nuserDB=[]\nbookDB=[]\ndef initDataBase():\n global userDB\n global bookDB\n # 人\n userDB = [['abc', '123'],\n ['admin', '123'],\n ['zhang3', '123']]\n # 书\n bookDB = [\n ['十万个为什么', '不详', 59.9, '科普', '好看'],\n ['三体', '刘慈欣', 159.9, '科幻', '比前一个好看'],\n ['钢铁是怎样炼成的', '斯基', 259.9, '励志小说', '好看'],\n ['海底两万里', '凡尔纳', 139.9, '科幻小说', '好看'],\n ['活着', '余华', 999.9, '文学小说', '挺好看'],\n ['百年孤独', '马尔克斯', 99.9, '文学小说', '好看'],\n ['时间简史', '霍金', 9.9, '科学文献', '看不懂'],\n ['石头记', '曹雪芹', 59.9, '古典文学', '好看'],\n ['斗破苍穹', '天蚕土豆', 20.0, '网络小说', '悄悄看'],\n ['冰与火之歌', '乔治', 359.9, '魔幻巨作', '好看'], ]\n\n\ndef login():\n while 1:\n # 登录\n name = input('请输入用户名:\\n')\n pwd = input('请输入密码:\\n')\n # 判断是否在数据库\n flag = 0\n for i in userDB: # userDB: list i:列表 i--e:str\n if i[0] == name and i[1] == pwd:\n flag = 1\n break\n if flag == 0:\n print('登录失败,请重新登录')\n else:\n print('登录成功')\n break\n # 进入图书系统\n\ndef regist():\n n1=input('输入用户名')\n p='pwd'\n while 1:\n p=input('请输入密码\\n')\n if check(p):\n break\n u = [n1, p]\n userDB.append(u)\n print('注册成功')\n\ndef bookNews():\n #查看所有书籍信息\n print('书名 作者 价格 类型 评价')\n for i in bookDB:\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\ndef classfyBook():\n #根据类别查询书籍\n print('书的类型有 1科普 2科幻 3励志小说 4科幻小说 5文学小说 6科学文献 7古典文学 8网络小说 9魔幻巨作\\n ')\n n = input('请选择书籍类型\\n')\n if n == '1':\n for i in bookDB:\n if i[3] == '科普':\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == '2':\n for i in bookDB:\n if i[3] == '科幻':\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == '3':\n for i in bookDB:\n if i[3] == '励志小说':\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == '4':\n for i in bookDB:\n if i[3] == '科幻小说':\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == '5':\n for i in bookDB:\n if i[3] == '文学小说':\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == '6':\n for i in bookDB:\n if i[3] == '科普':\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' 
+ str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == '7':\n for i in bookDB:\n if i[3] == '古典文学':\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == '8':\n for i in bookDB:\n if i[3] == '网络小说':\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == '9':\n for i in bookDB:\n if i[3] == '魔幻巨作':\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n else:\n print('输入错误')\ndef bookMoney():\n #根据价格区间查询书籍\n print('书的价格区间有 1. <=50 2. 50<<100 3. 100<<200 4. 200<< \\n')\n n = int(input('选择价格区间\\n'))\n if n == 1:\n print('书名 作者 价格 类型 评价')\n for i in bookDB:\n if i[2] <= 50.0:\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == 2:\n print('书名 作者 价格 类型 评价')\n for i in bookDB:\n if i[2] > 50.0 and i[2] <= 100.0:\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == 3:\n print('书名 作者 价格 类型 评价')\n for i in bookDB:\n if i[2] > 100.0 and i[2] <= 200:\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n elif n == 4:\n print('书名 作者 价格 类型 评价')\n for i in bookDB:\n if i[2] > 200:\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n else:\n print('输入错误')\ndef bookAdd():\n #增加一本书籍\n print('请输入书的信息')\n book1 = ['书名', '作者', '价格', '类型', '评价']\n book2 = []\n for i in range(0, 5):\n if i == 2:\n a = float(input('输入价格'))\n book2.append(a)\n else:\n a = input('输入' + book1[i])\n book2.append(a)\n bookDB.append(book2)\n print('*******输入成功*******')\ndef bookUpdate():\n #书籍修改\n n = input('输入需要修改的书名\\n')\n f = 0\n for i in bookDB:\n if i[0] == n:\n f = 1\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n while 1:\n xiugai = int(input('0.修改书名 1.修改作者 2.修改价格 3.修改类型 4.修改评价 5.修改完成 返回\\n'))\n if xiugai == 0:\n n1 = input('输入书名\\n')\n i[0] = n1\n print('修改完成了')\n elif xiugai == 1:\n n1 = input('输入作者\\n')\n i[1] = n1\n print('修改完成了')\n elif xiugai == 2:\n n1 = input('输入价格\\n')\n i[2] = n1\n print('修改完成了')\n elif xiugai == 3:\n n1 = input('输入类型\\n')\n i[3] = n1\n print('修改完成了')\n elif xiugai == 4:\n n1 = input('输入评价\\n')\n i[4] = n1\n print('修改完成了')\n elif xiugai == 5:\n break\n else:\n print('输入错误')\n break\n\n if f == 0:\n print('没有该书')\ndef bookDelate():\n #删除书籍\n n = input('输入需要删除的书名\\n')\n f = 0\n for i in bookDB:\n if i[0] == n:\n f = 1\n print('书名 作者 价格 类型 评价')\n print(i[0] + '\\t' + i[1] + '\\t' + str(i[2]) + '\\t' + i[3] + '\\t' + i[4])\n bookDB.remove(i)\n print('删除成功')\n break\n if f == 0:\n print('没有该书')\ndef welcome():\n print('***********欢迎来到飞哥图书管理系统************')\n while 1:\n r=input('1.登录 2.注册 3. 修改密码')\n if r=='1':\n login()\n while 1:\n print('**********欢迎进入图书系统***********')\n r = input('1.查看所有书籍信息\\n2.根据类别查询书籍\\n3.根据价格区间查询书籍\\n4.增加一本书籍\\n5.修改一本书\\n6.删除一本书\\n7.退出系统\\n')\n if r == '1':\n bookNews()\n elif r == '2':\n classfyBook()\n elif r == '3':\n bookMoney()\n elif r == '4':\n bookAdd()\n elif r == '5':\n bookUpdate()\n elif r == '6':\n bookDelate()\n elif r == '7':\n print('欢迎下次光临')\n exit()\n elif r=='2':\n regist()\n elif r=='3':\n pwdUpdate()\n else:\n print('输入有误请重新输入')\n\n\ndef manager(): # 管理器函数:用于系统整体调配\n # 1. 加载数据库\n initDataBase()\n # 2. 
启动项目---欢迎界面\n welcome()\n\ndef start(): # 程序的入口 run start main\n manager()\n\ndef ist(i):\n if '~'in i or '!'in i or '@'in i or '#'in i or '$'in i or '%'in i or '^'in i or '&'in i or '*'in i or '-'in i or '='in i or '['in i or ']'in i or '\\\\'in i or ';'in i:\n return True\n else:return False\ndef check(p):\n #密码检查\n n=len(p)\n a,b,c=0,0,0\n a1=0\n x=p[0]\n if x.isalpha():\n a1=1\n for j in p:\n if j.isdecimal():\n a=1\n if j.isalpha():\n b=1\n if ist(j):\n c=1\n if n<=8 or a==1 and b==0 or a==0 and b==1:\n print('等级为低')\n if a+b+c==2 and n>=8:\n print('等级为中')\n if a+b+c==3 and n>=16 and a1==1:\n print('等级为高')\n return True\n if p=='':\n print('密码不能为空')\n if a1==0:\n print('必须字母开头')\n for i in range(10):\n if p.startswith('{0}'.format(i)):\n print('不能以数字开头')\n if n<16:\n print('长度不够,不能少于16位')\n if a+b+c<3:\n print('需要由数字,字母及字符三种组合')\ndef pwdUpdate():\n while 1:\n name = input('请输入用户名:\\n')\n pwd = input('请原输入密码:\\n')\n # 判断是否在数据库\n flag = 0\n for i in userDB: # userDB: list i:列表 i--e:str\n if i[0] == name and i[1] == pwd:\n flag = 1\n break\n if flag == 0:\n print('用户名或密码错误,重新输入')\n else:\n break\n while 1:\n p = input('请输入新密码\\n')\n if check(p):\n break\n i[1]=p\n print('修改成功')\n\n\nstart()\n\n\n","sub_path":"com/baizhi/杜亚博作业/杜亚博_9.5/librarySystem.py","file_name":"librarySystem.py","file_ext":"py","file_size_in_byte":11149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"379140812","text":"import torch\nimport torch.nn as nn\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\n\nepochs = 300\nuse_adjoint = True\nbatch_size = 100\nrun_time = 25.0\ndata_size = 250\nnoise_std = 0.01\n\nif use_adjoint:\n from torchdiffeq import odeint_adjoint as odeint\nelse:\n from torchdiffeq import odeint\n\n\nclass MassSpringDamper(nn.Module):\n def __init__(self, m=1.0, k=1.0, b=0.5):\n super(MassSpringDamper, self).__init__()\n self.m = m\n self.b = b\n self.k = k\n self.A = torch.Tensor([[0, 1],[-self.k/self.m, -self.b/self.m]])\n\n def forward(self, t, x):\n dx = self.A.mm(x)\n return dx\n\nfor i in range(5):\n # true_x0 = torch.Tensor([[1.0],[0.0]])\n true_x0 = torch.Tensor(2,1)\n true_x0[0] = 2.0*torch.rand(1,1)\n true_x0[1] = 1.0*torch.rand(1,1)\n t = torch.linspace(0.0, run_time, data_size)\n with torch.no_grad():\n true_x = odeint(MassSpringDamper(), true_x0, t, method='dopri5')\n y = true_x.squeeze() + noise_std * torch.randn(data_size, 2)\n data_dict = {\"time\": t.numpy(),\n \"y1\": y[:, 0].numpy(),\n \"y2\": y[:, 1].numpy()}\n\n data = pd.DataFrame(data_dict)\n data.to_csv(\"data_set_\"+str(i)+\".csv\")\n\n\n\n\nwith torch.no_grad():\n fplot, ax = plt.subplots(1, 1, figsize=(4, 6))\n\n ax.plot(t.numpy(), true_x[:, 0, 0].numpy())\n ax.plot(t.numpy(), true_x[:, 1, 0].numpy())\n ax.plot(t.numpy(), y[:, 0].numpy(),'*')\n ax.plot(t.numpy(), y[:, 1].numpy(), '*')\n\n ax.set_xlabel('time (s)')\n ax.set_ylabel('states (x)')\n ax.legend(['position','velocity'])\n plt.show()\n\n\n\n\n","sub_path":"create_data_set.py","file_name":"create_data_set.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"473074350","text":"from __future__ import division, unicode_literals\n\nimport string\nfrom functools import wraps\nimport time\n\nimport werkzeug.routing\n\ntry:\n from google.appengine.api import memcache\nexcept ImportError:\n memcache = None\n\nclass Base64Converter(werkzeug.routing.BaseConverter):\n regex = 
'[0-9a-zA-Z_\\-]+'\n\nclass DigitsConverter(werkzeug.routing.BaseConverter):\n regex = '[0-9]+'\n\nclass DigitListConverter(werkzeug.routing.BaseConverter):\n regex = '[0-9]+(,[0-9]+)*'\n def to_python(self, value):\n return value.split(',')\n def to_url(self, value):\n return ','.join(value)\n\ndef temporary_cache(timeout):\n \"\"\"\n Decorator. Implement cacheing of function results.\n\n Function arguments must always be strings, and never contain slash.\n \"\"\"\n if memcache is None:\n def wrapper(function, timeout=timeout):\n cache = dict()\n def cleanup(current_time):\n old_keys = { key\n for key, (cached_result, cached_time) in cache.items()\n if cached_time < current_time - timeout }\n for key in old_keys:\n del cache[key]\n @wraps(function)\n def wrapped(*args):\n current_time = time.time()\n cached_result, cached_time = cache.get(args, (None, 0))\n delta = current_time - cached_time\n if delta < timeout:\n return cached_result\n result = function(*args)\n cleanup(current_time)\n cache[args] = result, current_time\n return result\n return wrapped\n else: # memcache\n def wrapper(function, timeout=timeout):\n @wraps(function)\n def wrapped(*args):\n key = '/'.join(args)\n cached_result = memcache.get(key)\n if cached_result is not None:\n return cached_result\n result = function(*args)\n try:\n memcache.add(key, result, timeout)\n except ValueError:\n pass\n return result\n return wrapped\n return wrapper\n\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"91479813","text":"from euler_funcs import divisor_list\nfrom itertools import takewhile, chain\nfrom functools import reduce\nfrom operator import mul\n\nprod = lambda L: reduce(mul, L, 1)\n\ndef pair_solutions(k):\n dvs = chain([1],takewhile(lambda e: e<=(k-1)//2, divisor_list(k-1)))\n l = set([(1+d, 1+(k-1)//d) for d in dvs])\n l = [list(e) for e in list(l)]\n l.sort(key=lambda e: e[0]*e[1])\n return l\n\ndef factorizations(n):\n if n == 1:\n return\n divs = divisor_list(n)\n for d in divs:\n nd = n//d\n if d > nd:\n break\n yield [d, nd]\n for factorization in factorizations(nd):\n yield [d] + factorization\n\ndef uniq_factorizations(n):\n L = factorizations(n)\n s = set([])\n for fact in L:\n fact.sort()\n s.add(tuple(fact))\n return s\n\ndef k_value(n=None, l=[]):\n # what number of ones is needed to make n1 * prod(l) = n1 + sum(l)\n if n is None:\n if len(l) == 0:\n return -1\n else:\n n = prod(l)\n sl = sum(l)\n n1 = n - sl\n else:\n if len(l) == 0:\n return -1\n else:\n sl = sum(l)\n n1 = n-sl\n return n1\n\n\ndef list_k_values(n):\n uniq = uniq_factorizations(n)\n for fact in uniq:\n kv = k_value(n, fact)\n print(\"k={}: 1^{} * {} = 1*{} + {}\".format(len(fact) + kv, kv, \"*\".join(map(str, fact)), kv, \"+\".join(map(str,fact))))\n\ndef doit(n):\n cache = {}\n for v in range(1,n):\n for k in get_ks(v):\n if v <= cache.get(k, v):\n cache[k] = v\n return cache\n\ndef get_ks(k):\n uniq = uniq_factorizations(k)\n for fact in uniq:\n kv = k_value(k, fact)\n yield len(fact)+kv\n\ndef convert_to_list(d):\n # d type: Dict[int,int]\n return list(sorted(((k,v) for k,v in d.items()), key=lambda e: e[0]))\n\ndef verify(n, t):\n for i in range(4, n):\n for fact in uniq_factorizations(i):\n kv = k_value(fact)\n if kv + len(fact) == t:\n return (fact, kv, i)\ndef main():\n it = doit(13000)\n psn = list(zip(*convert_to_list(it)))[1]\n print(sum(set(psn[:11998])))\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"Python/problem_88.py","file_name":"problem_88.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"221389421","text":"# Id numbers for the team members\n# 60264498,66721660,55488661,43376858\n\nfrom collections import defaultdict\nimport time\nimport math\nimport json\nfrom sys import path_importer_cache, stderr\nimport nltk\nimport re\nfrom urllib.parse import urlparse\nfrom nltk.corpus import stopwords\nclass Search_Engine():\n def __init__(self):\n self.dict_index = dict()\n self.query_list = []\n self.load_index()\n self.load_doc_id_dict()\n self.load_word_count_dict()\n \n\n def load_index(self) -> dict: # load the index into a dict\n if self.query_list == []:\n for i in range(26):\n with open('index_'+str(i)+'.json', 'r') as j:\n temp_dict = json.load(j)\n self.dict_index.update(temp_dict)\n else:\n for word in self.query_list:\n if word not in self.dict_index:\n if ord(word[0]) >= 48 and ord(word[0]) <= 57:\n # number\n with open('index_number.json', 'r') as j:\n temp_dict = json.load(j)\n self.dict_index[word] = temp_dict[word]\n del temp_dict\n\n elif ord(word[0]) >= 97 and ord(word[0]) <= 97+26:\n with open('index_'+str(ord(word[0])-97)+'.json', 'r') as j:\n temp_dict = json.load(j)\n self.dict_index.update(temp_dict)\n del temp_dict\n\n\n def load_doc_id_dict(self) -> dict:\n with open('doc_id_ver2.json', 'r') as d:\n self.doc_id_dict = dict(json.load(d))\n\n\n def load_word_count_dict(self):\n with open('word_count_page_ver2.json') as d:\n self.word_count_dict = dict(json.load(d))\n\n\n def get_url(self,doc_id) -> str: # pass in a doc_id and give you the corresponding url\n #print(\" doc_id: \", doc_id)\n return self.doc_id_dict[doc_id]\n\n\n def get_query(self): # split and stem the query into a list of string\n query = input(\"Search(enter nothing to quit): \")\n if query == '':\n exit()\n list_query = re.split(pattern=r'\\W+', string=query)\n stemmer = nltk.PorterStemmer() #jay's get_query\n self.query_list = [stemmer.stem(token) for token in list_query if token != '']\n for i in self.query_list:\n if 'and' == i: # Dealing with the and query, not including 'and' as a query\n self.query_list.remove(i)\n \n\n def get_id2ind(self,lst_ovlp):\n dict_id2ind = dict()\n for id in lst_ovlp:\n lst_ind = []\n for q in self.query_list:\n lst_ind.append(self.dict_index[q][str(id)][1]) # this will get the index list in the posting\n dict_id2ind[id] = lst_ind\n return dict_id2ind\n\n\n def get_phrase_count(self,dict_id2ind):\n phrase_count_dict = dict()\n for k in dict_id2ind: # k is the doc id\n phrase_count = len(dict_id2ind[k][0])\n for i in range(len(dict_id2ind[k])):\n if i == 0:\n word_ind = dict_id2ind[k][0]\n else:\n for ind in word_ind:\n if ind+1 not in dict_id2ind[k][i]:\n phrase_count -= 1\n break\n phrase_count_dict[k] = phrase_count\n return phrase_count_dict\n\n\n def filter_url(self,doc_id_lst):\n path_count = defaultdict(int)\n url_set = set()\n lst_dup = []\n top_url = ''\n for doc_id in doc_id_lst:\n p = urlparse(self.doc_id_dict[doc_id])\n if p.netloc+p.path not in url_set:\n url_str = p.netloc+p.path\n url_str = '/'.join(url_str.split('/')[:2])\n path_count[url_str] += 1\n url_set.add(p.netloc+p.path)\n else:\n lst_dup.append(doc_id)\n find_top = sorted([url for url in path_count.keys()], key= lambda x: path_count[x], reverse=True)\n if len(find_top) != 0:\n top_url = find_top[0] +'/'\n return top_url , [id for id in doc_id_lst if id not in lst_dup + 
['https://'+top_url,'http://'+top_url]]\n \n\n def normalize_vector(self, v):\n size_of_v = math.sqrt(sum([i**2 for i in v]))\n return [i/size_of_v for i in v]\n\n\n def result_sorting(self,phrase_count) -> list:\n \n query_document_tf_idf = {}\n for doc_id in phrase_count:\n query_tf_idf_vector = []\n doc_tf_vector = []\n for w in self.query_list:\n\n #query tf idf\n query_term_freq = 1 + math.log(self.query_list.count(w), 2)\n i_doc_freq = math.log( 55394 / len(list(self.dict_index[w].keys())))\n query_tf_idf = query_term_freq * i_doc_freq\n #doc tf idf\n #print(self.dict_index[w])\n #'55283': [161, [4, 10, 27, 40]]\n doc_term_freq = 1 + math.log(self.dict_index[w][str(doc_id)][0], 2)\n\n #create vector\n doc_tf_vector.append(doc_term_freq)\n query_tf_idf_vector.append(query_tf_idf)\n #end for\n # create a dict, which its values are a tuple of two vector. query v on index 0 and doc v on index 1\n \n query_document_tf_idf[doc_id] = (self.normalize_vector(query_tf_idf_vector), self.normalize_vector(doc_tf_vector))\n \n cosine_score_dict = self.calculate_cosine_similarity(query_document_tf_idf)\n \n return sorted([doc_id for doc_id in cosine_score_dict], key= lambda x: cosine_score_dict[x], reverse=True)\n\n\n def calculate_cosine_similarity(self, query_doc_tf_idf_dict):\n self.cos_score_dict = dict()\n for doc_id, qd_vector in query_doc_tf_idf_dict.items():\n # cosine similarity of two normalized vectors is their dot product, so multiply the paired components\n cos_score = sum(qd_vector[0][i]*qd_vector[1][i] for i in range(len(qd_vector[0])))\n self.cos_score_dict[doc_id] = cos_score\n return self.cos_score_dict\n\n\n def searching(self): # this is where the actual searching is happening\n start_time = time.time()\n self.load_index()\n lst_all_words = []\n search_result = [] # list of doc ids\n # search machine learning\n # step1. find list of doc_id that contains both machine and learning.\n # step2. for each doc_id, for the ind_list in the index, count how many machine ind+1 is in learning ind list.\n # step3. 
based on the count you got, calculate tf-idf score.\n for q in self.query_list[:]:\n if q in self.dict_index:\n lst_all_words.append([int(doc_id) for doc_id in self.dict_index[q].keys()])\n else:\n self.query_list.remove(q)\n if len(self.query_list) == 0:\n print('No record')\n return\n lst_ovlp = lst_all_words[0]\n for i in range(len(lst_all_words)):\n lst_ovlp = list(set(lst_ovlp) & set(lst_all_words[i]))\n # we now have the overlapping doc_ids list\n #step2.\n dict_id2ind = self.get_id2ind(lst_ovlp)\n phrase_count = self.get_phrase_count(dict_id2ind)\n sorting_timer = time.time()\n search_result = self.result_sorting(phrase_count)\n print('sorting used', time.time()-sorting_timer,'to execute.')\n top_url, search_result = self.filter_url(search_result)\n if search_result == []:\n self.query_list = self.query_list[0]\n return self.searching()\n if top_url != '':\n print('Search found @: https://'+top_url)\n for doc_id in search_result[:10]:\n print('Search found @:', self.get_url(doc_id))\n #print(\" with score:\", self.cos_score_dict[doc_id])\n \n print('Search used', time.time()-start_time,'to execute.')\n \n\n def run(self):\n while True:\n self.get_query()\n self.searching()\n\n\nif __name__ == '__main__':\n print('--------\\nSearch Engine\\n--------\\n')\n searchEngine = Search_Engine()\n searchEngine.run()\n\n# Structure of the index posting:\n # {token: [doc_id, td_idf_scoring]}\n\n # I eat apple\n #document tf_idf : doc1: 0.3 0.1 0.2\n # q tf_idf 0.3 0.3 0.3\n\n","sub_path":"Inverted_indexer/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":8435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"634286797","text":"#import mysql.connector\nimport sqlite3\nfrom flask import make_response, abort\n\n\n\ndef read():\n\t\n\ttemperatures = []\n\t\n\tconn = sqlite3.connect('dht.db')\n\t\n\tc = conn.cursor()\n\tc.execute('SELECT id, reading_time, devicename, humidity, temperature, moved,label FROM readings')\n\tresults = c.fetchall()\n\t\n\tfor result in results:\n\t\t\t\t\n\t\ttemperatures.append({\n\t\t'reading_time': result[1],\n\t\t'devicename': result[2],\n\t\t\t'humidity': result[3],\n\t\t\t'temp': result[4],\n\t\t\t'moved': result[5],\n\t\t\t'label': result[6]})\n\t\n\tconn.close()\n\t\n\treturn temperatures\n\n\n\ndef create(globaltemperature):\n\t'''\n\tThis function creates a new temperature record in the database\n\tbased on the passed in temperature data\n\t:param globaltemperature: Global temperature record to create in the database\n\t:return: 200 on success\n\t'''\n\t\n\treading_time = globaltemperature.get('reading_time', None)\n\tdevicename = globaltemperature.get('devicename', None)\n\thumidity = globaltemperature.get('humidity', None)\n\ttemp = globaltemperature.get('temp', None)\n\tmoved = globaltemperature.get('moved', None)\n\tlabel = globaltemperature.get('label', None)\n\n\n\tconn = sqlite3.connect('dht.db')\n\t\n\tc = conn.cursor()\t\n\n\tc.execute(\"\"\"INSERT INTO readings ('reading_time', 'devicename', 'humidity', 'temperature','moved','label') VALUES (?, ?, ?, ?, ?,?)\"\"\",\n (reading_time, devicename, humidity, temp, moved,label))\n\tprint((reading_time, devicename, humidity, temp, moved,label))\n\tconn.commit()\n\tconn.close()\n\t\n\t\n\treturn make_response('Global temperature record successfully created', 200)\n","sub_path":"Real-Time 
Prediction/Cloud/globaltemperature.py","file_name":"globaltemperature.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"578620836","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env\n\nimport os\nimport sys\nimport uuid\nimport argparse\n\n# env\nsys.path.append('/usr/lib/python2.7/dist-packages/')\nsys.path.append('/usr/lib/python2.7/')\nsys.path.append('/usr/local/lib/python2.7/dist-packages/')\nsys.path.append('/data2/django_current/')\nsys.path.append('/data2/django_projects/')\nsys.path.append('/data2/django_third/')\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djforms.settings')\n\n# primt django\nimport django\ndjango.setup()\n\nfrom django.conf import settings\n\nfrom djforms.scholars.models import Presentation\n\n# set up command-line options\ndesc = \"\"\"\nAccepts as input the year to munge\n\"\"\"\n\nparser = argparse.ArgumentParser(description=desc)\n\nparser.add_argument(\n '-y', '--year',\n required=True,\n help=\"Year to munge.\",\n dest='year'\n)\nparser.add_argument(\n '--test',\n action='store_true',\n help=\"Dry run?\",\n dest='test'\n)\n\n\ndef main():\n\n prez = Presentation.objects.filter(date_created__year=year)\n\n for p in prez:\n if p.need_table==\"Yes\" or p.need_electricity==\"Yes\":\n print(\n \"{}|{}|{}\".format(\n p.need_table, p.need_electricity, p.user.email\n )\n )\n\n\n######################\n# shell command line\n######################\n\nif __name__ == '__main__':\n args = parser.parse_args()\n year = args.year\n test = args.test\n\n sys.exit(main())\n","sub_path":"djforms/scholars/table_electricity.py","file_name":"table_electricity.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"477801989","text":"import pandas as pd\r\n\r\ndef get_older_users(age, url):\r\n \"\"\"Descripción:\r\n -----------\r\n Retorna la cantidad de personas cuya edad es mayor a una dada por parámetro\r\n\r\n Parámetros:\r\n -----------\r\n age (int):\r\n Edad mínima con la cual se buscan y se cuentan las personas.\r\n \r\n Retorna:\r\n ---------- \r\n int: 0... 
N \r\n Cantidad de usuarios con una edad mayor a la dada por parámetro\r\n\r\n Except:\r\n ----------\r\n int: 0\r\n En caso de que no se pueda convertir el parámetro a int\r\n \"\"\"\r\n try:\r\n age = int(age)\r\n except:\r\n return 0\r\n data = pd.read_json(url)\r\n frame1 = pd.DataFrame([info for info in data['results']])\r\n data = pd.concat([frame1, pd.DataFrame((dob for dob in frame1['dob'] if dob['age'] > age))], axis=1)\r\n return data.count()['age']\r\n\r\n\r\nprint(get_older_users(42,'https://magicsolutions.co/users.json'))\r\nprint(get_older_users('32','https://magicsolutions.co/users.json'))\r\nprint(get_older_users('a','https://magicsolutions.co/users.json'))\r\nprint(get_older_users({},'https://magicsolutions.co/users.json'))\r\nprint(get_older_users([],'https://magicsolutions.co/users.json'))","sub_path":"older_users_con_casos.py","file_name":"older_users_con_casos.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"358950746","text":"import os\nlistadeprodutos={}\ndef menuhelp():\n help(menuinicio)\n help(menualterar)\n help(menuconsultar)\n help(categ)\n help(menuhelp)\ndef menuconsultar():\n print(\"\"\" \n Digite (i) para ver todos os produtos\n \n Digite (a) para ver todos os alimentos\n \n Digite (r) para ver todas as roupas\n \n Digite (p) para pesquisar um produto específico\n \n Digite (v) para voltar\n \n \"\"\")\ndef menuinicio():\n print(\"\"\"\n Digite (a) para alterar o estoque\n\n Digite (c) para consultar\n \n Digite (e) para ajuda\n\n Digite (s) para sair\n\n \"\"\")\ndef menualterar():\n print(\"\"\"\n Digite (a) para adicionar\n \n Digite (e) para excluir\n \n \"\"\") \ndef categ(cadastrocateg):\n if cadastrocateg ==\"a\":\n listadeprodutos.fromkeys([\"alimento\"])\n categoria=\"alimento\"\n elif cadastrocateg==\"r\":\n listadeprodutos.fromkeys([\"roupa\"])\n categoria=\"roupa\"\n return (categoria)\ndef alimentoroupa():\n print(\"\"\"\n Digite (a) para alimento\n \n Digite (r) para roupa\n \n Digite (v) para voltar\n \"\"\")\nprodutopreçoa={}\nprodutopreçor={}\nk=0\ncontaadicionados=0\ncontaexcluidos=0\nopção=\"\"\nwhile opção!= \"s\":\n menuinicio() \n opção=input(\"\"\"\n Escolha uma opção: \"\"\")\n os.system('cls' if os.name == 'nt' else 'clear')\n if opção ==\"a\":\n menualterar()\n adicionaexclui=input(\" Escolha uma opção: \")\n os.system('cls' if os.name == 'nt' else 'clear') \n while adicionaexclui==\"a\":\n alimentoroupa() \n cadastrocateg=input(\" Escolha uma opção: \")\n os.system('cls' if os.name == 'nt' else 'clear') \n contaadicionados+=1 \n cat=categ(cadastrocateg) \n if cat==\"alimento\":\n print(\"\") \n produtoa=input(\" Escreva o nome do produto: \")\n print(\"\") \n print(\" Caso o preço possua centavos, utilizar ponto ao invés de virgula.\")\n print(\"\") \n preçoa=float(input(\" Digite o preço: R$ \"))\n os.system('cls' if os.name == 'nt' else 'clear')\n preçoal=str(\"%.2f\" %preçoa) \n a=\"R$\"\n a+=preçoal\n produtopreçoa[produtoa]=a\n listadeprodutos[\"alimento\"]=produtopreçoa\n print(\"\") \n print(\" Produto adicionado com sucesso!\")\n print(\"\") \n print(\" No estoque:\", listadeprodutos) \n elif cat==\"roupa\":\n print(\"\") \n produtor=input(\" Escreva o nome do produto: \")\n print(\"\") \n print(\" Caso o preço possua centavos, utilizar ponto ao invés de virgula.\") \n print(\"\") \n preçor=float(input(\" Digite o preço: R$ \"))\n os.system('cls' if os.name == 'nt' else 'clear')\n preçoro=str(\"%.2f\" %preçor)\n r=\"R$\"\n 
r+=preçoro \n produtopreçor[produtor]=r\n listadeprodutos[\"roupa\"]=produtopreçor\n print(\"\") \n print(\" Produto adicionado com sucesso! \")\n print(\"\") \n print(\" No estoque:\", listadeprodutos) \n print(\"\") \n print(\"\") \n continuar=input(\" Pressione enter para continuar...\")\n os.system('cls' if os.name == 'nt' else 'clear') \n print(\"\") \n print(\" Para adicionar outros produtos, digite (a)\")\n print(\"\")\n print(\" Para voltar digite (v)\")\n print(\"\")\n adicionaexclui=input(\" Escolha uma opção: \")\n os.system('cls' if os.name == 'nt' else 'clear') \n print(\"\")\n print(\" \", contaexcluidos, \"produto(s) excluido(s)\")\n print(\"\") \n print(\" \", contaadicionados, \"produto(s) adicionado(s)\") \n totalprodutos=contaadicionados - contaexcluidos \n print(\"\") \n print(\" \", totalprodutos, \"produto(s) restante(s) no estoque\") \n print(\"\")\n continuar=input(\" Pressione enter para continuar...\")\n os.system('cls' if os.name == 'nt' else 'clear') \n continue\n while adicionaexclui==\"e\":\n print(\"\")\n print(\" No estoque:\", listadeprodutos) \n print(\"\") \n print(\" Digite (a) para alimento\")\n print(\"\")\n print(\" Digite (r) roupa\")\n print(\"\") \n cadastrocateg=input(\" Escolha uma opção: \")\n os.system('cls' if os.name == 'nt' else 'clear') \n contaexcluidos+=1 \n if cadastrocateg==\"a\": \n print(\"\") \n excluialimento=input(\" Digite o nome do produto a ser excluido: \")\n del produtopreçoa[excluialimento]\n os.system('cls' if os.name == 'nt' else 'clear') \n print(\"\") \n print(\" Produto excluido com sucesso!\")\n print(\"\") \n print(\" No estoque:\", listadeprodutos)\n print(\"\")\n print(\"\") \n elif cadastrocateg==\"r\":\n print(\"\") \n excluiroupa=input(\" Digite o nome do produto a ser excluido: \")\n del produtopreçor[excluiroupa]\n os.system('cls' if os.name == 'nt' else 'clear') \n print(\"\") \n print(\" Produto excluido com sucesso!\")\n print(\"\") \n print(\" No estoque:\", listadeprodutos)\n print(\"\")\n print(\" Para excluir outros produtos, digite (e)\")\n print(\"\")\n print(\" Para voltar digite (v)\")\n print(\"\")\n adicionaexclui=input(\" Escolha uma opção:\")\n os.system('cls' if os.name == 'nt' else 'clear') \n print(\"\")\n print(\" \", contaexcluidos, \"produto(s) excluido(s)\")\n print(\"\") \n print(\" \", contaadicionados, \"produto(s) adicionado(s)\") \n totalprodutos=contaadicionados - contaexcluidos \n print(\"\") \n print(\" \", totalprodutos, \"produto(s) restante(s) no estoque\") \n print(\"\")\n continuar=input(\" Pressione enter para continuar...\")\n os.system('cls' if os.name == 'nt' else 'clear') \n break\n while opção==\"c\":\n menuconsultar()\n consulta=input(\" Escolha uma opção: \") \n os.system('cls' if os.name == 'nt' else 'clear') \n while consulta==\"i\":\n v=set(produtopreçor.keys()) \n w=set(produtopreçoa.keys()) \n x=len(produtopreçoa)\n y=len(produtopreçor)\n z=x+y\n print(\"\") \n print(\" Lista dos produtos: \", listadeprodutos) \n print(\"\") \n print(\" Roupas:\", v, \"e Alimentos:\", w ) \n print(\"\") \n print(\" \", z, \"produto(s) restante(s) no estoque\")\n print(\"\") \n continuar=input(\" Pressione enter para continuar...\") \n os.system('cls' if os.name == 'nt' else 'clear') \n menuconsultar() \n consulta=input(\" Escolha uma opção:\") \n os.system('cls' if os.name == 'nt' else 'clear') \n continue\n while consulta==\"a\": \n print(\"\") \n print(\" \", set(produtopreçoa.keys())) \n print(\"\") \n print(\" \", len(produtopreçoa), \"alimento(s) no estoque\") \n print(\"\")\n 
continuar=input(\" Pressione enter para continuar...\") \n os.system('cls' if os.name == 'nt' else 'clear') \n menuconsultar() \n consulta=input(\" Escolha uma opção:\") \n os.system('cls' if os.name == 'nt' else 'clear') \n break\n while consulta==\"r\": \n print(\"\") \n print(\" \", set(produtopreçor.keys())) \n print(\"\") \n print(\" \", len(produtopreçor), \"roupa(s) no estoque\")\n print(\"\") \n consulta=input(\" Pressione enter para continuar...\") \n os.system('cls' if os.name == 'nt' else 'clear') \n \n continue\n while consulta==\"p\": \n print(\"\") \n print(\" No estoque: \", listadeprodutos) \n alimentoroupa()\n consultac=input(\" Escolha uma opção: \") \n k+=1 \n os.system('cls' if os.name == 'nt' else 'clear') \n if consultac==\"a\": \n print(\"\") \n consultaalimento=input(\" Digite o nome do produto a ser pesquisado: \")\n os.system('cls' if os.name == 'nt' else 'clear') \n if produtopreçoa.get(consultaalimento) == None :\n print(\"\") \n print(\" Não temos esse produto no estoque\")\n print(\"\")\n continuar=input(\" Pressione o enter para continuar\")\n os.system('cls' if os.name == 'nt' else 'clear')\n elif produtopreçoa.get(consultaalimento) != None:\n print(\" \", consultaalimento, \"custa: R$\", produtopreçoa[consultaalimento], \"reais\")\n print(\"\")\n continuar=input(\" Pressione o enter para continuar\")\n os.system('cls' if os.name == 'nt' else 'clear')\n elif consultac ==\"r\":\n print(\"\") \n consultaroupa=input(\" Digite o nome do produto a ser pesquisado: \")\n if produtopreçor.get(consultaroupa)== None:\n os.system('cls' if os.name == 'nt' else 'clear') \n print(\"\")\n print(\" Não temos esse produto no estoque\")\n print(\"\")\n print(\"\")\n continuar=input(\" Pressione o enter para continuar...\")\n os.system('cls' if os.name == 'nt' else 'clear')\n elif produtopreçor.get(consultaroupa)!=None:\n print(\"\") \n print(\" \", consultaroupa, \"Custa\", produtopreçor[consultaroupa] )\n print(\"\")\n continuar=input(\" Pressione o enter para continuar...\")\n os.system('cls' if os.name == 'nt' else 'clear')\n elif consultac!=\"a\" and consultac!=\"r\": \n print(\"\")\n consulta=input(\" Pressione enter para continuar...\") \n os.system('cls' if os.name == 'nt' else 'clear')\n continue\n if consulta==\"v\":\n print(\"\") \n opção=input(\" Pressione enter para continuar...\")\n os.system('cls' if os.name == 'nt' else 'clear')\n continue\n \n if opção==\"e\":\n menuhelp()\n os.system(\"pause\")\n os.system(\"cls\" if os.name == 'nt' else 'clear')\n elif opção==\"s\":\n print(\"\") \n print(\" Obrigado por usar nosso programa!\")\n","sub_path":"Trab Python 2016.py","file_name":"Trab Python 2016.py","file_ext":"py","file_size_in_byte":12041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"362585633","text":"# -*- coding: utf-8 -*-\nfrom socket import *\nimport threading\nimport Queue\n\n# test in python 2.7\n\n# a dic of local port mapped to machine\nssh_server_dic = {\"host\": 'localhost', \"port\": 0, \"ip\": \"x.x.x.x\"}\n\n# sync all the threads in each of tunnel\nrunning_flag = []\n\n# receive buffSize\nbuffer_size = 2048\n\ndef data_encryp(data):\n temp_data = list(data)\n for i in range(0, len(temp_data)):\n if ord(temp_data[i]) == 255:\n temp_data[i] = chr(0)\n else:\n temp_data[i] = chr(ord(temp_data[i]) + 1)\n return \"\".join(temp_data)\n\ndef data_decryp(data):\n temp_data = list(data)\n for i in range(0, len(temp_data)):\n if ord(temp_data[i]) == 0:\n temp_data[i] = chr(255)\n else:\n 
temp_data[i] = chr(ord(temp_data[i]) - 1)\n return \"\".join(temp_data)\n\ndef get_data_from_ssh_server(rev_msg, tcp_socket, flag):\n \"\"\"\n :param rev_msg: a queue buffer of message need to be send to SSH client\n :param tcp_socket: instance of socket used for sending data\n :param flag: control this function\n :return: null\n \"\"\"\n while running_flag[flag]:\n data = tcp_socket.recv(buffer_size)\n if len(data):\n rev_msg.put(data_decryp(data))\n else:\n running_flag[flag] = False\n\n\ndef send_data_to_ssh_client(rev_msg, tcp_socket, flag):\n \"\"\"\n :param rev_msg: a queue buffer of message need to be send to SSH client\n :param tcp_socket: instance of socket used for sending data\n :param flag: control this function\n :return: null\n \"\"\"\n while running_flag[flag]:\n try:\n data = rev_msg.get(timeout=10)\n data = tcp_socket.send(data_encryp(data))\n except:\n pass\n\n\ndef get_data_from_ssh_client(send_msg, tcp_socket, flag):\n \"\"\"\n :param send_msg: a queue buffer of message need to be send to SSH server in each machine\n :param tcp_socket: instance of socket used for sending data\n :param flag: control this function\n :return: null\n \"\"\"\n while running_flag[flag]:\n data = tcp_socket.recv(buffer_size)\n if len(data):\n send_msg.put(data_decryp(data))\n else:\n running_flag[flag] = False\n\n\ndef send_data_to_ssh_server(send_msg, tcp_socket, flag):\n \"\"\"\n :param send_msg: a queue buffer of message need to be send to SSH server in each machine\n :param tcp_socket: instance of socket used for sending data\n :param flag: control this function\n :return: null\n \"\"\"\n while running_flag[flag]:\n try:\n data = send_msg.get(timeout=10)\n data = tcp_socket.send(data_encryp(data))\n except:\n pass\n\n\ndef handle_connections(host, ip, port):\n \"\"\"\n :param host: local ip\n :param ip: which machine the data will be forwarded\n :param port: local port\n :return: null\n \"\"\"\n ssh_client_socket = socket(AF_INET, SOCK_STREAM)\n ssh_client_socket.bind((host, port))\n\n # listen 10 client\n ssh_client_socket.listen(10)\n while True:\n ssh_client_side, address = ssh_client_socket.accept()\n\n # two queue for keeping data from SSH client and SSH server\n buffer_send = Queue.Queue()\n buffer_rev = Queue.Queue()\n\n ssh_server_side = socket(AF_INET, SOCK_STREAM)\n ssh_server_side.connect((ip, 2222))\n\n flag = True\n\n running_flag.append(flag)\n\n rev1 = threading.Thread(target=get_data_from_ssh_server,\n args=(buffer_rev, ssh_server_side, len(running_flag) - 1))\n rev2 = threading.Thread(target=send_data_to_ssh_client,\n args=(buffer_rev, ssh_client_side, len(running_flag) - 1))\n\n send1 = threading.Thread(target=get_data_from_ssh_client,\n args=(buffer_send, ssh_client_side, len(running_flag) - 1))\n send2 = threading.Thread(target=send_data_to_ssh_server,\n args=(buffer_send, ssh_server_side, len(running_flag) - 1))\n\n rev1.start()\n rev2.start()\n send1.start()\n send2.start()\n\n\nif __name__ == \"__main__\":\n print(\"start SSH forward server\")\n\n print(\"ssh mapping \" + ssh_server_dic[\"host\"] + \":\" + str(ssh_server_dic[\"port\"]) + \" => \" + ssh_server_dic[\"ip\"] + \":22\")\n t = threading.Thread(target=handle_connections, args=(ssh_server_dic[\"host\"], ssh_server_dic[\"ip\"], ssh_server_dic[\"port\"]))\n t.start()\n\n print(\"initialize SSH forward server 
done\")\n","sub_path":"forwardSSH/fordwardSSH.py","file_name":"fordwardSSH.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"252271951","text":"import discord\nimport asyncio\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print('logged in as {} ({})'.format(client.user.name, client.user.id))\n # channel = client.get_channel('545382243309191232')\n # message = 'It is good to see the young show an interest in the ways of magic. Its circles can be used in tandem with our unique ways of tinkering.'\n # print('sending message to {}: {}'.format(channel, message))\n # await client.send_message(channel, message)\n\n@client.event\nasync def on_message(message):\n print('message received from {}: {}'.format(message.channel, message.content))\n if client.user.mentioned_in(message):\n await client.add_reaction(message, u\"\\U0001F636\")\n\ndef get_client():\n return client\n\nasync def start():\n yield client.start('NTQ2Mjc1MzUxMjc0MzIzOTY4.D0l7OA.8609bjmEWLIEFX49C0Klbe3SjS4')\n\nasync def stop():\n yield client.logout()\n","sub_path":"src/tobon.py","file_name":"tobon.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"7882202","text":"# Create a sample :class:`pyvista.UniformGrid` dataset.\n#\nimport numpy as np\nimport pyvista as pv\nn = 21\nc = -(n - 1) / 2\nvol = pv.UniformGrid(dimensions=(n, n, n), origin=(c, c, c))\nscalars = np.linalg.norm(vol.points, axis=1)\nscalars *= 255 / scalars.max()\nvol['scalars'] = scalars\n#\n# Demonstrate nearest (default) interpolation.\n#\npl = pv.Plotter()\nactor = pl.add_volume(\n vol,\n show_scalar_bar=False,\n opacity=[0.3, 0.0, 0.05, 0.0, 0.0, 0.0, 1.0, 0.0],\n cmap='plasma',\n)\nactor.prop.interpolation_type = 'nearest'\npl.show()\n#\n# Demonstrate linear interpolation.\n#\npl = pv.Plotter()\nactor = pl.add_volume(\n vol,\n show_scalar_bar=False,\n opacity=[0.3, 0.0, 0.05, 0.0, 0.0, 0.0, 1.0, 0.0],\n cmap='plasma',\n)\nactor.prop.interpolation_type = 'linear'\npl.show()\n","sub_path":"version/0.39/api/plotting/_autosummary/pyvista-plotting-volume_property-VolumeProperty-interpolation_type-1.py","file_name":"pyvista-plotting-volume_property-VolumeProperty-interpolation_type-1.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"204898127","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# COPYRIGHT (C) 2014-2020 Mitsuo KONDOU.\r\n# This software is released under the MIT License.\r\n# https://github.com/konsan1101\r\n# Thank you for keeping the rules.\r\n\r\n\r\n\r\nimport sys\r\nimport os\r\nimport time\r\nimport datetime\r\nimport codecs\r\nimport glob\r\n\r\nimport queue\r\nimport threading\r\nimport subprocess\r\n\r\nimport pyautogui\r\n\r\n\r\n\r\n# インターフェース\r\nqCtrl_control_kernel = 'temp/control_kernel.txt'\r\nqCtrl_control_speech = 'temp/control_speech.txt'\r\nqCtrl_control_vision = 'temp/control_vision.txt'\r\nqCtrl_control_desktop = 'temp/control_desktop.txt'\r\nqCtrl_control_bgm = 'temp/control_bgm.txt'\r\nqCtrl_control_browser = 'temp/control_browser.txt'\r\nqCtrl_control_player = 'temp/control_player.txt'\r\nqCtrl_control_chatting = 'temp/control_chatting.txt'\r\nqCtrl_control_knowledge = 'temp/control_knowledge.txt'\r\n\r\n# 外部プログラム\r\nqExt_program = '__ext_program.bat'\r\n\r\n\r\n\r\n# qLog,qFunc 共通ルーチン\r\nimport 
_v5__qLog\r\nqLog = _v5__qLog.qLog_class()\r\nimport _v5__qFunc\r\nqFunc = _v5__qFunc.qFunc_class()\r\n\r\nqPLATFORM = qFunc.getValue('qPLATFORM' )\r\nqRUNATTR = qFunc.getValue('qRUNATTR' )\r\nqHOSTNAME = qFunc.getValue('qHOSTNAME' )\r\nqUSERNAME = qFunc.getValue('qUSERNAME' )\r\nqPath_pictures = qFunc.getValue('qPath_pictures' )\r\nqPath_videos = qFunc.getValue('qPath_videos' )\r\nqPath_cache = qFunc.getValue('qPath_cache' )\r\nqPath_sounds = qFunc.getValue('qPath_sounds' )\r\nqPath_icons = qFunc.getValue('qPath_icons' )\r\nqPath_fonts = qFunc.getValue('qPath_fonts' )\r\nqPath_log = qFunc.getValue('qPath_log' )\r\nqPath_work = qFunc.getValue('qPath_work' )\r\nqPath_rec = qFunc.getValue('qPath_rec' )\r\n\r\nqPath_s_ctrl = qFunc.getValue('qPath_s_ctrl' )\r\nqPath_s_inp = qFunc.getValue('qPath_s_inp' )\r\nqPath_s_wav = qFunc.getValue('qPath_s_wav' )\r\nqPath_s_jul = qFunc.getValue('qPath_s_jul' )\r\nqPath_s_STT = qFunc.getValue('qPath_s_STT' )\r\nqPath_s_TTS = qFunc.getValue('qPath_s_TTS' )\r\nqPath_s_TRA = qFunc.getValue('qPath_s_TRA' )\r\nqPath_s_play = qFunc.getValue('qPath_s_play' )\r\nqPath_v_ctrl = qFunc.getValue('qPath_v_ctrl' )\r\nqPath_v_inp = qFunc.getValue('qPath_v_inp' )\r\nqPath_v_jpg = qFunc.getValue('qPath_v_jpg' )\r\nqPath_v_detect = qFunc.getValue('qPath_v_detect' )\r\nqPath_v_cv = qFunc.getValue('qPath_v_cv' )\r\nqPath_v_photo = qFunc.getValue('qPath_v_photo' )\r\nqPath_v_msg = qFunc.getValue('qPath_v_msg' )\r\nqPath_d_ctrl = qFunc.getValue('qPath_d_ctrl' )\r\nqPath_d_play = qFunc.getValue('qPath_d_play' )\r\nqPath_d_prtscn = qFunc.getValue('qPath_d_prtscn' )\r\nqPath_d_movie = qFunc.getValue('qPath_d_movie' )\r\nqPath_d_upload = qFunc.getValue('qPath_d_upload' )\r\n\r\nqBusy_dev_cpu = qFunc.getValue('qBusy_dev_cpu' )\r\nqBusy_dev_com = qFunc.getValue('qBusy_dev_com' )\r\nqBusy_dev_mic = qFunc.getValue('qBusy_dev_mic' )\r\nqBusy_dev_spk = qFunc.getValue('qBusy_dev_spk' )\r\nqBusy_dev_cam = qFunc.getValue('qBusy_dev_cam' )\r\nqBusy_dev_dsp = qFunc.getValue('qBusy_dev_dsp' )\r\nqBusy_dev_scn = qFunc.getValue('qBusy_dev_scn' )\r\nqBusy_s_ctrl = qFunc.getValue('qBusy_s_ctrl' )\r\nqBusy_s_inp = qFunc.getValue('qBusy_s_inp' )\r\nqBusy_s_wav = qFunc.getValue('qBusy_s_wav' )\r\nqBusy_s_STT = qFunc.getValue('qBusy_s_STT' )\r\nqBusy_s_TTS = qFunc.getValue('qBusy_s_TTS' )\r\nqBusy_s_TRA = qFunc.getValue('qBusy_s_TRA' )\r\nqBusy_s_play = qFunc.getValue('qBusy_s_play' )\r\nqBusy_v_ctrl = qFunc.getValue('qBusy_v_ctrl' )\r\nqBusy_v_inp = qFunc.getValue('qBusy_v_inp' )\r\nqBusy_v_QR = qFunc.getValue('qBusy_v_QR' )\r\nqBusy_v_jpg = qFunc.getValue('qBusy_v_jpg' )\r\nqBusy_v_CV = qFunc.getValue('qBusy_v_CV' )\r\nqBusy_d_ctrl = qFunc.getValue('qBusy_d_ctrl' )\r\nqBusy_d_inp = qFunc.getValue('qBusy_d_inp' )\r\nqBusy_d_QR = qFunc.getValue('qBusy_d_QR' )\r\nqBusy_d_rec = qFunc.getValue('qBusy_d_rec' )\r\nqBusy_d_play = qFunc.getValue('qBusy_d_play' )\r\nqBusy_d_browser = qFunc.getValue('qBusy_d_browser')\r\nqBusy_d_upload = qFunc.getValue('qBusy_d_upload' )\r\nqRdy__s_force = qFunc.getValue('qRdy__s_force' )\r\nqRdy__s_fproc = qFunc.getValue('qRdy__s_fproc' )\r\nqRdy__s_sendkey = qFunc.getValue('qRdy__s_sendkey')\r\nqRdy__v_reader = qFunc.getValue('qRdy__v_reader' )\r\nqRdy__v_sendkey = qFunc.getValue('qRdy__v_sendkey')\r\nqRdy__d_reader = qFunc.getValue('qRdy__d_reader' )\r\nqRdy__d_sendkey = qFunc.getValue('qRdy__d_sendkey')\r\n\r\n\r\n\r\nclass proc_controls:\r\n\r\n def __init__(self, name='thread', id='0', runMode='debug', \r\n micDev='0', micType='bluetooth', micGuide='on', micLevel='777', 
):\r\n\r\n self.path = qPath_s_ctrl\r\n\r\n self.runMode = runMode\r\n self.micDev = micDev\r\n self.micType = micType\r\n self.micGuide = micGuide\r\n self.micLevel = micLevel\r\n\r\n self.breakFlag = threading.Event()\r\n self.breakFlag.clear()\r\n self.name = name\r\n self.id = id\r\n self.proc_id = '{0:10s}'.format(name).replace(' ', '_')\r\n self.proc_id = self.proc_id[:-2] + '_' + str(id)\r\n if (runMode == 'debug'):\r\n self.logDisp = True\r\n else:\r\n self.logDisp = False\r\n qLog.log('info', self.proc_id, 'init', display=self.logDisp, )\r\n\r\n self.proc_s = None\r\n self.proc_r = None\r\n self.proc_main = None\r\n self.proc_beat = None\r\n self.proc_last = None\r\n self.proc_step = '0'\r\n self.proc_seq = 0\r\n\r\n # 変数\r\n self.last_text = ''\r\n self.last_time = time.time()\r\n\r\n # 起動条件(kernel.pyと合わせる)\r\n self.run_vision = False\r\n self.run_desktop = False\r\n self.run_bgm = False\r\n self.run_browser = False\r\n self.run_player = False\r\n self.run_chatting = False\r\n self.run_knowledge = False\r\n\r\n if (self.runMode == 'debug'):\r\n self.run_vision = True\r\n self.run_desktop = True\r\n self.run_bgm = True\r\n self.run_browser = True\r\n self.run_player = True\r\n elif (self.runMode == 'hud'):\r\n self.run_vision = True\r\n self.run_desktop = True\r\n self.run_bgm = True\r\n self.run_browser = True\r\n self.run_player = True\r\n elif (self.runMode == 'live'):\r\n self.run_vision = True\r\n self.run_desktop = True\r\n self.run_bgm = True\r\n self.run_browser = True\r\n self.run_player = True\r\n elif (self.runMode == 'translator'):\r\n pass\r\n elif (self.runMode == 'speech'):\r\n pass\r\n elif (self.runMode == 'number'):\r\n pass\r\n elif (self.runMode == 'camera'):\r\n self.run_vision = True\r\n self.run_desktop = True\r\n elif (self.runMode == 'assistant'):\r\n self.run_vision = True\r\n self.run_desktop = True\r\n elif (self.runMode == 'reception'):\r\n self.run_vision = True\r\n\r\n # フォース\r\n self.force_last = 0\r\n\r\n def __del__(self, ):\r\n qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )\r\n\r\n def begin(self, ):\r\n #qLog.log('info', self.proc_id, 'start')\r\n\r\n self.fileRun = qPath_work + self.proc_id + '.run'\r\n self.fileRdy = qPath_work + self.proc_id + '.rdy'\r\n self.fileBsy = qPath_work + self.proc_id + '.bsy'\r\n qFunc.statusSet(self.fileRun, False)\r\n qFunc.statusSet(self.fileRdy, False)\r\n qFunc.statusSet(self.fileBsy, False)\r\n\r\n self.proc_s = queue.Queue()\r\n self.proc_r = queue.Queue()\r\n self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))\r\n self.proc_beat = time.time()\r\n self.proc_last = time.time()\r\n self.proc_step = '0'\r\n self.proc_seq = 0\r\n\r\n self.proc_main.setDaemon(True)\r\n self.proc_main.start()\r\n\r\n def abort(self, waitMax=5, ):\r\n qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )\r\n\r\n self.breakFlag.set()\r\n chktime = time.time()\r\n while (not self.proc_beat is None) and ((time.time() - chktime) < waitMax):\r\n time.sleep(0.25)\r\n chktime = time.time()\r\n while (os.path.exists(self.fileRun)) and ((time.time() - chktime) < waitMax):\r\n time.sleep(0.25)\r\n\r\n def put(self, data, ):\r\n self.proc_s.put(data)\r\n return True\r\n\r\n def checkGet(self, waitMax=5, ):\r\n chktime = time.time()\r\n while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):\r\n time.sleep(0.10)\r\n data = self.get()\r\n return data\r\n\r\n def get(self, ):\r\n if (self.proc_r.qsize() == 0):\r\n return ['', '']\r\n data = self.proc_r.get()\r\n 
self.proc_r.task_done()\r\n return data\r\n\r\n def main_proc(self, cn_r, cn_s, ):\r\n # ログ\r\n qLog.log('info', self.proc_id, 'start', display=self.logDisp, )\r\n qFunc.statusSet(self.fileRun, True)\r\n self.proc_beat = time.time()\r\n\r\n # 初期設定\r\n self.proc_step = '1'\r\n\r\n # 待機ループ\r\n self.proc_step = '5'\r\n\r\n while (self.proc_step == '5'):\r\n self.proc_beat = time.time()\r\n\r\n # 停止要求確認\r\n if (self.breakFlag.is_set()):\r\n self.breakFlag.clear()\r\n self.proc_step = '9'\r\n break\r\n\r\n # キュー取得\r\n if (cn_r.qsize() > 0):\r\n cn_r_get = cn_r.get()\r\n inp_name = cn_r_get[0]\r\n inp_value = cn_r_get[1]\r\n cn_r.task_done()\r\n else:\r\n inp_name = ''\r\n inp_value = ''\r\n\r\n if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):\r\n qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))\r\n\r\n # レディ設定\r\n if (qFunc.statusCheck(self.fileRdy) == False):\r\n qFunc.statusSet(self.fileRdy, True)\r\n\r\n # ステータス応答\r\n if (inp_name.lower() == '_status_'):\r\n out_name = inp_name\r\n out_value = '_ready_'\r\n cn_s.put([out_name, out_value])\r\n\r\n # フォース リセット\r\n if (qFunc.statusCheck(qRdy__s_force) == False):\r\n if (self.force_last != 0):\r\n self.force_last = 0\r\n else:\r\n if (self.force_last == 0):\r\n self.force_last = time.time()\r\n\r\n # フォース 自動終了(有効10秒)\r\n if (qFunc.statusCheck(qRdy__s_force) == True):\r\n if ((time.time() - self.force_last) > 10):\r\n qFunc.statusSet(qRdy__s_force, False)\r\n qFunc.statusSet(qRdy__s_fproc, False)\r\n self.force_last = 0\r\n\r\n # 処理\r\n path = self.path\r\n path_files = glob.glob(path + '*.txt')\r\n if (len(path_files) > 0):\r\n\r\n #try:\r\n if (True):\r\n\r\n for f in path_files:\r\n\r\n # 停止要求確認\r\n if (self.breakFlag.is_set()):\r\n self.breakFlag.clear()\r\n self.proc_step = '9'\r\n break\r\n\r\n proc_file = f.replace('\\\\', '/')\r\n\r\n if (proc_file[-4:].lower() == '.txt' and proc_file[-8:].lower() != '.wrk.txt'):\r\n f1 = proc_file\r\n f2 = proc_file[:-4] + '.wrk.txt'\r\n try:\r\n os.rename(f1, f2)\r\n proc_file = f2\r\n except Exception as e:\r\n pass\r\n\r\n if (proc_file[-8:].lower() == '.wrk.txt'):\r\n f1 = proc_file\r\n f2 = proc_file[:-8] + proc_file[-4:]\r\n try:\r\n os.rename(f1, f2)\r\n proc_file = f2\r\n except Exception as e:\r\n pass\r\n\r\n # 実行カウンタ\r\n self.proc_last = time.time()\r\n self.proc_seq += 1\r\n if (self.proc_seq > 9999):\r\n self.proc_seq = 1\r\n seq4 = '{:04}'.format(self.proc_seq)\r\n seq2 = '{:02}'.format(self.proc_seq)\r\n\r\n proc_name = proc_file.replace(path, '')\r\n proc_name = proc_name[:-4]\r\n\r\n work_name = self.proc_id + '.' 
+ seq2\r\n work_file = qPath_work + work_name + '.txt'\r\n if (os.path.exists(work_file)):\r\n os.remove(work_file)\r\n\r\n if (proc_file[-9:].lower() != '_sjis.txt'):\r\n proc_txts, proc_text = qFunc.txtsRead(proc_file, encoding='utf-8', exclusive=False, )\r\n else:\r\n proc_txts, proc_text = qFunc.txtsRead(proc_file, encoding='shift_jis', exclusive=False, )\r\n if (proc_text != '') and (proc_text != '!'):\r\n qFunc.txtsWrite(work_file, txts=[proc_text], encoding='utf-8', exclusive=False, mode='w', )\r\n\r\n if (os.path.exists(work_file)):\r\n\r\n qFunc.remove(proc_file)\r\n\r\n # ログ\r\n #if (self.runMode == 'debug') or (not self.micDev.isdigit()):\r\n # qLog.log('info', self.proc_id, '' + proc_name + u' → ' + work_name, display=self.logDisp,)\r\n\r\n # 結果出力\r\n if (cn_s.qsize() < 99):\r\n out_name = '[txts]'\r\n out_value = proc_txts\r\n cn_s.put([out_name, out_value])\r\n\r\n # ビジー設定\r\n if (qFunc.statusCheck(self.fileBsy) == False):\r\n qFunc.statusSet(self.fileBsy, True)\r\n if (str(self.id) == '0'):\r\n qFunc.statusSet(qBusy_s_ctrl, True)\r\n\r\n if (self.micType == 'bluetooth') or (self.micGuide == 'on' or self.micGuide == 'sound'):\r\n qFunc.statusWait_false(qBusy_s_inp , 3)\r\n\r\n # フォース 覚醒\r\n force = False\r\n if (qFunc.checkWakeUpWord(proc_text) == True):\r\n if (qFunc.statusCheck(qRdy__s_force) == False):\r\n qFunc.statusSet(qRdy__s_force, True)\r\n qFunc.statusSet(qRdy__s_fproc, True)\r\n force = True\r\n\r\n # フォース 状態\r\n if (qFunc.statusCheck(qRdy__s_force) == False) \\\r\n and (qFunc.statusCheck(qRdy__s_fproc) == True):\r\n force = True\r\n\r\n # フォース リセット\r\n if (qFunc.statusCheck(qRdy__s_force) == False):\r\n if (self.force_last != 0):\r\n self.force_last = 0\r\n else:\r\n if (self.force_last == 0):\r\n self.force_last = time.time()\r\n\r\n # 処理\r\n self.proc_last = time.time()\r\n self.sub_proc(seq4, proc_file, work_file, proc_name, proc_text, force, cn_s, )\r\n\r\n # フォース 終了\r\n if (qFunc.statusCheck(qRdy__s_force) == False):\r\n qFunc.statusSet(qRdy__s_fproc, False)\r\n\r\n #except Exception as e:\r\n # pass\r\n\r\n\r\n\r\n # ビジー解除\r\n qFunc.statusSet(self.fileBsy, False)\r\n if (str(self.id) == '0'):\r\n qFunc.statusSet(qBusy_s_ctrl, False)\r\n\r\n # アイドリング\r\n slow = False\r\n if (qFunc.statusCheck(qBusy_dev_cpu) == True):\r\n slow = True\r\n\r\n if (slow == True):\r\n time.sleep(1.00)\r\n else:\r\n if (cn_r.qsize() == 0):\r\n time.sleep(0.25)\r\n else:\r\n time.sleep(0.10)\r\n\r\n # 終了処理\r\n if (True):\r\n\r\n # レディ解除\r\n qFunc.statusSet(self.fileRdy, False)\r\n\r\n # ビジー解除\r\n qFunc.statusSet(self.fileBsy, False)\r\n if (str(self.id) == '0'):\r\n qFunc.statusSet(qBusy_s_ctrl, False)\r\n\r\n # キュー削除\r\n while (cn_r.qsize() > 0):\r\n cn_r_get = cn_r.get()\r\n cn_r.task_done()\r\n while (cn_s.qsize() > 0):\r\n cn_s_get = cn_s.get()\r\n cn_s.task_done()\r\n\r\n # ログ\r\n qLog.log('info', self.proc_id, 'end', display=self.logDisp, )\r\n qFunc.statusSet(self.fileRun, False)\r\n self.proc_beat = None\r\n\r\n\r\n\r\n def sub_proc(self, seq4, proc_file, work_file, proc_name, proc_text, force, cn_s, ):\r\n\r\n jp_true = qFunc.in_japanese(proc_text)\r\n if (proc_text == self.last_text) and ((time.time() - self.last_time) < 15):\r\n word_true = False\r\n else:\r\n word_true = True\r\n self.last_text = proc_text\r\n self.last_time = time.time()\r\n\r\n qLog.log('debug', self.proc_id, 'fource = ' + str(force), )\r\n\r\n # 外部プログラム qExt_program\r\n if ((proc_text.find(u'プログラム') >= 0) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0))) \\\r\n or 
((proc_text.find(u'サープ') >= 0) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0))):\r\n if (os.name == 'nt'):\r\n if (os.path.exists(qExt_program)):\r\n ext_program = subprocess.Popen([qExt_program, proc_text, ], )\r\n #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )\r\n\r\n # 画面操作\r\n if (proc_text.find(u'メイン') >= 0) and (proc_text.find(u'スクリーン') >= 0):\r\n pyautogui.keyDown('ctrlleft')\r\n pyautogui.keyDown('winleft')\r\n pyautogui.press('left')\r\n pyautogui.press('left')\r\n pyautogui.press('left')\r\n pyautogui.press('left')\r\n pyautogui.press('left')\r\n pyautogui.keyUp('winleft')\r\n pyautogui.keyUp('ctrlleft')\r\n\r\n if (proc_text.find(u'サブ') >= 0) and (proc_text.find(u'スクリーン') >= 0):\r\n pyautogui.keyDown('ctrlleft')\r\n pyautogui.keyDown('winleft')\r\n pyautogui.press('right')\r\n pyautogui.keyUp('winleft')\r\n pyautogui.keyUp('ctrlleft')\r\n\r\n if (proc_text.find(u'スクリーン') >= 0) and (proc_text.find(u'キーボード') >= 0):\r\n pyautogui.keyDown('ctrlleft')\r\n pyautogui.keyDown('winleft')\r\n pyautogui.press('o')\r\n pyautogui.keyUp('winleft')\r\n pyautogui.keyUp('ctrlleft')\r\n\r\n # キーボード操作\r\n if (proc_text[-3:] == u'を入力'):\r\n qFunc.sendKey(proc_text[:-3],cr=True, lf=False)\r\n elif (proc_text[-2:] == u'入力'):\r\n qFunc.sendKey(proc_text[:-2],cr=True, lf=False)\r\n\r\n if (proc_text == u'改行') or (proc_text.lower() == 'enter'):\r\n pyautogui.press('enter')\r\n\r\n if (proc_text.lower() == 'f1') or (proc_text.lower() == 'f 1'):\r\n pyautogui.press('f1')\r\n if (proc_text.lower() == 'f2') or (proc_text.lower() == 'f 2'):\r\n pyautogui.press('f2')\r\n if (proc_text.lower() == 'f3') or (proc_text.lower() == 'f 3'):\r\n pyautogui.press('f3')\r\n if (proc_text.lower() == 'f4') or (proc_text.lower() == 'f 4'):\r\n pyautogui.press('f4')\r\n if (proc_text.lower() == 'f5') or (proc_text.lower() == 'f 5'):\r\n pyautogui.press('f5')\r\n if (proc_text.lower() == 'f6') or (proc_text.lower() == 'f 6'):\r\n pyautogui.press('f6')\r\n if (proc_text.lower() == 'f7') or (proc_text.lower() == 'f 7'):\r\n pyautogui.press('f7')\r\n if (proc_text.lower() == 'f8') or (proc_text.lower() == 'f 8'):\r\n pyautogui.press('f8')\r\n if (proc_text.lower() == 'f9') or (proc_text.lower() == 'f 9'):\r\n pyautogui.press('f9')\r\n if (proc_text.lower() == 'f10') or (proc_text.lower() == 'f 10'):\r\n pyautogui.press('f10')\r\n if (proc_text.lower() == 'f11') or (proc_text.lower() == 'f 11'):\r\n pyautogui.press('f11')\r\n if (proc_text.lower() == 'f12') or (proc_text.lower() == 'f 12'):\r\n pyautogui.press('f12')\r\n\r\n if (proc_text == u'ポーズ') \\\r\n or (proc_text == u'閉じる'):\r\n pyautogui.press('pause')\r\n if (proc_text[-3:] == u'を検索'):\r\n qFunc.sendKey(proc_text[:-3],cr=False, lf=False)\r\n pyautogui.press('f9')\r\n\r\n if (cn_s.qsize() < 99):\r\n\r\n # システム制御\r\n if (proc_text.find(u'リセット') >= 0):\r\n out_name = 'control'\r\n out_value = '_reset_'\r\n cn_s.put([out_name, out_value])\r\n qFunc.txtsWrite(qCtrl_control_kernel , txts=[out_value], encoding='utf-8', exclusive=True, mode='w', )\r\n qFunc.txtsWrite(qCtrl_control_vision , txts=[out_value], encoding='utf-8', exclusive=True, mode='w', )\r\n qFunc.txtsWrite(qCtrl_control_desktop, txts=[out_value], encoding='utf-8', exclusive=True, mode='w', )\r\n\r\n elif ((proc_text.find(u'システム') >= 0) and (proc_text.find(u'終了') >= 0)) \\\r\n or (proc_text == u'バルス'):\r\n out_name = 'control'\r\n out_value = '_end_'\r\n cn_s.put([out_name, out_value])\r\n qFunc.txtsWrite(qCtrl_control_kernel , txts=[out_value], encoding='utf-8', exclusive=True, 
mode='w', )\r\n\r\n elif (proc_text.find(u'リブート') >= 0) \\\r\n or (proc_text.find(u'再起動') >= 0):\r\n out_name = 'control'\r\n out_value = '_reboot_'\r\n cn_s.put([out_name, out_value])\r\n qFunc.txtsWrite(qCtrl_control_kernel , txts=[out_value], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_bgm = False\r\n self.run_browser = False\r\n self.run_player = False\r\n self.run_chatting = False\r\n self.run_knowledge = False\r\n\r\n # 機能制御\r\n elif (proc_text.find(u'画面') >= 0) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0)):\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_desktop_begin_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_desktop = True\r\n qFunc.statusWait_false(qCtrl_control_kernel, 5)\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_vision_begin_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_vision = True\r\n\r\n elif (proc_text.find(u'画面') >= 0) and (proc_text.find(u'終了') >= 0):\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_desktop_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_desktop = False\r\n qFunc.statusWait_false(qCtrl_control_kernel, 5)\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_vision_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_vision = False\r\n\r\n elif (proc_text.find(u'ビジョン') >= 0) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0)):\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_vision_begin_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_vision = True\r\n\r\n elif (proc_text.find(u'ビジョン') >= 0) and (proc_text.find(u'終了') >= 0):\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_vision_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_vision = False\r\n\r\n elif (proc_text.find(u'デスクトップ') >= 0) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0)):\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_desktop_begin_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_desktop = True\r\n\r\n elif (proc_text.find(u'デスクトップ') >= 0) and (proc_text.find(u'終了') >= 0):\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_desktop_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_desktop = False\r\n\r\n\r\n\r\n elif ((proc_text.find(u'BGM') >= 0) or (proc_text.find('BGM') >= 0)) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0)):\r\n # BGM 起動\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_bgm_begin_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_bgm = True\r\n # BGM 開始\r\n qFunc.statusWait_false(qCtrl_control_bgm, 5)\r\n qFunc.txtsWrite(qCtrl_control_bgm ,txts=['_start_'], encoding='utf-8', exclusive=True, mode='w', )\r\n\r\n elif ((proc_text.find(u'BGM') >= 0) or (proc_text.find('BGM') >= 0)) \\\r\n and (proc_text.find(u'終了') >= 0):\r\n # BGM 停止\r\n qFunc.statusWait_false(qCtrl_control_bgm, 5)\r\n qFunc.txtsWrite(qCtrl_control_bgm ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )\r\n # BGM 終了\r\n qFunc.statusWait_false(qCtrl_control_bgm, 5)\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_bgm_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_bgm = False\r\n\r\n elif ((proc_text.find(u'BGM') >= 0) or (proc_text.find('BGM') >= 0)) \\\r\n and ((proc_text.find(u'停止') >= 0) or (proc_text.find(u'ストップ') >= 0)):\r\n # BGM 停止\r\n qFunc.statusWait_false(qCtrl_control_bgm, 5)\r\n qFunc.txtsWrite(qCtrl_control_bgm ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )\r\n\r\n\r\n\r\n elif 
(proc_text.find(u'動画') >= 0) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0)):\r\n # 動画 起動\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_player_begin_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_player = True\r\n # 動画 メニュー\r\n qFunc.statusWait_false(qCtrl_control_player, 5)\r\n qFunc.txtsWrite(qCtrl_control_player ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )\r\n qFunc.statusWait_false(qCtrl_control_player, 5)\r\n qFunc.txtsWrite(qCtrl_control_player ,txts=[u'動画メニュー'], encoding='utf-8', exclusive=True, mode='w', )\r\n\r\n elif (proc_text.find(u'動画') >= 0) and (proc_text.find(u'終了') >= 0):\r\n # 動画 停止\r\n qFunc.statusWait_false(qCtrl_control_player, 5)\r\n qFunc.txtsWrite(qCtrl_control_player ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )\r\n # 動画 終了\r\n qFunc.statusWait_false(qCtrl_control_player, 5)\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_player_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_player = False\r\n\r\n elif (proc_text.find(u'動画') >= 0) \\\r\n and ((proc_text.find(u'停止') >= 0) or (proc_text.find(u'ストップ') >= 0)):\r\n # 動画 停止\r\n qFunc.statusWait_false(qCtrl_control_player, 5)\r\n qFunc.txtsWrite(qCtrl_control_player ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )\r\n\r\n\r\n\r\n elif (self.run_player == True) \\\r\n and (proc_text.find(u'動画') >=0) and (proc_text.find(u'メニュー') >=0):\r\n # 動画 メニュー\r\n qFunc.statusWait_false(qCtrl_control_player, 5)\r\n qFunc.txtsWrite(qCtrl_control_player ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )\r\n qFunc.statusWait_false(qCtrl_control_player, 5)\r\n qFunc.txtsWrite(qCtrl_control_player ,txts=[u'動画メニュー'], encoding='utf-8', exclusive=True, mode='w', )\r\n\r\n elif (self.run_player == True) \\\r\n and (word_true == True) \\\r\n and (proc_text.lower() >= '01') and (proc_text.lower() <= '09'):\r\n # 動画 番号で開く\r\n qFunc.statusWait_false(qCtrl_control_player, 5)\r\n qFunc.txtsWrite(qCtrl_control_player ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )\r\n qFunc.statusWait_false(qCtrl_control_player, 5)\r\n qFunc.txtsWrite(qCtrl_control_player ,txts=[proc_text.lower()], encoding='utf-8', exclusive=True, mode='w', )\r\n\r\n\r\n\r\n elif ((proc_text.find(u'ブラウザ') >= 0) or (proc_text.find(u'ウェブ') >= 0)) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0)):\r\n # ブラウザ 起動\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_browser_begin_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_browser = True\r\n # ブラウザ 開く\r\n qFunc.statusWait_false(qCtrl_control_browser, 5)\r\n qFunc.txtsWrite(qCtrl_control_browser ,txts=['_start_'], encoding='utf-8', exclusive=True, mode='w', )\r\n\r\n elif ((proc_text.find(u'ブラウザ') >= 0) or (proc_text.find(u'ウェブ') >= 0)) \\\r\n and (proc_text.find(u'終了') >= 0):\r\n # ブラウザ 閉じる\r\n qFunc.statusWait_false(qCtrl_control_browser, 5)\r\n qFunc.txtsWrite(qCtrl_control_browser ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )\r\n # ブラウザ 終了\r\n qFunc.statusWait_false(qCtrl_control_browser, 5)\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_browser_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_browser = False\r\n\r\n elif ((proc_text.find(u'ブラウザ') >= 0) or (proc_text.find(u'ウェブ') >= 0)) \\\r\n and ((proc_text.find(u'停止') >= 0) or (proc_text.find(u'ストップ') >= 0)):\r\n # ブラウザ 閉じる\r\n qFunc.statusWait_false(qCtrl_control_browser, 5)\r\n qFunc.txtsWrite(qCtrl_control_browser ,txts=['_stop_'], encoding='utf-8', 
exclusive=True, mode='w', )\r\n\r\n\r\n\r\n elif ((proc_text.find(u'チャット') >= 0) or (proc_text.find(u'雑談') >= 0)) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0)):\r\n # チャット 起動\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_chatting_begin_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_chatting = True\r\n\r\n elif ((proc_text.find(u'チャット') >= 0) or (proc_text.find(u'雑談') >= 0)) \\\r\n and (proc_text.find(u'終了') >= 0):\r\n # チャット 終了\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_chatting_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_chatting = False\r\n\r\n elif ((proc_text.find(u'チャット') >= 0) or (proc_text.find(u'雑談') >= 0)) \\\r\n and ((proc_text.find(u'停止') >= 0) or (proc_text.find(u'ストップ') >= 0)):\r\n # チャット 終了\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_chatting_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_chatting = False\r\n\r\n\r\n\r\n elif ((proc_text.find(u'知識') >= 0) or (proc_text.find(u'ナレッジ') >= 0)) \\\r\n and ((proc_text.find(u'開始') >= 0) or (proc_text.find(u'起動') >= 0)):\r\n # ナレッジ 開始\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_knowledge_begin_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_knowledge = True\r\n\r\n elif ((proc_text.find(u'知識') >= 0) or (proc_text.find(u'ナレッジ') >= 0)) \\\r\n and (proc_text.find(u'終了') >= 0):\r\n # ナレッジ 終了\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_knowledge_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_knowledge = False\r\n\r\n elif ((proc_text.find(u'知識') >= 0) or (proc_text.find(u'ナレッジ') >= 0)) \\\r\n and ((proc_text.find(u'停止') >= 0) or (proc_text.find(u'ストップ') >= 0)):\r\n # ナレッジ 終了\r\n qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_knowledge_end_'], encoding='utf-8', exclusive=True, mode='w', )\r\n self.run_knowledge = False\r\n\r\n\r\n\r\n else:\r\n\r\n\r\n\r\n # インターフェース\r\n #if (self.run_vision == True):\r\n # qFunc.txtsWrite(qCtrl_control_vision ,txts=[proc_text], encoding='utf-8', exclusive=True, mode='w', )\r\n #if (self.run_desktop == True):\r\n # qFunc.txtsWrite(qCtrl_control_desktop ,txts=[proc_text], encoding='utf-8', exclusive=True, mode='w', )\r\n if (self.run_bgm == True):\r\n qFunc.txtsWrite(qCtrl_control_bgm ,txts=[proc_text], encoding='utf-8', exclusive=True, mode='w', )\r\n if (self.run_browser == True):\r\n if (jp_true == True):\r\n qFunc.txtsWrite(qCtrl_control_browser ,txts=[proc_text], encoding='utf-8', exclusive=True, mode='w', )\r\n #if (self.run_player == True):\r\n # qFunc.txtsWrite(qCtrl_control_player ,txts=[proc_text], encoding='utf-8', exclusive=True, mode='w', )\r\n if (self.run_chatting == True):\r\n if (jp_true == True):\r\n qFunc.txtsWrite(qCtrl_control_chatting ,txts=[proc_text], encoding='utf-8', exclusive=True, mode='w', )\r\n if (self.run_knowledge == True):\r\n if (jp_true == True):\r\n qFunc.txtsWrite(qCtrl_control_knowledge ,txts=[proc_text], encoding='utf-8', exclusive=True, mode='w', )\r\n\r\n\r\n\r\n self.last_text = proc_text\r\n self.last_time = time.time()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # 共通クラス\r\n qFunc.init()\r\n\r\n # ログ\r\n nowTime = datetime.datetime.now()\r\n filename = qPath_log + nowTime.strftime('%Y%m%d.%H%M%S') + '.' 
+ os.path.basename(__file__) + '.log'\r\n qLog.init(mode='logger', filename=filename, )\r\n\r\n # 設定\r\n controls_thread = proc_controls('controls', '0', )\r\n controls_thread.begin()\r\n\r\n\r\n\r\n # ループ\r\n chktime = time.time()\r\n while ((time.time() - chktime) < 15):\r\n\r\n res_data = controls_thread.get()\r\n res_name = res_data[0]\r\n res_value = res_data[1]\r\n if (res_name != ''):\r\n print(res_name, res_value, )\r\n\r\n if (controls_thread.proc_s.qsize() == 0):\r\n controls_thread.put(['_status_', ''])\r\n\r\n time.sleep(0.05)\r\n\r\n\r\n\r\n time.sleep(1.00)\r\n controls_thread.abort()\r\n del controls_thread\r\n\r\n\r\n","sub_path":"_v5_proc_controls.py","file_name":"_v5_proc_controls.py","file_ext":"py","file_size_in_byte":36970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"86218604","text":"import requests\nimport json\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nimport json\nimport csv\nimport pymysql\nimport math\nimport pytz\nimport logging\nimport time\n# def pull_history():\ncsv_columns = ['km100.rpm10c', 'km100.rpm25c', 'km102.rhumid', 'km102.rtemp', 'km102.rtvoc (ppb)', 'rco2 (ppm)', 'ts',\n 'Location', 'Device']\ncsv_file = \"entries.csv\"\ndate_format = '%Y-%m-%dT%H:%M:%SZ'\n\n\ndef request_kaiterra_data(*devs, start=None, finish=None):\n key = 'MGIwMTQ5MmIzY2IzNDkwYWI1YjViZmUwMGE3MThhNjM3ZTA3'\n try:\n # url = 'https://api.kaiterra.cn/v1/sensedges/{}'.format(dev)\n r = requests.post(\n 'https://api.kaiterra.cn/v1/batch?include_headers=false',\n params={'key': key},\n headers={'Content-Type': 'application/json', 'Accept-Encoding': 'gzip'},\n data=json.dumps(\n [{'method': 'GET',\n 'relative_url': 'sensedges/{}/history?series=raw&begin={}&end={}'.format(dev, start, finish)} for dev\n in\n devs]\n )\n )\n\n for response in r.json():\n yield json.loads(response['body'])\n except BaseException as e:\n logging.error(\"Kaiterra request failed: {}, {}\".format(e, e.args))\n\n\ndef connect_writer():\n host = \"testdev2.cluster-cqtvhklvqwhc.us-east-2.rds.amazonaws.com\"\n port = 3306\n dbname = \"device\"\n user = \"admin\"\n password = \"SmellingSaltInfinite\"\n\n conn = pymysql.connect(host, user=user, port=port,\n passwd=password, db=dbname)\n return conn\n\n\ndef get_last_reading_date(dev_id, sql_con):\n date = dt.strptime('2019-08-01T00:00:00Z', date_format)\n with sql_con.cursor() as cursor:\n execute_single_sql(cursor, create_select_last_reading_string(dev_id))\n sql_con.commit()\n result = cursor.fetchone()\n\n if result is not None:\n date = result[6]\n logging.info(\"Last reading for device {} is {}\".format(dev_id, date))\n return date\n\n\ndef load_config():\n with open('config.json') as f:\n json_data = json.load(f)\n return json_data\n\n\n# def write_chunk\n\ndef create_select_last_reading_string(dev):\n sql = \"SELECT * FROM readings WHERE device = '{}' ORDER BY ts DESC;\".format(dev)\n return sql\n\n\ndef convert_table_data_to_list(p):\n # dv = default value\n dv = 0\n data_list = [None] * 10\n data_list[0] = p.get('km100.rpm10c', dv)\n data_list[1] = p.get('km100.rpm25c', dv)\n data_list[2] = p.get('km102.rhumid', dv)\n data_list[3] = p.get('km102.rtemp', dv)\n data_list[4] = p.get('km102.rtvoc (ppb)', dv)\n data_list[5] = p.get('rco2 (ppm)', dv)\n data_list[6] = dt.strptime(p['ts'], date_format)\n data_list[7] = p['Location']\n data_list[8] = p['Device']\n data_list[9] = \"{}-{}\".format(data_list[8], p['ts'])\n return data_list\n\n\ndef execute_bulk_sql(cursor, sql_string, 
data):\n try:\n cursor.executemany(sql_string, data)\n except pymysql.ProgrammingError as e:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except pymysql.DataError as e:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except pymysql.IntegrityError as e:\n if str(e.args[0]) == '1062':\n # print(\"Suppressed duplicate error\")\n pass\n else:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except pymysql.NotSupportedError as e:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except pymysql.OperationalError as e:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except BaseException as e:\n logging.critical('Unknown Error: {!r}, errno is {}'.format(e, e.args[0]))\n\n\ndef execute_single_sql(cursor, sql_string):\n try:\n cursor.execute(sql_string)\n except pymysql.ProgrammingError as e:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except pymysql.DataError as e:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except pymysql.IntegrityError as e:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except pymysql.NotSupportedError as e:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except pymysql.OperationalError as e:\n logging.critical('SQL Error: {!r}, errno is {}'.format(e, e.args[0]))\n except BaseException as e:\n logging.critical('Unknown Error: {!r}, errno is {}'.format(e, e.args[0]))\n\n\ndef write_chunk_to_db(chunk, sql_conn):\n queries = []\n i = 0\n for row in chunk:\n queries.append(convert_table_data_to_list(row))\n start_time = dt.now()\n with sql_conn.cursor() as cursor:\n sql = \"INSERT INTO `readings` (`pm10c`, `pm25c`, `humid`, `temp`, `tvoc`, `co2`, `ts`, `location`, `device`, `idkey`) \" \\\n \"VALUES (%s, %s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n execute_bulk_sql(cursor, sql, queries)\n logging.info(\"Wrote {} rows to database in {} seconds\".format(len(queries), dt.now() - start_time))\n sql_conn.commit()\n\n\ndef get_device_data_chunk(dev, location, pull_date, chunk_end):\n response_index = 0\n chunk_data = []\n for responses in request_kaiterra_data(dev, start=pull_date.strftime(date_format),\n finish=chunk_end.isoformat().split('.')[0] + 'Z'):\n response_id = None\n for k in responses:\n if k == 'id':\n response_id = responses[k]\n if k == 'data':\n for p in responses[k]:\n data_chunk = p\n data_chunk['Device'] = response_id\n data_chunk['Location'] = location['ID']\n chunk_data.append(p)\n response_index += 1\n return chunk_data\n\n# hours=0: pull since last entry\n\n\ndef get_location_data(location, sql_con, hours=0):\n location_devices = [location['Config']['Device UUIDs'][i] for i in location['Config']['Device UUIDs']]\n for dev in location_devices:\n # start_date = '2019-12-10T00:00:00Z'\n # end_date = '2019-12-10T00:15:00Z'\n if hours == 0:\n start_date = get_last_reading_date(dev, sql_con).strftime(date_format)\n else:\n start_date = dt.now() - timedelta(hours=hours)\n start_date = start_date.strftime(date_format)\n\n end_date = dt.now().strftime(date_format)\n delta = dt.strptime(end_date, date_format) - dt.strptime(start_date, date_format)\n chunk_size = 3\n chunks = int(math.ceil(delta.days / chunk_size))\n if chunks == 0:\n chunks = 1\n data_chunks = []\n for i in range(chunks):\n print(\"Pulling chunk {}\".format(str(i + 1)), flush=True)\n readings = []\n pull_date = dt.strptime(start_date, date_format) + (timedelta(days=chunk_size) * i)\n 
chun_kend = pull_date + timedelta(days=chunk_size)\n if chun_kend > dt.strptime(end_date, date_format):\n chun_kend = dt.strptime(end_date, date_format)\n logging.info(\"Requesting data for device {} from {} Chunk {}/{}\".format(dev, pull_date, i + 1, chunks))\n dev_data = get_device_data_chunk(dev, location, pull_date, chun_kend)\n logging.info(\"{} rows in chunk for device: {}\".format(len(dev_data), dev))\n data_chunks.append(dev_data)\n time.sleep(1)\n return data_chunks\n\n\ndef key(args):\n pass\n\n\ndef pull_history(hours=0):\n config = load_config()\n sql_con = connect_writer()\n for location in list(config['Locations']):\n loc = config['Locations'][location]\n logging.info(\"Getting data for location: {}...\".format(loc['ID']))\n data = get_location_data(config['Locations'][location], sql_con,hours)\n if len(data) > 0:\n logging.info(\"Got {} chunks from kaiterra for location {}\".format(len(data), loc['ID']))\n for data_chunk in data:\n write_chunk_to_db(data_chunk, sql_con)\n else:\n logging.info(\"No data returned for location {}\".format(loc['ID']))\n","sub_path":"historypull.py","file_name":"historypull.py","file_ext":"py","file_size_in_byte":8197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"393379876","text":"import collections\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as ppl\n\n\ndef minmax(dataset):\n minmax = list()\n for i in range(len(dataset[0])):\n if i == len(dataset[0]) - 1:\n continue\n value_min = dataset[:, i].min()\n value_max = dataset[:, i].max()\n minmax.append([value_min, value_max])\n return minmax\n\n\ndef normalize(dataset, minmax):\n for row in dataset:\n for i in range(len(row)):\n if i == len(row) - 1: # exclude data_Y\n continue\n row[i] = (row[i] - minmax[i][0]) / (minmax[i][1] - minmax[i][0])\n\n return dataset\n\n\ndef minkovskiy_distance(a, b, p):\n distance = 0\n for i in range(len(a) - 1):\n distance += abs(a[i] - b[i]) ** p\n\n return distance ** (1 / p)\n\n\ndef euclidean_distance(a, b):\n return minkovskiy_distance(a, b, 1)\n\n\ndef manhattan_distance(a, b):\n return minkovskiy_distance(a, b, 2)\n\n\ndef default_kernel(x, k, p1, p2, do_abs=False):\n if abs(x) > 1:\n return 0\n if do_abs:\n x = abs(x)\n return k * (1 - x ** p1) ** p2\n\n\ndef uniform_kernel(a):\n return default_kernel(a, 1 / 2, 0, 0)\n\n\ndef triangular_kernel(a):\n return default_kernel(a, 1, 1, 1, True)\n\n\ndef epanechnikov_kernel(a):\n return default_kernel(a, 3 / 4, 2, 1)\n\n\ndef biweight_kernel(a):\n return default_kernel(a, 15 / 16, 2, 2)\n\n\ndef tricube_kernel(a):\n return default_kernel(a, 70 / 81, 3, 3, True)\n\n\ndef launch(window_width, distance, kernel, data_X, data_Y_naive,\n data_Y_one_hot):\n def launch_naive(weights, data_Y):\n result = 0\n for idx in range(weights.shape[0]):\n result += data_Y[idx] * weights[idx]\n\n if (np.sum(weights) == 0):\n return 0\n return result / np.sum(weights)\n\n def launch_one_hot(weights, data_Y):\n result = [0] * data_Y.shape[1]\n for one_hot_idx in range(data_Y.shape[1]):\n for idx in range(data_Y.shape[0]):\n result += data_Y[idx] * weights[idx]\n\n if (np.sum(weights) == 0):\n result[one_hot_idx] = 0\n else:\n result[one_hot_idx] /= np.sum(weights)\n\n return result\n\n def get_distance(test_X, data_X):\n dist_array = np.empty(data_X_learn.shape[0])\n\n for idx in range(data_X.shape[0]):\n dist_array[idx] = distance(test_X, data_X[idx])\n\n return dist_array\n\n def get_weights(distance):\n weights = np.empty(distance.shape[0])\n\n for 
idx in range(distance.shape[0]):\n weights[idx] = kernel(distance[idx] / window_width)\n\n return weights\n\n tps, tps2 = 0, 0\n fps, fps2 = 0, 0\n tns, tns2 = 0, 0\n fns, fns2 = 0, 0\n\n for test_element_index in range(data_X.shape[0]):\n test_element_naive = (data_X[test_element_index],\n data_Y_naive[test_element_index])\n test_element_one_hot = (data_X[test_element_index],\n data_Y_one_hot[test_element_index, :])\n\n data_X_learn = np.concatenate(\n (data_X[:test_element_index],\n data_X[test_element_index + 1:]),\n axis=0\n )\n\n data_Y_learn_naive = np.concatenate(\n (data_Y_naive[:test_element_index],\n data_Y_naive[test_element_index + 1:]),\n axis=0\n )\n\n data_Y_learn_one_hot = np.concatenate(\n (data_Y_one_hot[:test_element_index],\n data_Y_one_hot[test_element_index + 1:]),\n axis=0\n )\n\n distances = get_distance(test_element_naive[0], data_X_learn)\n weights = get_weights(distances)\n\n naive_result = launch_naive(weights, data_Y_learn_naive)\n one_hot_result = launch_one_hot(weights, data_Y_learn_one_hot)\n\n if naive_result == test_element_naive[1]:\n tps += 1\n fps += 0\n tns += 2\n fns += 0\n else:\n tps += 0\n fps += 1\n tns += 1\n fns += 1\n\n if np.argmax(one_hot_result) == np.argmax(test_element_one_hot[1]):\n tps2 += 1\n fps2 += 0\n tns2 += 2\n fns2 += 0\n else:\n tps2 += 0\n fps2 += 1\n tns2 += 1\n fns2 += 1\n\n presicion = tps / (tps + fps)\n recall = tps / (tps + fns)\n presicion2 = tps2 / (tps2 + fps2)\n recall2 = tps2 / (tps2 + fns2)\n\n f1_naive, f1_one_hot = 0, 0\n\n if presicion + recall != 0:\n f1_naive = 2 * (presicion * recall) / (presicion + recall)\n\n if presicion2 + recall2 != 0:\n f1_one_hot = 2 * (presicion2 * recall2) / (presicion2 + recall2)\n\n return f1_naive, f1_one_hot\n\n\nfilename = 'red-wine-quality.csv'\ndataset = pd.read_csv(filename).to_numpy()\n\nclass5_indexes = [i for i, x in enumerate(dataset) if x[-1] == 5.][:100]\nclass6_indexes = [i for i, x in enumerate(dataset) if x[-1] == 6.][:100]\nclass7_indexes = [i for i, x in enumerate(dataset) if x[-1] == 7.][:100]\n\ndataset5 = dataset[class5_indexes]\ndataset6 = dataset[class6_indexes]\ndataset7 = dataset[class7_indexes]\ndataset = np.concatenate((dataset5, dataset6, dataset7), axis=0)\n\nmin_max = minmax(dataset)\ndataset = normalize(dataset, min_max)\n\ndata_X, data_Y = [], []\nfor dataline in dataset:\n x, y = dataline[:-2], dataline[-1]\n data_X.append(x), data_Y.append(y)\n\ndata_X = np.array(data_X, np.float)\ndata_Y_naive = np.array(data_Y, np.float)\ndata_Y_one_hot = pd.get_dummies(np.array(data_Y, np.float)).to_numpy()\n\n# width, width_change, iterations_count = 0.2, 0.2, 10\n\nwidths = np.arange(0.1, 1, 0.1)\ndistance_funcs = [euclidean_distance, manhattan_distance]\nkernel_funcs = [uniform_kernel, triangular_kernel, epanechnikov_kernel,\n biweight_kernel, tricube_kernel]\n\nfor kernel in kernel_funcs:\n print(kernel)\n\n f1s_naive, f1s_one_hot = [], []\n for width in widths:\n print(width)\n f1_naive, f1_one_hot = launch(width, manhattan_distance, kernel, data_X,\n data_Y_naive, data_Y_one_hot)\n\n f1s_naive.append(f1_naive)\n f1s_one_hot.append(f1_one_hot)\n\n ppl.xlabel('window width')\n ppl.ylabel('f1')\n ppl.title(kernel.__name__)\n ppl.plot(widths, f1s_naive, label='Naive')\n ppl.plot(widths, f1s_one_hot, label='One Hot')\n ppl.show()\n","sub_path":"ml/hw-09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"16578794","text":"import numpy as 
np\nimport cv2\nimport os\nfrom albumentations import (\n RandomRotate90, Transpose, ShiftScaleRotate, Blur, \n OpticalDistortion, CLAHE, GaussNoise, MotionBlur, \n GridDistortion, HueSaturationValue, IAAAdditiveGaussianNoise, \n MedianBlur, IAAPiecewiseAffine, IAASharpen, IAAEmboss, \n RandomContrast, RandomBrightness, HorizontalFlip, OneOf, Compose, ToGray,\n ElasticTransform\n)\n\ndef strong_aug(p=1):\n return Compose([\n # RandomRotate90(),\n # Flip(),\n # Transpose(),\n HorizontalFlip(p=0.65),\n OneOf([\n IAAAdditiveGaussianNoise(),\n GaussNoise(),\n ], p=0.6),\n # ElasticTransform(p=1),\n OneOf([\n MotionBlur(p=.4),\n MedianBlur(blur_limit=3, p=.5),\n Blur(blur_limit=3, p=.5), \n ], p=0.5),\n ShiftScaleRotate(shift_limit=0.01, scale_limit=0.01, rotate_limit=30, p=.8),\n OneOf([\n OpticalDistortion(p=0.4),\n # GridDistortion(p=.1),\n # IAAPiecewiseAffine(p=0.3),\n ], p=0.9),\n OneOf([\n CLAHE(clip_limit=2),\n IAASharpen(),\n # IAAEmboss(),\n RandomContrast(),\n RandomBrightness(),\n ], p=0.6),\n HueSaturationValue(p=0.3),\n ToGray(p=0.75),\n ], p=p)\n\n\nroot_path = \"/home/lihebeizi/data/FaceRegDataset/train_enhanced\"\ndataset_dict = {}\nfor root, dirs, files in os.walk(root_path):\n parent_dirname = os.path.basename(root)\n if parent_dirname not in dataset_dict:\n dataset_dict[parent_dirname] = []\n for file in files:\n file_path = os.path.join(root, file)\n dataset_dict[parent_dirname].append(file_path)\n\ntarget_lenth = 20\naug = strong_aug(p=1)\n\nfor name in dataset_dict:\n alist = dataset_dict[name]\n alist_len = len(alist)\n if alist_len == 0:\n print(name)\n continue\n num_to_add = target_lenth - alist_len\n for i in range(num_to_add):\n image = cv2.imread(alist[i % alist_len], 1)\n if image is None:\n print(alist[i % alist_len])\n continue\n img_strong_aug = aug(image=image)['image']\n cv2.imwrite(f'{alist[i % alist_len]}.enhance.{i}.jpg', img_strong_aug)\n print(f'{name} added {str(num_to_add)}')\n","sub_path":"script/enhance.py","file_name":"enhance.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"120235948","text":"class RectInsideQuadrilateral(object):\n \"\"\"\n Check if rect is inside the quadrilateral\n by breaking it in to 2 triangles and using barycentric coordinates\n to find any of the rect points inside the triangles\n (https://en.wikipedia.org/wiki/Barycentric_coordinate_system)\n \n Note: The core idea was written by someone else\n i simple extended the idea to work with quadrilaterals\n\n \n \"\"\"\n \n @classmethod\n def sign(cls, p1, p2, p3):\n \"\"\"\n TBD\n\n return ->\n \"\"\"\n return (p1[0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[1] - p3[1])\n\n\n @classmethod\n def pointInTriangle(cls, p1, p2, p3, p4):\n \"\"\"\n TBD\n\n p1 -> Point to check if inside the triangle\n\n Points are handled in clockwise order\n p2 -> Triangle point \n p3 -> -||-\n p4 -> -||-\n\n return -> 'True' if point is inside the triangle points, 'False' otherwise\n \"\"\"\n t1 = cls.sign(p1, p2, p3) < 0\n t2 = cls.sign(p1, p3, p4) < 0\n t3 = cls.sign(p1, p4, p2) < 0\n\n return t1 == t2 and t2 == t3\n\n\n @classmethod\n def rectInsideQuadrilateral(cls, quad_points, rect):\n \"\"\"\n TBD\n\n quad_points -> list of 4 (x, y) points in clockwise order\n rect -> A rect to test if its inside the quadrilateral \n \n return -> 'True' if inside 'False' otherwise\n \"\"\"\n # The quad is broke in to 2 clockwise triangles\n tri1 = quad_points[0], quad_points[1], quad_points[3]\n tri2 = 
quad_points[3], quad_points[1], quad_points[2]\n\n return all((cls.pointInTriangle(p, *tri1) or cls.pointInTriangle(p, *tri2)) \\\n for p in (rect.topleft, rect.topright, rect.bottomleft, rect.bottomright))\n \n","sub_path":"Scrap/RectInsideQuadrilateral.py","file_name":"RectInsideQuadrilateral.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"521740422","text":"# from referee import Referee\nfrom helpers import *\nfrom board_wrapper import BoardWrapper\n\nBOARD_WRAP = BoardWrapper()\n\nclass Ref_Wrapper:\n\t# def __init__(self):\n\t# \tself.ref = Referee()\n\n\tdef __init__(self,ref):\n\t\tself.ref = ref\n\n\tdef set_players(self,name1,name2):\n\t\tif not isinstance(name1,str) or not isinstance(name2,str):\n\t\t\traise Exception(\"Received invalid player name\")\n\n\t\treturn self.ref.set_players(name1,name2)\n\n\tdef make_action(self,action):\n\t\tif action != PASS:\n\t\t\ttry:\n\t\t\t\taction = BOARD_WRAP.string_to_point(action)\n\t\t\t\tBOARD_WRAP.check_point(action)\n\t\t\texcept:\n\t\t\t\traise Exception(\"Received invalid move\")\n\n\t\treturn self.ref.make_action(action)\n\n\tdef update_boards(self, new_board):\n\t\tBOARD_WRAP.check_board(new_board)\n\t\tself.ref.update_boards(new_board)\n\n\tdef get_winner(self, illegal_move):\n\t\tif not isinstance(illegal_move,bool):\n\t\t\traise Exception(\"Received invalid arg illegal_move\")\n\t\treturn self.ref.get_winner(illegal_move)","sub_path":"Deliverables/6/6.2/ref_wrapper.py","file_name":"ref_wrapper.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"115114798","text":"#!/usr/bin/python\n\n\"\"\"\nThis example shows how to create an empty Mininet object\n(without a topology object) and add nodes to it manually.\n\"\"\"\n\nfrom mininet.net import Mininet\nfrom mininet.node import UserSwitch,RemoteController, OVSKernelSwitch\nfrom mininet.cli import CLI\nfrom mininet.log import setLogLevel, info\nfrom mininet.link import TCLink\nfrom mininet.topo import Topo\n\ndef myNet():\n\n\t\"Create an empty network and add nodes to it.\"\n\n\tnet = Mininet( controller=RemoteController, switch=UserSwitch, link=TCLink, autoStaticArp=True )\n\n\tinfo( '*** Adding controller\\n' )\n\tnet.addController( 'c0' )\n\n\tnet.addHost( 'h1' , mac=\"00:00:00:00:00:01\" )\n\tnet.addHost( 'h2' , mac=\"00:00:00:00:00:02\" )\n\tnet.addHost( 'h3' , mac=\"00:00:00:00:00:03\" )\n\t#leaves\n\tnet.addSwitch('s1')\n\tnet.addSwitch('s2')\n\tnet.addSwitch('s3')\n\t#spines\n\tnet.addSwitch('s4')\n\tnet.addSwitch('s5')\n\n\tinfo( '*** Creating links\\n' )\n\tnet.addLink( 's1', 's4' , bw=3 )\n\tnet.addLink( 's1', 's5' , bw=3 ) \n\tnet.addLink( 's2', 's4' , bw=3 )\n\tnet.addLink( 's2', 's5' , bw=3 )\n\tnet.addLink( 's3', 's4' , bw=3 )\n\tnet.addLink( 's3', 's5' , bw=3 )\n\n\tnet.addLink( 'h1', 's1' , bw=6 )\n\tnet.addLink( 'h2', 's2' , bw=6 )\n\tnet.addLink( 'h3', 's3' , bw=6 )\n\n\tinfo( '*** Starting network\\n')\n\tnet.start()\n\n\tinfo( '*** Disabling tcp-segmentation overload on hosts\\' interfaces\\n')\n\tinfo( ' ofsoftswitch13 supports segments of length <= 1514 only\\n')\n\tnet.get('h1').cmd(\"ethtool -K h1-eth0 tso off\")\n\tnet.get('h2').cmd(\"ethtool -K h2-eth0 tso off\")\n\tnet.get('h3').cmd(\"ethtool -K h3-eth0 tso off\")\n\n\tinfo( '*** Starting tcpdump on node\\'s interfaces\\n')\n\tnet.get('h1').cmd(\"tcpdump -ni h1-eth0 -w ~/h1-eth0.pcap 
&\")\n\tnet.get('h2').cmd(\"tcpdump -ni h2-eth0 -w ~/h2-eth0.pcap &\")\n\tnet.get('s1').cmd(\"tcpdump -ni s1-eth1 -w ~/s1-eth1.pcap &\")\n\tnet.get('s1').cmd(\"tcpdump -ni s1-eth2 -w ~/s1-eth2.pcap &\")\n\tnet.get('s2').cmd(\"tcpdump -ni s2-eth1 -w ~/s2-eth1.pcap &\")\n\tnet.get('s2').cmd(\"tcpdump -ni s2-eth2 -w ~/s2-eth2.pcap &\")\n\n\tinfo('*** Opening udp port 5001 on h2\\n')\n\tnet.get('h2').cmd(\"nc -u -lp 5001 &\")\n\n\tinfo('*** Opening tcp port 4001 on h1\\n')\n\tnet.get('h1').cmd(\"nc -lp 4001 &\")\n\n\tinfo('\\n*** Opening iperf3 servers on hosts (10.0.0.1-3), on ports 6666 and 6667\\n')\n\n\tnet.get('h1').cmd(\"iperf3 -s -D -p 6666 && iperf3 -s -D -p 6667 && iperf3 -s -D -p 6668\")\n\tnet.get('h2').cmd(\"iperf3 -s -D -p 6666 && iperf3 -s -D -p 6667 && iperf3 -s -D -p 6668\")\n\tnet.get('h3').cmd(\"iperf3 -s -D -p 6666 && iperf3 -s -D -p 6667 && iperf3 -s -D -p 6668\")\n\n\tnet.get('h1').cmd(\"iperf3 -s -D -p 10000 && iperf3 -s -D -p 10001\")\n\tnet.get('h2').cmd(\"iperf3 -s -D -p 10000 && iperf3 -s -D -p 10001\")\n\tnet.get('h3').cmd(\"iperf3 -s -D -p 10000 && iperf3 -s -D -p 10001\")\n\n\t#net.get('h1').cmd(\"iperf3 -c 10.0.0.2 -p 10000 -l 1 -b 1b -t 1000 > outs/out1 &\")\n\t#net.get('h1').cmd(\"iperf3 -c 10.0.0.2 -p 10001 -l 1 -b 1b -t 1000 > outs/out2 &\")\n\t#net.get('h2').cmd(\"iperf3 -c 10.0.0.3 -p 10000 -l 1 -b 1b -t 1000 > outs/out3 &\")\n\t#net.get('h2').cmd(\"iperf3 -c 10.0.0.3 -p 10001 -l 1 -b 1b -t 1000 > outs/out4 &\")\n\t#net.get('h3').cmd(\"iperf3 -c 10.0.0.1 -p 10000 -l 1 -b 1b -t 1000 > outs/out5 &\")\n\t#net.get('h3').cmd(\"iperf3 -c 10.0.0.1 -p 10001 -l 1 -b 1b -t 1000 > outs/out6 &\")\n\n\t#net.get('s4').cmd(\"~/angelo-app/packETHcli -i s4-eth1 -m 2 -d 1000000 -n 0 -f ~/angelo-app/packets/small-tcp.pcap > outs/out1 &\")\n\t#net.get('s4').cmd(\"~/angelo-app/packETHcli -i s4-eth2 -m 2 -d 1000000 -n 0 -f ~/angelo-app/packets/small-tcp.pcap > outs/out2 &\")\n\t#net.get('s4').cmd(\"~/angelo-app/packETHcli -i s4-eth3 -m 2 -d 1000000 -n 0 -f ~/angelo-app/packets/small-tcp.pcap > outs/out3 &\")\n\t#net.get('s5').cmd(\"~/angelo-app/packETHcli -i s5-eth1 -m 2 -d 1000000 -n 0 -f ~/angelo-app/packets/small-tcp.pcap > outs/out4 &\")\n\t#net.get('s5').cmd(\"~/angelo-app/packETHcli -i s5-eth2 -m 2 -d 1000000 -n 0 -f ~/angelo-app/packets/small-tcp.pcap > outs/out5 &\")\n\t#net.get('s5').cmd(\"~/angelo-app/packETHcli -i s5-eth3 -m 2 -d 1000000 -n 0 -f ~/angelo-app/packets/small-tcp.pcap > outs/out6 &\")\n\n\tinfo( '*** Running CLI\\n' )\n\tCLI( net )\n\n\tinfo( '*** Stopping network' )\n\tnet.stop()\n\nif __name__ == '__main__':\n setLogLevel( 'info' )\n myNet()","sub_path":"ryu/app/beba/load_driven_forwarding/leafSpineTopo.py","file_name":"leafSpineTopo.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"227546435","text":"#Imports needed Libraries\nimport json\nimport requests\n\n#Opens json file\ndef apiparse(apireceive):\n weatherlist = str(apireceive[\"weather\"])\n weather = weatherlist[2:-2]\n weather = weather.split()\n weather = weather[3]\n weather = weather[1:-2]\n coordlist = str(apireceive[\"coord\"])\n coord = coordlist.split(\",\")\n lat = coord[0]\n lat = lat[8:]\n lon = coord[1]\n lon = lon[8:-1]\n return lat, lon, weather\n# if weather != \"Clear\":\n# if weather == 
\"Clear\":\n\n\n","sub_path":"weather/weather/lib/python3.7/site-packages/jparse.py","file_name":"jparse.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"150156777","text":"from google.appengine.ext import db\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import search\nfrom google.appengine.api import urlfetch\n\nimport base64\nimport datetime\nimport json\nimport logging\nimport urllib\nimport webapp2\nimport sys\n\nfrom datamodel import Library, Version, Content, CollectionReference, Dependency\nimport quota\nimport versiontag\nimport util\n\n\nclass AddLibrary(webapp2.RequestHandler):\n def get(self, owner, repo, kind):\n task_url = util.ingest_library_task(owner, repo, kind)\n util.new_task(task_url)\n self.response.write('OK')\n\nclass IngestLibrary(webapp2.RequestHandler):\n def get(self, owner, repo, kind):\n if not (kind == 'element' or kind == 'collection'):\n self.response.set_status(400)\n return\n owner = owner.lower()\n repo = repo.lower()\n library = Library.maybe_create_with_kind(owner, repo, kind)\n library_dirty = False\n if library.error is not None:\n library_dirty = True\n library.error = None\n\n logging.info('created library')\n\n github = quota.GitHub()\n if not github.reserve(3):\n self.response.set_status(500)\n return\n\n response = github.github_resource('repos', owner, repo, etag=library.metadata_etag)\n if response.status_code != 304:\n if response.status_code == 200:\n library.metadata = response.content\n library.metadata_etag = response.headers.get('ETag', None)\n library_dirty = True\n else:\n library.error = 'repo metadata not found (%d)' % response.status_code\n github.release()\n library.put()\n return\n\n response = github.github_resource('repos', owner, repo, 'contributors', etag=library.contributors_etag)\n if response.status_code != 304:\n if response.status_code == 200:\n library.contributors = response.content\n library.contributors_etag = response.headers.get('ETag', None)\n library.contributor_count = len(json.loads(response.content))\n library_dirty = True\n else:\n library.error = 'repo contributors not found (%d)' % response.status_code\n github.release()\n library.put()\n return\n\n\n response = github.github_resource('repos', owner, repo, 'git/refs/tags', etag=library.tags_etag)\n if response.status_code != 304:\n if response.status_code == 200:\n library.tags = response.content\n library.tags_etag = response.headers.get('ETag', None)\n library_dirty = True\n\n data = json.loads(response.content)\n if not isinstance(data, object):\n library.error = 'repo contains no valid version tags'\n github.release()\n library.put()\n return\n for version in data:\n tag = version['ref'][10:]\n if not versiontag.is_valid(tag):\n continue\n sha = version['object']['sha']\n version_object = Version(parent=library.key, id=tag, sha=sha)\n version_object.put()\n task_url = util.ingest_version_task(owner, repo, tag)\n util.new_task(task_url)\n util.publish_analysis_request(owner, repo, tag)\n else:\n library.error = 'repo tags not found (%d)' % response.status_code\n github.release()\n library.put()\n return\n\n if library_dirty:\n library.put()\n github.release()\n\nTIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'\n\nclass IngestVersion(webapp2.RequestHandler):\n def get(self, owner, repo, version):\n logging.info('ingesting version %s/%s/%s', owner, repo, version)\n\n github = quota.GitHub()\n if not github.reserve(1):\n self.response.set_status(500)\n 
return\n\n key = ndb.Key(Library, '%s/%s' % (owner, repo), Version, version)\n\n response = urlfetch.fetch(util.content_url(owner, repo, version, 'README.md'))\n readme = response.content\n\n try:\n content = Content(parent=key, id='readme', content=readme)\n content.etag = response.headers.get('ETag', None)\n content.put()\n except db.BadValueError:\n ver = key.get()\n ver.error = \"Could not store README.md as a utf-8 string\"\n ver.put()\n self.response.set_status(200)\n return\n\n response = github.markdown(readme)\n content = Content(parent=key, id='readme.html', content=response.content)\n content.put()\n\n response = urlfetch.fetch(util.content_url(owner, repo, version, 'bower.json'))\n try:\n json.loads(response.content)\n except ValueError:\n ver = key.get()\n ver.error = \"This version has a missing or broken bower.json\"\n ver.put()\n self.response.set_status(200)\n return\n\n content = Content(parent=key, id='bower', content=response.content)\n content.etag = response.headers.get('ETag', None)\n content.put()\n\n versions = Library.versions_for_key(key.parent())\n if versions[-1] == version:\n library = key.parent().get()\n if library.kind == \"collection\":\n task_url = util.ingest_dependencies_task(owner, repo, version)\n util.new_task(task_url)\n bower = json.loads(response.content)\n metadata = json.loads(library.metadata)\n logging.info('adding search index for %s', version)\n description = bower.get(\"description\", metadata.get(\"description\", \"\"))\n document = search.Document(doc_id='%s/%s' % (owner, repo), fields=[\n search.AtomField(name='full_name', value=metadata['full_name']),\n search.TextField(name='owner', value=owner),\n search.TextField(name='repo', value=repo),\n search.TextField(name='version', value=version),\n search.TextField(name='repoparts', value=' '.join(repo.split('-'))),\n search.TextField(name='description', value=description),\n search.TextField(name='keywords', value=' '.join(bower.get('keywords', []))),\n search.NumberField(name='stars', value=metadata.get('stargazers_count')),\n search.NumberField(name='subscribers', value=metadata.get('subscribers_count')),\n search.NumberField(name='forks', value=metadata.get('forks')),\n search.NumberField(name='contributors', value=library.contributor_count),\n search.DateField(name='updated_at', value=datetime.datetime.strptime(metadata.get('updated_at'), TIME_FORMAT))\n ])\n index = search.Index('repo')\n index.put(document)\n self.response.set_status(200)\n\nclass IngestDependencies(webapp2.RequestHandler):\n def get(self, owner, repo, version):\n logging.info('ingesting version %s/%s/%s', owner, repo, version)\n key = ndb.Key(Library, '%s/%s' % (owner, repo), Version, version, Content, 'bower')\n bower = json.loads(key.get().content)\n ver = key.parent().get()\n dependencies = bower.get('dependencies', {})\n library_keys = []\n dep_list = []\n for name in dependencies.keys():\n ver.dependencies.append(dependencies[name])\n dep = Dependency.from_string(dependencies[name])\n dep_list.append(dep)\n library_keys.append(ndb.Key(Library, '%s/%s' % (dep.owner.lower(), dep.repo.lower())))\n\n libraries = Library.get_or_create_list(library_keys)\n for i, library in enumerate(libraries):\n dep = dep_list[i]\n library.collections.append(CollectionReference(version=key.parent(), semver=dep.version))\n # FIXME: Can't assume this is an element.\n task_url = util.ingest_library_task(dep.owner.lower(), dep.repo.lower(), 'element')\n util.new_task(task_url)\n libraries.append(ver)\n ndb.put_multi(libraries)\n\nclass 
IngestAnalysis(webapp2.RequestHandler):\n def post(self):\n message_json = json.loads(urllib.unquote(self.request.body).rstrip('='))\n message = message_json['message']\n data = base64.b64decode(str(message['data']))\n attributes = message['attributes']\n owner = attributes['owner']\n repo = attributes['repo']\n version = attributes['version']\n\n logging.info('Ingesting analysis data %s/%s/%s', owner, repo, version)\n parent = Version.get_by_id(version, parent=ndb.Key(Library, '%s/%s' % (owner, repo)))\n\n # Don't accept the analysis data unless the version still exists in the datastore\n if parent is not None:\n content = Content(parent=parent.key, id='analysis', content=data)\n try:\n content.put()\n # TODO: Which exception is this for?\n # pylint: disable=bare-except\n except:\n logging.error(sys.exc_info()[0])\n\n self.response.set_status(200)\n\ndef delete_library(response, library_key):\n keys = [library_key] + ndb.Query(ancestor=library_key).fetch(keys_only=True)\n ndb.delete_multi(keys)\n\n for key in keys:\n response.write(repr(key.flat()) + '\\n')\n response.write('\\n')\n\n index = search.Index('repo')\n index.delete([library_key.id()])\n\nclass GithubStatus(webapp2.RequestHandler):\n def get(self):\n for key, value in quota.rate_limit().items():\n self.response.write('%s: %s
' % (key, value))\n\nclass DeleteLibrary(webapp2.RequestHandler):\n def get(self, owner, repo):\n self.response.headers['Content-Type'] = 'text/plain'\n delete_library(self.response, ndb.Key(Library, ('%s/%s' % (owner, repo)).lower()))\n\nclass DeleteEverything(webapp2.RequestHandler):\n def get(self):\n while True:\n deleted_something = False\n for library_key in Library.query().fetch(keys_only=True, limit=10):\n delete_library(self.response, library_key)\n deleted_something = True\n if not deleted_something:\n break\n\n # Delete any remaining entries in the search index.\n index = search.Index('repo')\n while True:\n docs = [\n document.doc_id\n for document\n in index.get_range(ids_only=True)]\n\n if not docs:\n break\n\n self.response.write('search docs: %s\\n' + repr(docs))\n index.delete(docs)\n\n self.response.write('Finished')\n\n\n# pylint: disable=invalid-name\napp = webapp2.WSGIApplication([\n webapp2.Route(r'/manage/github', handler=GithubStatus),\n webapp2.Route(r'/manage/add///', handler=AddLibrary, name='add'),\n webapp2.Route(r'/manage/delete//', handler=DeleteLibrary),\n webapp2.Route(r'/manage/delete_everything/yes_i_know_what_i_am_doing', handler=DeleteEverything),\n webapp2.Route(r'/task/ingest/library///', handler=IngestLibrary, name='nom'),\n webapp2.Route(r'/task/ingest/dependencies///', handler=IngestDependencies, name='nomdep'),\n webapp2.Route(r'/task/ingest/version///', handler=IngestVersion, name='nomver'),\n webapp2.Route(r'/_ah/push-handlers/analysis', handler=IngestAnalysis, name='nomalyze'),\n], debug=True)\n","sub_path":"src/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":10551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"277264279","text":"import random\r\n\r\ndef jogar():#para declarar uma funcao utilizamos o def:\r\n\r\n print(\"*******************\")\r\n print(\"Jogo de Adivinhação\")\r\n print(\"*******************\")\r\n\r\n\r\n\r\n numero = random.randrange(1,101) #ira gerar numeros aleatorios de 1 a 100\r\n total_de_tentativas = 0\r\n pontos = 1000\r\n\r\n #Definir o nivel de dificuldade do jogo.\r\n print(\"Qual o Nivel de Difículdade: \",numero)\r\n print(\"(1) Facil (2) Médio (3) Dificil\")\r\n\r\n nivel = int(input(\"Defina o Nivel: \"))\r\n\r\n if(nivel == 1 ):\r\n total_de_tentativas = 20\r\n\r\n elif(nivel == 2 ):\r\n total_de_tentativas = 10\r\n\r\n else:\r\n total_de_tentativas = 5\r\n\r\n for rodada in range (1,total_de_tentativas + 1): #acrescentar o +1 para ir ate a terceira tentativa\r\n print(\"Tentativa {} de {}\".format(rodada, total_de_tentativas))\r\n chute = int( input(\"Digite um numero: \"))\r\n print(\"Você Digitou: \",chute)\r\n\r\n if(chute < 1 or chute > 100):\r\n print(\"Você deve digitar um número entre 1 e 100!\")\r\n continue\r\n\r\n Acertou = numero == chute\r\n Acima = chute > numero\r\n Abaixo = chute < numero\r\n\r\n if(Acertou):\r\n print(\"Você acertou e fez {} pontos\".format(pontos))\r\n break #caso o usuario acerte ira sair do laço\r\n else:\r\n if(Acima):\r\n print(\"O seu numero esta acima do correto\")\r\n elif(Abaixo):\r\n print(\"O seu numero esta abaixo do correto\")\r\n pontos_perdidos = abs(numero - chute) #pontos perdidos da rodada\r\n pontos = round(pontos - pontos_perdidos) #subtraindo os pontos perdidos da pontuação total\r\n print(\"Fim do Jogo\")\r\n\r\nif (__name__ == \"__main__\"): #essa variavel permite que o programa seja executado sem precisar ser chamado pela janela (escolha o jogo)\r\n 
jogar()","sub_path":"Jogo.py","file_name":"Jogo.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"572271843","text":"import logging\n\nimport peewee\nimport requests\nfrom telegram.ext import CallbackContext\n\nimport config\nfrom wind_bot.models import Subscriber\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef start_command(update, context):\n \"\"\"Send a message when the command /start is issued.\"\"\"\n chat_id = update.message.chat_id\n\n logger.info(f\"Start command received. Chat ID: {chat_id}\")\n\n update.message.reply_text('Hi!')\n\n try:\n Subscriber.get(Subscriber.chat_id == chat_id)\n\n except peewee.DoesNotExist:\n logger.info(f\"Storing new subscriber {chat_id} to DB\")\n Subscriber(chat_id=chat_id).save()\n\n\ndef help_command(update, context):\n \"\"\"Send a message when the command /help is issued.\"\"\"\n logger.info(f\"Help command received. Chat ID: {update.message.chat_id}\")\n\n update.message.reply_text('Help!')\n\n\ndef echo(update, context):\n \"\"\"Echo the user message.\"\"\"\n logger.info(f\"Text received. Chat ID: {update.message.chat_id}\")\n\n update.message.reply_text(update.message.text)\n\n\ndef callback_message(context: CallbackContext):\n forecast = get_wind_forecast()\n wind_forecast = ['\\n'.join([f\"{i['date_time']} {item['name']}: {i['wind_speed']} m/s\"\n for i in item['forecast']]) for item in forecast if item['forecast']]\n\n for subscriber in Subscriber.select():\n chat_id = subscriber.chat_id\n logger.info(f\"Sending message to chat: {chat_id}\")\n if wind_forecast:\n for item in wind_forecast:\n context.bot.send_message(chat_id=chat_id, text=item)\n else:\n context.bot.send_message(chat_id=chat_id, text='Nothing for next five days')\n # context.bot.send_message(chat_id=chat_id, text='test')\n\n\ndef get_cities_ids():\n payload = {\n 'lat': config.LATITUDE,\n 'lon': config.LONGITUDE,\n 'cnt': config.NUMBER_OF_CITIES,\n 'appid': config.OPEN_WEATHER_API_KEY\n }\n forecast = requests.get(config.CURRENT_WEATHER_DATA_URL, params=payload).json()['list']\n cities_ids = [item['id'] for item in forecast]\n\n return cities_ids\n\n\ndef get_wind_forecast():\n cities_ids = get_cities_ids()\n payloads = [{'id': city_id, 'appid': config.OPEN_WEATHER_API_KEY} for city_id in cities_ids]\n forecasts = [requests.get(config.FIVE_DAYS_FORECAST_URL, params=payload).json()\n for payload in payloads]\n wind_forecast = [{'name': forecast['city']['name'], 'forecast': [{'date_time': item['dt_txt'],\n 'wind_speed': item['wind']['speed']} for item in forecast['list']\n if item['wind']['speed'] >= config.MIN_NEEDED_WIND_SPEED]}\n for forecast in forecasts]\n\n return wind_forecast\n","sub_path":"wind_bot/wind_bot.py","file_name":"wind_bot.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"167589406","text":"import pygame\nimport sys\nfrom random import randint\nfrom enum import Enum\n\n\nclass Incidents(Enum):\n start = 0 # 起点\n horseField = 1 # 马场\n encounterThief = 2 # 遭遇小偷\n involvedMurder = 3 # 卷入谋杀案\n changeMoney = 4 # 资金互换\n explosive = 5 # 炸药\n reachGoldMine = 6 # 到达金矿\n haveACold = 7 # 感冒\n strongWind = 8 # 遭遇大风\n friendshipWithLord = 9 # 与当地领主结好\n houseFiled = 10 # 无事件\n house = 11 # 房间\n\n\nclass PlayerTurn(Enum):\n start = 0 # 游戏开始\n PCMove = 1 # PC移动\n NPCMove = 2 
# NPC移动\n PCAct = 3 # PC行动\n NPCAct = 4 # NPC行动\n\n\nclass GameTurn(Enum):\n start = 0 # 游戏开始界面\n waitIn = 1 # 等待进入游戏\n initial = 2 # 实例化角色对象、地块对象、掷骰子对象\n playing = 3 # 进入游戏\n over = 4 # 游戏结束界面\n end = 5 # 游戏结束界面绘制完毕\n\n\nclass PC:\n def __init__(self, name):\n self.name = name\n self.position = 0 # 初始位置\n self.money = 1000 # 初始资金\n self.houseCounter = [0, 0, 0, 0, 0]\n self.transportation = \"无\" # 装备\n self.status = \"正常\" # 状态\n self.engine = 0 # 每回合额外移动一格,需激活\n self.chance = False # 互换资金的机会,需激活\n self.item = \"无\" # 持有道具\n self.ill = 0 # 生病冷却,需激活\n self.wind = False # 传送冷却,需激活\n self.free = False # 免费冷却,需激活\n\n def move(self):\n self.engine = 0 # 千里马引擎归零\n\n wind_or_in_jail = False\n point1 = randint(1, 6)\n point2 = 0\n if self.transportation != \"无\":\n point2 = randint(1, 6)\n\n if self.status == \"监禁\":\n self.status = \"保释\"\n point1, point2, wind_or_in_jail = 0, 0, True\n elif self.status == \"保释\":\n self.status = \"正常\"\n\n roll = point1 + point2\n\n if self.ill > 0:\n roll = roll // 2 # 感冒移动速度减半\n roll = 1 if roll == 0 else roll\n self.ill -= 1\n self.status = \"正常\" if self.ill == 0 else self.status\n\n if self.wind is True:\n wind_or_in_jail = self.wind\n roll = randint(1, 50) # 遭遇大风,随机落地\n self.wind = not self.wind\n\n self.position += roll\n self.money += 0 if self.position < 50 else 500 # 绕地图一圈奖励\n self.position -= 0 if self.position < 50 else 50 # 坐标限位\n\n return point1, point2, wind_or_in_jail\n\n def swift_horse_move(self, forward):\n if forward is False: # 后退\n if self.engine == 0 or self.engine == 1:\n self.position -= 1 if self.position > 0 else -49\n self.engine -= 1\n elif forward is True: # 前进\n if self.engine == 0 or self.engine == -1:\n self.position += 1 if self.position < 49 else -49\n self.engine += 1\n\n def incidents(self, Ls):\n land = Ls.lands[self.position]\n # 地块买满的奖励\n if Ls.is_full(self.name) is True:\n self.money += self.houseCounter[0] * 100\n Ls.PCAwardMessage = 2\n # 1.路过敌人房子\n if land.owner != self.name and land.owner != \"事件\" and land.owner != \"系统\":\n self.money -= land.level * 100\n return land.level * 100\n # 特殊事件房间\n elif land.owner == \"事件\" and self.engine == 0:\n if land.incident is Incidents.encounterThief:\n self.money -= 500 if self.transportation == \"无\" else 0\n elif land.incident is Incidents.involvedMurder:\n self.status = \"监禁\" if (self.status != \"保释\" and self.status != \"感冒\") else self.status\n elif land.incident is Incidents.changeMoney:\n self.chance = True\n elif land.incident is Incidents.explosive:\n self.item = \"炸药\"\n elif land.incident is Incidents.reachGoldMine:\n self.money += 1000\n elif land.incident is Incidents.haveACold:\n self.status = \"感冒\"\n self.ill = 3\n elif land.incident is Incidents.strongWind:\n self.wind = True\n elif land.incident == Incidents.friendshipWithLord:\n self.free = True\n return 0\n\n def messages(self, Ls):\n base_messages = self.__base_messages()\n incidents_messages = self.__incidents_messages(Ls)\n return base_messages, incidents_messages\n\n def __base_messages(self):\n messages = list()\n for i in range(3):\n messages.append(list())\n messages[0].append(\"昵称: %s\" % self.name)\n messages[0].append(\"坐标: %d\" % self.position)\n messages[1].append(\"状态: %s\" % self.status)\n messages[1].append(\"装备: %s\" % self.transportation)\n messages[2].append(\"物品: %s\" % self.item)\n messages[2].append(\"资金: %d金币\" % self.money)\n\n return messages\n\n def __incidents_messages(self, Ls):\n land = Ls.lands[self.position]\n messages = list()\n\n # 购买或升级地块及炸药使用提示\n if land.owner != \"事件\":\n 
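The last three statements of PC.move() above implement the 50-tile board wrap and the 500-coin bonus for passing the start tile. A small worked example, with an equivalent divmod form for comparison (the starting numbers are arbitrary):

```python
# Worked example of the position update at the end of move():
position, roll, money = 47, 8, 1000

position += roll                          # 55: ran past the last tile (49)
money += 0 if position < 50 else 500      # lap bonus for passing the start
position -= 0 if position < 50 else 50    # wrap back onto the 0-49 board
print(position, money)                    # 5 1500

# The same update via divmod; a single roll (even the 1-50 strong-wind
# teleport) never covers more than one lap, so the two forms always agree.
laps, wrapped = divmod(47 + 8, 50)
print(wrapped, 1000 + 500 * laps)         # 5 1500
```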
if land.owner == \"系统\":\n messages.append(\"这里是一片无主的荒地\")\n if self.free is True:\n messages.append(\"你可以按B键免费建立城堡(仅限一次)\")\n messages.append(\"省去%d金币\" % ((land.level + 1) * 100))\n else:\n messages.append(\"按B键花费%d金币建立城堡\" % ((land.level + 1) * 100))\n elif land.owner == self.name:\n messages.append(\"城墙上的卫兵向你举旗致敬\")\n if land.level < 5:\n if self.free is True:\n messages.append(\"你可以按B键免费升级城堡(仅限一次)\")\n messages.append(\"省去%d金币\" % ((land.level + 1) * 100))\n else:\n messages.append(\"按B键花费%d金币升级城堡\" % ((land.level + 1) * 100))\n else:\n messages.append(\"城堡已经很豪华了!\")\n else:\n messages.append(\"高耸的城堡阴沉地矗立于前方\")\n if self.engine == 0:\n messages.append(\"你到了别人的地盘,不得不支付%d金币过路费\" % (land.level * 100))\n if self.item == \"炸药\":\n messages.append(\"按B键使用炸药炸毁城堡\")\n\n elif land.incident is Incidents.start:\n messages.append(\"事件:你到达了起点\")\n\n elif land.incident is Incidents.horseField:\n messages.append(\"事件:你来到了马场\")\n if self.transportation == \"无\":\n messages.append(\"按B键花费1000金币购买一匹战马\")\n messages.append(\"你将以双倍速度进行移动!\")\n elif self.transportation == \"战马\":\n messages.append(\"按B键花费2000金币将战马升级为千里马\")\n messages.append(\"骑上千里马的你将可以进行额外行动!\")\n elif self.transportation == \"千里马\":\n messages.append(\"这里已经没有值得购买的好马了\")\n messages.append(\"你失望地离开了马场\")\n\n elif land.incident is Incidents.encounterThief:\n messages.append(\"事件:走在大街上的你遇到了小偷\")\n if self.transportation == \"无\":\n messages.append(\"你失去了500金币\")\n else:\n messages.append(\"幸运的是,骑在马上的你没有成为小偷的目标\")\n\n elif land.incident is Incidents.involvedMurder:\n if self.status == \"正常\" or self.status == \"监禁\":\n messages.append(\"事件: 你被卷入一场谋杀案,暂时无法脱身\")\n messages.append(\"你本回合无法移动,不掷骰子\")\n elif self.status == \"保释\":\n messages.append(\"事件: 你已被保释,下一回合将正常移动\")\n elif self.status == \"感冒\":\n messages.append(\"事件: 你因生病在医院修养,渡过了平静的一天\")\n messages.append(\"你幸运地避开了谋杀案的牵连\")\n\n elif land.incident is Incidents.changeMoney:\n messages.append(\"事件: 你碰巧获得了一个与他人互换财富的机会\")\n messages.append(\"按B键与敌人互换资金\")\n\n elif land.incident is Incidents.explosive:\n messages.append(\"事件: 你在路边捡到了炼金术士丢弃的炸药\")\n messages.append(\"在敌人的城堡中按B键使用炸药,仅限一次\")\n\n elif land.incident is Incidents.reachGoldMine:\n messages.append(\"事件: 你在山间偶然发现了一座金矿\")\n messages.append(\"你得到了1000金币\")\n\n elif land.incident is Incidents.haveACold:\n messages.append(\"事件: 你发现自己得了感冒,还好病得不算严重\")\n messages.append(\"你在三回合内的移动速度减半\")\n\n elif land.incident is Incidents.strongWind:\n messages.append(\"事件:你被一阵狂风卷起,身不由己地飞了起来\")\n messages.append(\"你下次移动后将在随机位置出现,不掷骰子\")\n\n elif land.incident is Incidents.friendshipWithLord:\n messages.append(\"事件:你与本地领主建立友谊,得到了他的承诺\")\n messages.append(\"你下次修建或升级城堡完全免费\")\n\n # 千里马移动不能触发事件,不需展示事件提示\n if self.engine != 0 and land.owner == \"事件\" and land.incident is not Incidents.start:\n messages = list()\n messages.append(\"事件:千里马的额外移动不能激活事件\")\n\n # 地块被买满的奖励提示\n if Ls.PCAwardMessage == 1 and Ls.PCAward is True:\n messages.append(\"<地块被买满,奖励将在本回合或下一回合到账>\")\n\n return messages\n\n def buy(self, land):\n self.__buy_land(land)\n self.__buy_horse() if land.incident is Incidents.horseField else False\n\n def __buy_land(self, land):\n price = land.price(self.name)\n if price != 0:\n self.money -= price if self.free is False else 0\n self.free = not self.free if self.free is True else self.free\n land.change_property(self.name)\n self.houseCounter[land.level - 1] += 1\n\n def __buy_horse(self):\n if self.money > 1000 and self.transportation == \"无\":\n self.money -= 1000\n self.transportation = \"战马\"\n elif self.money > 2000 and self.transportation == 
\"战马\":\n self.money -= 2000\n self.transportation = \"千里马\"\n\n\nclass NPC:\n def __init__(self, name):\n self.name = name\n self.position = 0 # 初始位置\n self.money = 1000 # 初始资金\n self.houseCounter = [0, 0, 0, 0, 0]\n self.transportation = \"无\" # 装备\n self.status = \"正常\" # 状态\n self.engine = 0 # 每回合额外移动一格,需激活\n self.chance = False # 互换资金的机会,需激活\n self.item = \"无\" # 持有道具\n self.ill = 0 # 生病冷却,需激活\n self.wind = False # 传送冷却,需激活\n self.free = False # 免费冷却,需激活\n\n def move(self):\n self.engine = 0 # 千里马引擎归零\n\n wind_or_in_jail = False\n point1 = randint(1, 6)\n point2 = 0\n if self.transportation != \"无\":\n point2 = randint(1, 6)\n\n if self.status == \"监禁\":\n self.status = \"保释\"\n point1, point2, wind_or_in_jail = 0, 0, True\n elif self.status == \"保释\":\n self.status = \"正常\"\n\n roll = point1 + point2\n\n if self.ill > 0:\n roll = roll // 2 # 感冒移动速度减半\n roll = 1 if roll == 0 else roll\n self.ill -= 1\n self.status = \"正常\" if self.ill == 0 else self.status\n\n if self.wind is True:\n wind_or_in_jail = self.wind\n roll = randint(1, 50) # 遭遇大风,随机落地\n self.wind = not self.wind\n\n # 坐标变化\n self.position += roll\n self.money += 0 if self.position < 50 else 500 # 绕地图一圈奖励\n self.position -= 0 if self.position < 50 else 50 # 坐标限位\n\n return point1, point2, wind_or_in_jail\n \n def swift_horse_move(self, lands, land_is_full):\n if self.money > 900:\n forward = self.position + 1 if self.position < 49 else 0\n backward = self.position - 1 if self.position > 0 else 49\n for i in range(5):\n self.__buy_land(lands[forward], land_is_full)\n for i in range(5):\n self.__buy_land(lands[backward], land_is_full)\n\n def incidents(self, Ls):\n land = Ls.lands[self.position]\n # 地块买满的奖励\n if Ls.is_full(self.name) is True:\n self.money += self.houseCounter[0] * 100\n Ls.NPCAwardMessage = 2\n # 1.路过敌人房子\n if land.owner != self.name and land.owner != \"事件\" and land.owner != \"系统\":\n self.money -= land.level * 100\n return land.level * 100\n # 特殊事件房间\n elif land.owner == \"事件\" and self.engine == 0:\n if land.incident is Incidents.encounterThief:\n self.money -= 500 if self.transportation == \"无\" else 0\n elif land.incident is Incidents.involvedMurder:\n self.status = \"监禁\" if (self.status != \"保释\" and self.status != \"感冒\") else self.status\n elif land.incident is Incidents.changeMoney:\n self.chance = True\n elif land.incident is Incidents.explosive:\n self.item = \"炸药\"\n elif land.incident is Incidents.reachGoldMine:\n self.money += 1000\n elif land.incident is Incidents.haveACold:\n self.status = \"感冒\"\n self.ill = 3\n elif land.incident is Incidents.strongWind:\n self.wind = True\n elif land.incident == Incidents.friendshipWithLord:\n self.free = True\n return 0\n\n def messages(self, Ls):\n base_messages = self.__base_messages()\n incidents_messages = self.__incidents_messages(Ls)\n return base_messages, incidents_messages\n\n def __base_messages(self):\n messages = list()\n for i in range(3):\n messages.append(list())\n messages[0].append(\"昵称: %s\" % self.name)\n messages[0].append(\"坐标: %d\" % self.position)\n messages[1].append(\"状态: %s\" % self.status)\n messages[1].append(\"装备: %s\" % self.transportation)\n messages[2].append(\"物品: %s\" % self.item)\n messages[2].append(\"资金: %d金币\" % self.money)\n\n return messages\n\n def __incidents_messages(self, Ls):\n land = Ls.lands[self.position]\n messages = list()\n\n # 购买或升级地块及炸药使用提示\n if land.owner != \"事件\":\n if land.owner == \"系统\":\n messages.append(\"这里是一片无主的荒地\")\n if self.free is True:\n messages.append(\"你可以按B键免费建立城堡(仅限一次)\")\n 
messages.append(\"省去%d金币\" % ((land.level + 1) * 100))\n else:\n messages.append(\"按B键花费%d金币建立城堡\" % ((land.level + 1) * 100))\n elif land.owner == self.name:\n messages.append(\"城墙上的卫兵向你举旗致敬\")\n if land.level < 5:\n if self.free is True:\n messages.append(\"你可以按B键免费升级城堡(仅限一次)\")\n messages.append(\"省去%d金币\" % ((land.level + 1) * 100))\n else:\n messages.append(\"按B键花费%d金币升级城堡\" % ((land.level + 1) * 100))\n else:\n messages.append(\"城堡已经很豪华了!\")\n else:\n messages.append(\"高耸的城堡阴沉地矗立于前方\")\n if self.engine == 0:\n messages.append(\"你到了别人的地盘,不得不支付%d金币过路费\" % (land.level * 100))\n if self.item == \"炸药\":\n messages.append(\"使用炸药炸毁城堡\")\n\n elif land.incident is Incidents.start:\n messages.append(\"事件:你到达了起点\")\n\n elif land.incident is Incidents.horseField:\n messages.append(\"事件:你来到了马场\")\n if self.transportation == \"无\":\n messages.append(\"花费1000金币购买一匹战马\")\n messages.append(\"你将以双倍速度进行移动!\")\n elif self.transportation == \"战马\":\n messages.append(\"花费2000金币将战马升级为千里马\")\n messages.append(\"骑上千里马的你将可以进行额外行动!\")\n elif self.transportation == \"千里马\":\n messages.append(\"这里已经没有值得购买的好马了\")\n messages.append(\"你失望地离开了马场\")\n\n elif land.incident is Incidents.encounterThief:\n messages.append(\"事件:走在大街上的你遇到了小偷\")\n if self.transportation == \"无\":\n messages.append(\"你失去了500金币\")\n else:\n messages.append(\"幸运的是,骑在马上的你没有成为小偷的目标\")\n\n elif land.incident is Incidents.involvedMurder:\n if self.status == \"正常\" or self.status == \"监禁\":\n messages.append(\"事件: 你被卷入一场谋杀案,暂时无法脱身\")\n messages.append(\"你本回合无法移动,不掷骰子\")\n elif self.status == \"保释\":\n messages.append(\"事件: 你已被保释,下一回合将正常移动\")\n elif self.status == \"感冒\":\n messages.append(\"事件: 你因生病在医院修养,渡过了平静的一天\")\n messages.append(\"你幸运地避开了谋杀案的牵连\")\n\n elif land.incident is Incidents.changeMoney:\n messages.append(\"事件: 你碰巧获得了一个与他人互换财富的机会\")\n\n elif land.incident is Incidents.explosive:\n messages.append(\"事件: 你在路边捡到了炼金术士丢弃的炸药\")\n messages.append(\"在敌人的城堡中使用炸药,仅限一次\")\n\n elif land.incident is Incidents.reachGoldMine:\n messages.append(\"事件: 你在山间偶然发现了一座金矿\")\n messages.append(\"你得到了1000金币\")\n\n elif land.incident is Incidents.haveACold:\n messages.append(\"事件: 你发现自己得了感冒,还好病得不算严重\")\n messages.append(\"你在三回合内的移动速度减半\")\n\n elif land.incident is Incidents.strongWind:\n messages.append(\"事件:你被一阵狂风卷起,身不由己地飞了起来\")\n messages.append(\"你下次移动后将在随机位置出现,不掷骰子\")\n\n elif land.incident is Incidents.friendshipWithLord:\n messages.append(\"事件:你与本地领主建立友谊,得到了他的承诺\")\n messages.append(\"你下次修建或升级城堡完全免费\")\n\n # 千里马移动不能触发事件,不需展示事件提示\n if self.engine != 0 and land.owner == \"事件\" and land.incident is not Incidents.start:\n messages = list()\n messages.append(\"事件:千里马的额外移动不能激活事件\")\n\n # 地块被买满的奖励提示\n if Ls.NPCAwardMessage == 1 and Ls.NPCAward is True:\n messages.append(\"<地块被买满,奖励将在本回合或下一回合到账>\")\n\n return messages\n\n def buy(self, land, land_is_full):\n price = land.price(self.name)\n if (self.money - price) >= self.__money_left_line() or self.free is True:\n self.__buy_land(land, land_is_full)\n self.__buy_horse() if land.incident is Incidents.horseField else False\n\n def __buy_land(self, land, land_is_full):\n price = land.price(self.name)\n if price != 0:\n level = land.level\n if level == 0 or self.free or (level == 1 and self.houseCounter[0] > 15) or \\\n (level == 2 and self.houseCounter[1] > 10) or (level == 3 and self.houseCounter[2] > 7) \\\n or level >= 4 or land_is_full is True or self.money > 2500:\n self.money -= price if self.free is False else 0\n self.free = not self.free if self.free is True else self.free\n land.change_property(self.name)\n 
self.houseCounter[land.level - 1] += 1\n\n def __buy_horse(self):\n if self.money > 1600 and self.transportation == \"无\":\n self.money -= 1000\n self.transportation = \"战马\"\n elif self.money > 2600 and self.transportation == \"战马\":\n self.money -= 2000\n self.transportation = \"千里马\"\n\n def __money_left_line(self):\n extra_money_for_thief = 0\n if self.transportation == \"无\" and (self.position + 1 < 10 and self.position + 6 >= 10):\n extra_money_for_thief = 500\n if self.houseCounter[0] <= 15:\n return 500 + extra_money_for_thief\n if self.houseCounter[1] <= 10:\n return 700 + extra_money_for_thief\n if self.houseCounter[2] <= 7:\n return 800 + extra_money_for_thief\n return 900 + extra_money_for_thief\n\n\nclass OneLand:\n def __init__(self, position, owner=\"系统\", incident=Incidents.houseFiled):\n self.owner = owner\n self.position = position\n self.level = 0\n self.incident = incident\n\n def price(self, who):\n if (who == self.owner and self.level < 5) or self.owner == \"系统\":\n return (self.level + 1) * 100\n return 0\n\n def change_property(self, who):\n self.owner = who\n self.level += 1\n self.incident = Incidents.house\n\n def bang(self):\n self.owner = \"系统\"\n self.level = 0\n self.incident = Incidents.houseFiled\n\n\nclass Landmasses:\n def __init__(self, PCName, NPCName):\n self.lands = list()\n self.PCName = PCName\n self.NPCName = NPCName\n self.PCAward = False\n self.NPCAward = False\n self.PCAwardMessage = 0\n self.NPCAwardMessage = 0\n\n for i in range(50):\n if (i + 1) % 5 == 1:\n self.lands.append(OneLand(i, owner=\"事件\", incident=Incidents(i // 5)))\n else:\n self.lands.append(OneLand(i))\n\n def is_full(self, name):\n if name == self.PCName and self.PCAward is True:\n return False\n if name == self.NPCName and self.NPCAward is True:\n return False\n counter = 0\n for one_land in self.lands:\n if one_land.level != 0:\n counter += 1\n if counter == 40:\n if name == self.PCName:\n self.PCAward = True\n else:\n self.NPCAward = True\n return True\n return False\n\n\nclass ShootDice:\n def __init__(self):\n self.randomSeries = list()\n self.finalPoints = list()\n self.finalPoints.append(0)\n\n def __set_final_points(self, points):\n self.finalPoints = []\n self.finalPoints.append(points[0])\n if points[1] != 0:\n self.finalPoints.append(points[1])\n\n def __get_random_series(self):\n self.randomSeries = []\n for i in range(200):\n self.randomSeries.append(randint(0, 5))\n\n def set_dice(self, points):\n if points[2] is False:\n self.__set_final_points(points)\n self.__get_random_series()\n\n\nclass MusicPlay:\n def __init__(self):\n self.music = pygame.mixer.music.load(\"./source/BackgroundMusic.mp3\")\n self.isPlaying = True\n pygame.mixer.music.set_volume(0.618)\n pygame.mixer.music.play(-1)\n \n def pause(self, pos):\n if pos[0] > (41 * 25 + 8) and pos[0] < (41 * 25 + 94) and pos[1] > (27 * 25 + 4) and pos[1] < (27 * 25 + 68):\n pygame.mixer.music.pause() if self.isPlaying is True else pygame.mixer.music.unpause()\n self.isPlaying = not self.isPlaying\n return True\n return False\n\n\nclass GameManager:\n def __init__(self):\n pygame.init() # pygame初始化\n self.clock = pygame.time.Clock() # 帧率控制\n self.font = pygame.font.Font(\"./source/simhei.ttf\", 20) # 游戏字体\n self.screen = pygame.display.set_mode((1200, 825)) # 窗口初始化\n pygame.display.set_icon(pygame.image.load(\"./source/Dog.ico\").convert_alpha())\n pygame.display.set_caption(\"大富翁\")\n self.backgroundMusic = MusicPlay() # 背景音乐播放\n\n self.playAgain = False # 多次进行游戏\n self.initialToPlaying = False # 最初进入游戏的标志变量\n 
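Landmasses.__init__ above marks every fifth tile as an event tile, since (i + 1) % 5 == 1 holds exactly for i = 0, 5, ..., 45, and Incidents(i // 5) then assigns the ten event types in order. A small sketch that reproduces just that mapping; only the ten event members of the enum are redeclared here:

```python
from enum import Enum

class Incidents(Enum):          # the ten event members from the record above
    start = 0
    horseField = 1
    encounterThief = 2
    involvedMurder = 3
    changeMoney = 4
    explosive = 5
    reachGoldMine = 6
    haveACold = 7
    strongWind = 8
    friendshipWithLord = 9

event_tiles = {i: Incidents(i // 5) for i in range(50) if (i + 1) % 5 == 1}
print(sorted(event_tiles))              # [0, 5, 10, 15, 20, 25, 30, 35, 40, 45]
print(event_tiles[0], event_tiles[45])  # Incidents.start Incidents.friendshipWithLord
```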
self.gameTurn = GameTurn.start # 游戏状态\n self.playerTurn = PlayerTurn.start # 玩家行动回合\n self.PCActKey = [False, False, False] # 存储PCAct按键,依次是K_b, K_a, K_d\n self.cheat = [0, 0, 0] # 开发者模式标记\n self.spaceKeyDown = pygame.event # 用于存储空格被按下的事件\n self.winner = \"\" # 胜利者\n\n self.start = pygame.image.load(\"./source/Start.png\").convert_alpha()\n self.gameFail = pygame.image.load(\"./source/GameFail.png\").convert_alpha()\n self.gameWin = pygame.image.load(\"./source/GameWin.png\").convert_alpha()\n self.gameMap = pygame.image.load(\"./source/Map.png\").convert_alpha()\n self.tips = pygame.image.load(\"./source/Tips.png\").convert_alpha()\n\n self.musicOn = pygame.image.load(\"./source/MusicOn.png\").convert_alpha()\n self.musicOff = pygame.image.load(\"./source/MusicOff.png\").convert_alpha()\n self.musicImage = self.musicOn\n self.musicBarrier = pygame.image.load(\"./source/MusicBarrier.png\").convert_alpha()\n self.musicButtonLocation = (41 * 25, 27 * 25)\n self.musicButtonRect = pygame.Rect(41 * 25, 27 * 25, 100, 75)\n\n self.activeOn = pygame.image.load(\"./source/ActiveOn.png\").convert_alpha()\n self.activeOff = pygame.image.load(\"./source/ActiveOff.png\").convert_alpha()\n\n self.PCName = \"\"\n self.PCImage = pygame.image.load(\"./source/PC.png\").convert_alpha()\n self.PCFixImage = pygame.image.load(\"./source/PCFix.png\").convert_alpha()\n self.PCBoard = [(5 * 25, 4 * 25 + 12), (5 * 25, 8 * 25)]\n\n self.NPCName = \"\"\n self.NPCImage = pygame.image.load(\"./source/NPC.png\").convert_alpha()\n self.NPCFixImage = pygame.image.load(\"./source/NPCFix.png\").convert_alpha()\n self.NPCBoard = [(27 * 25, 4 * 25 + 12), (27 * 25, 8 * 25)]\n\n self.diceImages = list()\n for i in range(6):\n self.diceImages.append(pygame.image.load(\"./source/Dice\" + str(i + 1) + \".png\").convert_alpha())\n self.diceBarrier = pygame.image.load(\"./source/DiceBarrier.png\").convert_alpha()\n self.diceBoard = [(22 * 25, 18 * 25 + 5), (19 * 25 + 12, 18 * 25 + 5), (24 * 25 + 13, 18 * 25 + 5)]\n self.diceLocation = list()\n self.diceLocation.append(self.diceBoard[0])\n self.diceSteps = (0, 0, True)\n\n self.cattleOfPC = list()\n self.wasteland = pygame.image.load(\"./source/Wasteland.png\").convert_alpha()\n self.cattleOfPC.append(self.wasteland)\n for i in range(5):\n self.cattleOfPC.append(pygame.image.load(\"./source/HouseA\" + str(i + 1) + \".png\").convert_alpha())\n\n self.cattleOfNPC = list()\n self.cattleOfNPC.append(self.wasteland)\n for i in range(5):\n self.cattleOfNPC.append(pygame.image.load(\"./source/HouseB\" + str(i + 1) + \".png\").convert_alpha())\n \n def event_deal(self):\n for event in pygame.event.get():\n # 退出游戏\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_e):\n self.__quit()\n\n # 键盘事件\n if event.type == pygame.KEYDOWN:\n # 重玩游戏\n if event.key == pygame.K_r:\n self.playAgain = True\n\n # 进入游戏Play界面\n elif event.key == pygame.K_SPACE and self.gameTurn is GameTurn.waitIn:\n self.gameTurn = GameTurn.initial\n self.spaceKeyDown = event\n\n # 玩家回合切换\n elif event.key == pygame.K_SPACE and self.gameTurn is GameTurn.playing:\n if self.playerTurn is PlayerTurn.start:\n self.playerTurn = PlayerTurn.PCMove\n\n # 开发者模式\n elif self.gameTurn is GameTurn.playing and self.__developer_pattern_check() is False:\n self.__set_developer_pattern(event.key)\n\n # PCAct按键检测\n self.__set_PC_act_key(event.key)\n\n # 音乐播放控制的鼠标事件\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == pygame.BUTTON_LEFT:\n self.__music_pause(event.pos)\n\n def get_character_name(self, 
PC, NPC):\n self.PCName = PC\n self.NPCName = NPC\n\n @staticmethod\n def update():\n pygame.display.update()\n\n def draw_beginning(self):\n for i in range(150):\n self.clock.tick(300)\n self.screen.blit(self.gameMap, (0, 0))\n self.screen.blit(self.PCFixImage, (6 * 25 + 13 + i * 1 - 10, 6 * 25 + 13 - 10))\n self.screen.blit(self.NPCFixImage, (38 * 25 + 13 - i * 1 - 10, 6 * 25 + 13 - 10))\n self.screen.blit(self.start, (175, 325))\n self.screen.blit(self.musicImage, (41 * 25, 27 * 25))\n pygame.display.update()\n\n def draw_map(self):\n self.screen.blit(self.gameMap, (0, 0))\n\n def draw_character(self, PC_pos, NPC_pos):\n # 动态位置绘制\n if self.playerTurn is PlayerTurn.PCAct or self.playerTurn is PlayerTurn.NPCMove or \\\n self.playerTurn is PlayerTurn.start or self.initialToPlaying is True:\n self.screen.blit(self.NPCImage, self.__location_convert(NPC_pos))\n self.screen.blit(self.PCImage, self.__location_convert(PC_pos))\n else:\n self.screen.blit(self.PCImage, self.__location_convert(PC_pos))\n self.screen.blit(self.NPCImage, self.__location_convert(NPC_pos))\n # 固定位置绘制\n self.screen.blit(self.PCFixImage, (19 * 25 - 21, 4 * 25))\n self.screen.blit(self.NPCFixImage, (41 * 25 - 21, 4 * 25))\n\n def draw_active(self):\n if self.playerTurn is PlayerTurn.PCMove or self.playerTurn is PlayerTurn.PCAct:\n self.screen.blit(self.activeOn, (4 * 25, 4 * 25))\n self.screen.blit(self.activeOn, (4 * 25, 8 * 25))\n self.screen.blit(self.activeOff, (26 * 25, 4 * 25))\n self.screen.blit(self.activeOff, (26 * 25, 8 * 25))\n elif self.playerTurn is PlayerTurn.NPCMove or self.playerTurn is PlayerTurn.NPCAct:\n self.screen.blit(self.activeOff, (4 * 25, 4 * 25))\n self.screen.blit(self.activeOff, (4 * 25, 8 * 25))\n self.screen.blit(self.activeOn, (26 * 25, 4 * 25))\n self.screen.blit(self.activeOn, (26 * 25, 8 * 25))\n else:\n self.screen.blit(self.activeOff, (4 * 25, 4 * 25))\n self.screen.blit(self.activeOff, (4 * 25, 8 * 25))\n self.screen.blit(self.activeOff, (26 * 25, 4 * 25))\n self.screen.blit(self.activeOff, (26 * 25, 8 * 25))\n\n def draw_land(self, all_lands):\n for land in all_lands:\n if land.owner == self.PCName:\n self.screen.blit(self.cattleOfPC[land.level], self.__location_convert(land.position))\n elif land.owner == self.NPCName:\n self.screen.blit(self.cattleOfNPC[land.level], self.__location_convert(land.position))\n else:\n self.screen.blit(self.wasteland, self.__location_convert(land.position))\n\n def draw_tips(self):\n if self.gameTurn is not GameTurn.over:\n self.screen.blit(self.tips, (3 * 25, 27 * 25))\n\n def __developer_pattern_check(self):\n if self.cheat[0] * self.cheat[1] * self.cheat[2] == 1:\n return True\n return False\n\n def __set_developer_pattern(self, event_key):\n if event_key == pygame.K_d:\n self.cheat[0] = 1\n if self.cheat[0] == 1 and event_key == pygame.K_w:\n self.cheat[1] = 1\n if self.cheat[1] == 1 and event_key == pygame.K_q:\n self.cheat[2] = 1\n\n def __set_PC_act_key(self, event_key):\n if event_key == pygame.K_b:\n self.PCActKey[0] = True\n if event_key == pygame.K_a:\n self.PCActKey[1] = True\n if event_key == pygame.K_d:\n self.PCActKey[2] = True\n\n def draw_messages(self, messages):\n PC_messages = messages[0]\n for i in range(len(PC_messages[0])):\n for j in range(len(PC_messages[0][i])):\n self.screen.blit(self.font.render(PC_messages[0][i][j], True, [0, 0, 0]),\n (self.PCBoard[0][0] + j * 25 * 6, self.PCBoard[0][1] + i * 25))\n for i in range(len(PC_messages[1])):\n self.screen.blit(self.font.render(PC_messages[1][i], True, [0, 0, 255]),\n 
(self.PCBoard[1][0], self.PCBoard[1][1] + i * 25))\n\n NPC_messages = messages[1]\n if self.__developer_pattern_check() is True:\n for i in range(len(NPC_messages[0])):\n for j in range(len(NPC_messages[0][i])):\n self.screen.blit(self.font.render(NPC_messages[0][i][j], True, [0, 0, 0]),\n (self.NPCBoard[0][0] + j * 25 * 6, self.NPCBoard[0][1] + i * 25))\n for i in range(len(NPC_messages[1])):\n self.screen.blit(self.font.render(NPC_messages[1][i], True, [0, 0, 255]),\n (self.NPCBoard[1][0], self.NPCBoard[1][1] + i * 25))\n else:\n self.screen.blit(self.font.render(\"依次按下DWQ进入开发者模式\", True, [0, 0, 255]), (27 * 25, 7 * 25))\n self.screen.blit(self.font.render(\"查看NPC的状态\", True, [0, 0, 255]), (27 * 25, 8 * 25))\n\n def __music_pause(self, mouse_pos):\n if self.backgroundMusic.pause(mouse_pos) is True:\n self.__draw_music_button_change(self.backgroundMusic.isPlaying)\n\n def draw_music_button(self):\n self.screen.blit(self.musicImage, self.musicButtonLocation)\n\n def __draw_music_button_change(self, is_playing):\n self.musicImage = self.musicOn if is_playing is True else self.musicOff\n self.screen.blit(self.musicImage, self.musicButtonLocation) # Button Down\n pygame.display.update(self.musicButtonRect)\n pygame.time.delay(100)\n self.screen.blit(self.musicBarrier, self.musicButtonLocation)\n self.screen.blit(self.musicImage, self.musicButtonLocation) # Button Up\n pygame.display.update(self.musicButtonRect)\n\n def __set_dice_location(self):\n self.diceLocation = []\n if self.diceSteps[1] == 0:\n self.diceLocation.append(self.diceBoard[0])\n else:\n self.diceLocation.append(self.diceBoard[1])\n self.diceLocation.append(self.diceBoard[2])\n\n def draw_dice(self, final_points, random_series):\n if self.diceSteps[2] is False and \\\n (self.playerTurn is PlayerTurn.PCMove or self.playerTurn is PlayerTurn.NPCMove):\n self.__set_dice_location()\n len_of_dice_location = len(self.diceLocation)\n self.clock.tick(5 * len_of_dice_location)\n for rand in random_series:\n self.screen.blit(self.diceBarrier, (18 * 25, 17 * 25))\n for i in range(len_of_dice_location):\n self.screen.blit(self.diceImages[rand], self.diceLocation[i])\n pygame.display.update()\n self.screen.blit(self.diceBarrier, (18 * 25, 17 * 25))\n for i in range(len_of_dice_location):\n self.screen.blit(self.diceImages[final_points[i] - 1], self.diceLocation[i])\n pygame.display.update()\n pygame.time.delay(700)\n\n def draw_fix_dice(self, final_points):\n for i in range(len(self.diceLocation)):\n self.screen.blit(self.diceImages[final_points[i] - 1], self.diceLocation[i])\n\n def turn_change(self):\n if self.playerTurn is PlayerTurn.PCMove:\n self.playerTurn = PlayerTurn.NPCMove\n elif self.playerTurn is PlayerTurn.NPCMove:\n self.playerTurn = PlayerTurn.PCAct\n elif self.playerTurn is PlayerTurn.PCAct:\n self.playerTurn = PlayerTurn.NPCAct\n elif self.playerTurn is PlayerTurn.NPCAct:\n self.playerTurn = PlayerTurn.start\n self.initialToPlaying = False\n pygame.event.post(self.spaceKeyDown) # 抛出一个空格被按下的事件,提高游戏的操作简便性\n\n def game_end_check(self, PC_money, NPC_money):\n if (PC_money <= 0 or NPC_money <= 0) and self.gameTurn is not GameTurn.end:\n self.winner = self.NPCName if PC_money < NPC_money else self.PCName\n self.gameTurn = GameTurn.over\n\n def draw_game_end(self):\n if self.gameTurn is GameTurn.over:\n self.gameTurn = GameTurn.end\n self.screen.blit(self.diceBarrier, (18 * 25, 17 * 25))\n if self.winner == self.PCName:\n self.screen.blit(self.gameWin, (175, 325))\n else:\n self.screen.blit(self.gameFail, (175, 325))\n\n def 
play_again_check(self):\n if self.playAgain is True:\n self.playAgain = not self.playAgain\n self.gameTurn = GameTurn.start\n self.playerTurn = PlayerTurn.start\n self.PCActKey = [False, False, False]\n self.cheat = [0, 0, 0]\n self.diceLocation = list()\n self.diceLocation.append(self.diceBoard[0])\n self.diceSteps = (0, 0, True)\n\n @staticmethod\n def __location_convert(position):\n if position <= 15:\n return position * 75, 0\n if position <= 25:\n return 15 * 75, (position - 15) * 75\n if position <= 40:\n return (15 - (position - 25)) * 75, 10 * 75\n if position <= 49:\n return 0, (9 - (position - 41)) * 75\n\n def __quit(self):\n if self.backgroundMusic.isPlaying is True:\n pygame.mixer.music.fadeout(1500)\n pygame.time.delay(1500)\n pygame.quit()\n sys.exit(0)\n\n\ngameManager = GameManager() # 游戏各项初始化,素材载入\nwhile True:\n gameManager.event_deal() # 事件处理\n\n if gameManager.gameTurn is GameTurn.start:\n gameManager.draw_beginning() # 开始界面动画绘制\n gameManager.gameTurn = GameTurn.waitIn # 游戏状态切换\n\n if gameManager.gameTurn is GameTurn.initial:\n hero = NPC(\"Naruto\") # PC\n enemy = NPC(\"Sasuke\") # NPC\n lands = Landmasses(hero.name, enemy.name) # 地块初始化\n shootDice = ShootDice() # 骰子管理类\n gameManager.get_character_name(hero.name, enemy.name) # 获取玩家昵称\n gameManager.gameTurn = GameTurn.playing # 游戏状态切换\n gameManager.initialToPlaying = True\n\n # 测试代码\n hero.money = 10000\n enemy.money = 10000\n\n if gameManager.gameTurn is GameTurn.playing:\n gameManager.clock.tick(10) # 帧数\n gameManager.draw_map() # 地图绘制\n gameManager.draw_land(lands.lands) # 地块绘制\n gameManager.draw_character(hero.position, enemy.position) # 人物绘制\n gameManager.draw_active() # 活跃状态绘制\n gameManager.draw_messages((hero.messages(lands), enemy.messages(lands))) # 信息提示绘制\n gameManager.draw_fix_dice(shootDice.finalPoints) # 固定骰子在屏幕上\n gameManager.draw_music_button() # 音乐播放状态图片绘制\n gameManager.game_end_check(hero.money, enemy.money) # 游戏自然结束判定\n gameManager.draw_tips() # 常态操作提示绘制\n\n if gameManager.playerTurn is PlayerTurn.PCMove:\n gameManager.diceSteps = hero.move()\n enemy.money += hero.incidents(lands)\n shootDice.set_dice(gameManager.diceSteps)\n\n elif gameManager.playerTurn is PlayerTurn.PCAct:\n if lands.lands[hero.position].incident is Incidents.changeMoney and enemy.money > hero.money:\n temp = enemy.money\n enemy.money = hero.money\n hero.money = temp\n hero.chance = not enemy.chance\n elif hero.item == \"炸药\":\n now = hero.position\n if lands.lands[now].owner == enemy.name:\n hero.item = \"无\"\n for i in range(lands.lands[now].level):\n hero.houseCounter[i] -= 1\n lands.lands[now].bang()\n for i in range(5):\n hero.buy(lands.lands[now], lands.is_full(hero.name))\n elif hero.transportation == \"千里马\":\n forward = now + 1 if now < 49 else 0\n backward = now - 1 if now > 0 else 49\n if lands.lands[forward].owner == enemy.name:\n hero.item = \"无\"\n for i in range(lands.lands[forward].level):\n enemy.houseCounter[i] -= 1\n lands.lands[forward].bang()\n for i in range(5):\n hero.buy(lands.lands[forward], lands.is_full(hero.name))\n elif lands.lands[backward].owner == enemy.name:\n hero.item = \"无\"\n for i in range(lands.lands[backward].level):\n enemy.houseCounter[i] -= 1\n lands.lands[backward].bang()\n for i in range(5):\n hero.buy(lands.lands[backward], lands.is_full(hero.name))\n for i in range(5):\n hero.buy(lands.lands[hero.position], lands.is_full(hero.name))\n if hero.transportation == \"千里马\":\n hero.swift_horse_move(lands.lands, lands.is_full(hero.name))\n\n elif gameManager.playerTurn is PlayerTurn.NPCMove:\n 
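GameManager.__location_convert above maps a board position (0-49) to pixel coordinates by walking clockwise around the edge of the map, one 75-pixel tile per step. A standalone copy of the same formulas with a few spot checks at the corners:

```python
def location_convert(position):
    if position <= 15:                        # top edge, left to right
        return position * 75, 0
    if position <= 25:                        # right edge, top to bottom
        return 15 * 75, (position - 15) * 75
    if position <= 40:                        # bottom edge, right to left
        return (15 - (position - 25)) * 75, 10 * 75
    if position <= 49:                        # left edge, bottom to top
        return 0, (9 - (position - 41)) * 75

assert location_convert(0) == (0, 0)          # start tile, top-left corner
assert location_convert(15) == (1125, 0)      # top-right corner
assert location_convert(25) == (1125, 750)    # bottom-right corner
assert location_convert(40) == (0, 750)       # bottom-left corner
assert location_convert(49) == (0, 75)        # one tile below the start
```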
gameManager.diceSteps = enemy.move()\n hero.money += enemy.incidents(lands)\n shootDice.set_dice(gameManager.diceSteps)\n\n elif gameManager.playerTurn is PlayerTurn.NPCAct:\n if lands.lands[enemy.position].incident is Incidents.changeMoney and hero.money > enemy.money:\n temp = hero.money\n hero.money = enemy.money\n enemy.money = temp\n enemy.chance = not enemy.chance\n elif enemy.item == \"炸药\":\n now = enemy.position\n if lands.lands[now].owner == hero.name:\n enemy.item = \"无\"\n for i in range(lands.lands[now].level):\n hero.houseCounter[i] -= 1\n lands.lands[now].bang()\n for i in range(5):\n enemy.buy(lands.lands[now], lands.is_full(enemy.name))\n elif enemy.transportation == \"千里马\":\n forward = now + 1 if now < 49 else 0\n backward = now - 1 if now > 0 else 49\n if lands.lands[forward].owner == hero.name:\n enemy.item = \"无\"\n for i in range(lands.lands[forward].level):\n hero.houseCounter[i] -= 1\n lands.lands[forward].bang()\n for i in range(5):\n enemy.buy(lands.lands[forward], lands.is_full(enemy.name))\n elif lands.lands[backward].owner == hero.name:\n enemy.item = \"无\"\n for i in range(lands.lands[backward].level):\n hero.houseCounter[i] -= 1\n lands.lands[backward].bang()\n for i in range(5):\n enemy.buy(lands.lands[backward], lands.is_full(enemy.name))\n for i in range(5):\n enemy.buy(lands.lands[enemy.position], lands.is_full(enemy.name))\n if enemy.transportation == \"千里马\":\n enemy.swift_horse_move(lands.lands, lands.is_full(enemy.name))\n\n gameManager.draw_dice(shootDice.finalPoints, shootDice.randomSeries) # 掷骰子动画\n gameManager.turn_change() # 玩家回合切换\n gameManager.draw_game_end() # 游戏结束时绘制游戏结束图片\n gameManager.update() # 画面绘制刷新\n\n gameManager.play_again_check() # 重玩游戏检查\n","sub_path":"OldProject/AlphaVersion/new - Alpha1 - Beta11.py","file_name":"new - Alpha1 - Beta11.py","file_ext":"py","file_size_in_byte":48243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"573417829","text":"# Item 6: Avoid Using start, end and stride in a Single Slice\n\n# Using a third parameter it is possible to select every n'th element in a slice operation\na = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']\nodds = a[::2] # ['red', 'yellow', 'blue']\nevens = a[1::2] # ['orange', 'green', 'purple']\n\n# A common trick to reverse a byte string is the use of -1.\n# # This however does not work for unicode characters encoded as UTF-8 strings.\nx = b'mongoose'\ny = x[::-1] # b'esoognom'\n\n# While this is easy to understand, other slice operations are harder to understand, especially when all three\n# parameters are provided and/or when they are negative:\na = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\na[2::2] # ['c', 'e', 'g']\na[-2::-2] # ['g', 'e', 'c', 'a']\na[-2:2:-2] # ['g', 'e']\na[2:2:-2] # []\n\n# Such slice operations should be avoided to maintain readability. 
A better way is to separate into a slice and\n# a stride operation, e.g.:\nb = a[::2] # ['a', 'c', 'e', 'g']\nc = b[1:-1] # ['c', 'e']\n\n# If this method is too costly from a memory or runtime perspective, 'islice' of the package 'itertools' poses\n# an alternative that doesn't permit negative start/end/stride values.\n\n\n\n\n\n","sub_path":"item6.py","file_name":"item6.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"503342162","text":"import json\nimport random\nimport datetime\n\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom django.db.models import Q\nfrom dateutil.relativedelta import relativedelta\n\nfrom product.models import (\n MainCategory, \n SubCategory,\n Product,\n ProductInformation,\n ProductTag\n)\n\nclass CategoryView(View):\n def get(self, request):\n try:\n main_categories = MainCategory.objects.prefetch_related('subcategory_set').all()\n if main_categories.exists():\n categories = [{ \n 'id' : main_category.id,\n 'name' : main_category.name,\n 'imageUrl' : main_category.image_url,\n 'imageActiveUrl' : main_category.image_active_url,\n 'sub_categories' : [{\n 'id' : sub_category.id,\n 'name' : sub_category.name\n } for sub_category in main_category.subcategory_set.all()]\n } for main_category in main_categories]\n\n return JsonResponse({'message':'SUCCESS', 'categories':categories}, status=200)\n else:\n return JsonResponse({'message':'This category does not exist.'}, status=404)\n\n except ValueError:\n return JsonResponse({'message':'VALUE_ERROR'}, status=400)\n\n\nclass ProductListView(View):\n def get(self, request):\n try:\n main_category_id = request.GET.get('main')\n sub_category_id = request.GET.get('sub') \n ordering = request.GET.get('ordering') \n search = request.GET.get('search')\n products = Product.objects.select_related('sub_category', 'sub_category__main_category', 'discount')\n \n filters = {}\n\n if main_category_id:\n filters['sub_category__main_category__id'] = main_category_id\n \n product = products.filter(**filters).first()\n\n main_category = {\n 'id' : product.sub_category.main_category.id,\n 'name' : product.sub_category.main_category.name,\n 'imageUrl' : product.sub_category.main_category.image_active_url\n }\n\n sub_categories = [{\n 'id' : sub_category.id,\n 'name' : sub_category.name\n } for sub_category in SubCategory.objects.filter(main_category_id=main_category_id)]\n\n if sub_category_id:\n filters['sub_category_id'] = sub_category_id\n\n sort_type_set = {\n '0' : 'id',\n '1' : '-create_time',\n '2' : '-sales_count',\n '3' : 'price',\n '4' : '-price' \n }\n\n sortings = [\n { \n 'id' : 0, \n 'name' : '추천순'\n },\n { \n 'id' : 1, \n 'name' : '신상품순'\n },\n {\n 'id' : 2, \n 'name' : '인기상품순'\n },\n {\n 'id' : 3, \n 'name' : '낮은 가격순'\n },\n {\n 'id' : 4, \n 'name' : '높은 가격순'\n } \n ]\n\n if ordering in sort_type_set:\n products = products.order_by('is_sold_out', sort_type_set[ordering])\n\n q = Q()\n\n if search:\n q &= Q(name__contains=search) | Q(content__contains=search) | Q(productinformation__information__contains=search)\n\n products = [{\n 'id' : product.id,\n 'name' : product.name,\n 'content' : product.content,\n 'imageUrl' : product.image_url,\n 'isSoldOut' : product.is_sold_out,\n 'discountPercent' : product.discount.discount_percent,\n 'discountName' : product.discount.name,\n 'discountContent' : product.discount.discount_content,\n 'discountPrice' : product.price - int(product.price * 
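As a concrete illustration of the itertools.islice alternative mentioned in the item6.py record above: islice takes only non-negative start/stop/step arguments, works lazily on any iterable, and avoids building the intermediate list that chained slicing creates.

```python
from itertools import islice

a = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']

evens = list(islice(a, 0, None, 2))   # ['a', 'c', 'e', 'g'], like a[::2]
middle = list(islice(a, 2, 6))        # ['c', 'd', 'e', 'f'], like a[2:6]
print(evens, middle)
```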
product.discount.discount_percent * 0.01),\n 'originalPrice' : product.price\n } for product in products.filter(q, **filters)]\n \n if search: \n return JsonResponse({'message':'SUCCESS', 'products':products}, status=200)\n\n except ValueError:\n return JsonResponse({'message':'VALUE_ERROR'}, status=400)\n return JsonResponse({'message':'SUCCESS', 'mainCategories':main_category, 'subCategories':sub_categories, 'sortings':sortings,'products':products}, status=200)\n\n\nclass MdChoiceView(View):\n def get(self, request):\n try:\n main_category_id = request.GET.get('category', None)\n products = Product.objects.select_related('sub_category', 'sub_category__main_category', 'discount')\n\n products = [{\n 'id' : product.id,\n 'name' : product.name,\n 'content' : product.content,\n 'imageUrl' : product.image_url,\n 'isSoldOut' : product.is_sold_out,\n 'discountPercent' : product.discount.discount_percent,\n 'discountName' : product.discount.name,\n 'discountContent' : product.discount.discount_content,\n 'discountPrice' : product.price - int(product.price * product.discount.discount_percent * 0.01),\n 'originalPrice' : product.price\n } for product in products.filter(Q(sub_category__main_category__id=main_category_id)).order_by('?', 'is_sold_out')[:random.randint(10,12)]]\n\n except ValueError:\n return JsonResponse({'message':'VALUE_ERROR'}, status=400)\n return JsonResponse({'message':'SUCCESS', 'products':products}, status=200)\n\n\nclass ProductDetailView(View):\n def get(self, request, product_id):\n try:\n product = Product.objects.select_related('discount').prefetch_related('productinformation_set').get(id=product_id)\n product.sales_count += 1\n product.save()\n\n if product.productinformation_set.exists():\n product_information = product.productinformation_set.get()\n else:\n product_information = False\n\n product_detail = {\n 'id' : product.id,\n 'name' : product.name,\n 'content' : product.content,\n 'imageUrl' : product.image_url,\n 'discountPercent' : product.discount.discount_percent,\n 'discountName' : product.discount.name,\n 'discountContent' : product.discount.discount_content,\n 'discountPrice' : product.price - int(product.price * product.discount.discount_percent * 0.01),\n 'originalPrice' : product.price,\n 'salesUnit' : product_information.sales_unit if product_information else '',\n 'size' : product_information.size if product_information else '',\n 'otherInformation' : [\n {\n 'category' :'배송구분', \n 'description':'/'.join([shipping.shipping_classification.name for shipping in product_information.productshipping_set.all()]) if product_information else \"\"\n },\n {\n 'category' :'원산지', \n 'description': product_information.origin if product_information else ''\n },\n {\n 'category' :'포장타입', \n 'description': product_information.packing_type.name if product_information else ''\n },\n {\n 'category' :'유통기한', \n 'description': product_information.shelf_life if product_information else ''\n },\n {\n 'category' :'알레르기정보',\n 'description': product_information.allergy_information if product_information else ''\n },\n {\n 'category' :'안내사항', \n 'description': product_information.information if product_information else ''\n }\n ]\n }\n \n except ValueError:\n return JsonResponse({'message':'VALUE_ERROR'}, status=400)\n except Product.DoesNotExist:\n return JsonResponse({'message':'Product does not exist'}, status=404)\n return JsonResponse({'message':'SUCCESS', 'product_detail':product_detail}, status=200)\n\n\nclass MainPageSectionView(View):\n def get(self, request):\n try:\n\n 
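The discountPrice field built in the product views above is the original price minus the truncated percentage amount. A small worked example of that arithmetic outside Django; the prices and percentages here are made up:

```python
def discount_price(price, discount_percent):
    # Same expression as in the views: the discount is a whole-number
    # percentage and the discounted amount is truncated with int().
    return price - int(price * discount_percent * 0.01)

print(discount_price(12500, 15))   # 10625  (12500 - int(1875.0))
print(discount_price(9990, 7))     # 9291   (9990 - int(699.3))
print(discount_price(5000, 0))     # 5000   (a 0% discount leaves the price unchanged)
```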
section_types = [\n '?',\n '-create_time',\n 'sales_count',\n '-discount__discount_percent',\n '?'\n ]\n\n section_list = [{\n 'products' : [{\n 'id' : product.id,\n 'name' : product.name,\n 'content' : product.content,\n 'imageUrl' : product.image_url,\n 'isSoldOut' : product.is_sold_out,\n 'discountPercent' : product.discount.discount_percent,\n 'discountName' : product.discount.name,\n 'discountContent' : product.discount.discount_content,\n 'discountPrice' : product.price - int(product.price * product.discount.discount_percent * 0.01),\n 'originalPrice' : product.price\n } for product in Product.objects.select_related('discount').filter(is_sold_out=False).order_by(section_type)[:random.randint(10,12)]]\n }for section_type in section_types]\n\n except ValueError:\n return JsonResponse({'message':'VALUE_ERROR'}, status=400)\n return JsonResponse({'message':'SUCCESS', 'section_list':section_list}, status=200)\n\n\nclass RelatedProductView(View):\n def get(self, request, product_id):\n try:\n sub_category_id = Product.objects.get(pk=product_id).sub_category.id\n related_products = [{\n 'id' : product.id,\n 'name' : product.name,\n 'imageUrl' : product.image_url,\n 'originalPrice' : product.price\n } for product in Product.objects.filter(sub_category_id=sub_category_id, is_sold_out=False).order_by('?')]\n\n except ValueError:\n return JsonResponse({'message':'VALUE_ERROR'}, status=400)\n return JsonResponse({'message':'SUCCESS', 'related_products':related_products}, status=200)\n\n\nclass HomeProductView(View):\n def get(self, request):\n try:\n product_type = request.GET.get('type')\n ordering = request.GET.get('ordering')\n products = Product.objects.select_related('discount')\n\n if product_type == 'new':\n products = products.filter(create_time__gt=datetime.datetime.now() - relativedelta(months=1))\n\n if product_type == 'best':\n products = products.filter(sales_count__gt=3000)\n\n sort_type_set = {\n '0' : '-create_time',\n '1' : '-sales_count',\n '2' : 'price',\n '3' : '-price' \n }\n \n sortings = [\n { \n 'id' : 0, \n 'name' : '신상품순',\n },\n { \n 'id' : 1, \n 'name' : '인기상품순',\n },\n {\n 'id' : 2, \n 'name' : '낮은 가격순',\n },\n {\n 'id' : 3, \n 'name' : '높은 가격순',\n } \n ]\n\n new_products = [{\n 'id' : product.id,\n 'name' : product.name,\n 'content' : product.content,\n 'imageUrl' : product.image_url,\n 'isSoldOut' : product.is_sold_out,\n 'discountPercent' : product.discount.discount_percent,\n 'discountName' : product.discount.name,\n 'discountContent' : product.discount.discount_content,\n 'discountPrice' : product.price - int(product.price * product.discount.discount_percent * 0.01),\n 'originalPrice' : product.price\n } for product in products.order_by('is_sold_out', sort_type_set[ordering])]\n\n except ValueError:\n return JsonResponse({'message':'VALUE_ERROR'}, status=400)\n return JsonResponse({'message':'SUCCESS', 'sortings':sortings, 'new_products':new_products}, status=200)\n \n\nclass SaleProductView(View):\n def get(self, request):\n try:\n ordering = request.GET.get('ordering')\n\n sort_type_set = {\n '0' : '-discount__discount_percent',\n '1' : '-create_time',\n '2' : '-sales_count',\n '3' : 'price',\n '4' : '-price'\n }\n \n sortings = [\n { \n 'id' : 0, \n 'name' : '혜택순',\n },\n { \n 'id' : 1, \n 'name' : '신상품순',\n },\n {\n 'id' : 2, \n 'name' : '인기상품순',\n },\n {\n 'id' : 3, \n 'name' : '낮은 가격순',\n },\n {\n 'id' : 4, \n 'name' : '높은 가격순'\n } \n ]\n \n sale_products = [{\n 'id' : product.id,\n 'name' : product.name,\n 'content' : product.content,\n 'imageUrl' : 
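HomeProductView above treats a product as new when create_time falls within the last calendar month, using dateutil's relativedelta rather than a fixed timedelta. A minimal sketch of that cutoff with a fixed timestamp (the dates are arbitrary):

```python
import datetime
from dateutil.relativedelta import relativedelta

now = datetime.datetime(2021, 3, 31, 12, 0)       # fixed "now" for illustration
one_month_ago = now - relativedelta(months=1)
print(one_month_ago)                              # 2021-02-28 12:00:00, clamped to a valid day

created = datetime.datetime(2021, 3, 10)
print(created > one_month_ago)                    # True, so it would count as "new"
```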
product.image_url,\n 'isSoldOut' : product.is_sold_out,\n 'discountPercent' : product.discount.discount_percent,\n 'discountName' : product.discount.name,\n 'discountContent' : product.discount.discount_content,\n 'discountPrice' : product.price - int(product.price * product.discount.discount_percent * 0.01),\n 'originalPrice' : product.price\n } for product in Product.objects.select_related('discount').exclude(discount__discount_percent=0).order_by('is_sold_out', sort_type_set[ordering])]\n\n except ValueError:\n return JsonResponse({'message':'VALUE_ERROR'}, status=400)\n return JsonResponse({'message':'SUCCESS', 'sortings':sortings, 'sale_products':sale_products}, status=200)","sub_path":"product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"167521004","text":"# http://www.spoj.com/problems/ABCPATH/\nfrom collections import deque\n\nALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n\ndx = [1, 1, 0, -1, -1, -1, 0, 1]\ndy = [0, -1, -1, -1, 0, 1, 1, 1]\n\n\ndef calculate(grid):\n queue = deque()\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 'A':\n queue.append((i, j))\n\n visited = set()\n\n level = 0\n while len(queue):\n size = len(queue)\n\n while size:\n y, x = queue.popleft()\n\n visited.add((y, x))\n size -= 1\n\n for i in range(8):\n new_y, new_x = y + dy[i], x + dx[i]\n if (\n new_y >= 0 and new_y < len(grid) and\n new_x >= 0 and new_x < len(grid[0]) and\n (new_y, new_x) not in visited and\n grid[new_y][new_x] == ALPHABET[ALPHABET.index(grid[y][x]) + 1]\n ):\n queue.append((new_y, new_x))\n\n level += 1\n\n return level\n\n\ndef actions():\n # import os\n # with open(os.path.join(os.path.curdir, 'tests/0008_test_case.txt')) as f:\n # while True:\n # yield f.readline().split('\\n')[0]\n while True:\n yield input()\n\n\nif __name__ == '__main__':\n case = 1\n go = actions()\n while True:\n height, width = map(int, next(go).split())\n\n if (height, width) == (0, 0):\n break\n\n grid = []\n for i in range(height):\n grid.append(next(go))\n\n print(\"Case {}: {}\".format(case, calculate(grid)))\n\n case += 1\n","sub_path":"spoj/0008_ABCPATH.py","file_name":"0008_ABCPATH.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"59566471","text":"import numpy as np\n\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.layers import LSTM, Dense, Dropout, Embedding, Input\nfrom keras.layers.merge import concatenate\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.wrappers import Bidirectional\nfrom keras.models import Model\nfrom keras.optimizers import Nadam\n\nhyperparam_opts = {\n 'lstm_size': np.arange(100, 550, 25).tolist(),\n 'dense_size': np.arange(100, 550, 25).tolist(),\n 'dropout_p': np.around(np.arange(0, 0.8, 0.1), 3).tolist(),\n 'rec_dropout_p': np.around(np.arange(0, 0.8, 0.1), 3).tolist(),\n 'batch_size': [512],\n 'activation': ['relu', 'tanh', 'elu'],\n 'bidirectional': [True, False],\n 'lstm_depth': [1, 2],\n 'dense_depth': [1, 2, 3],\n 'batchnorm': [True, False],\n 'lr': [0.0005, 0.001, 0.002, 0.005]\n}\n\n\ndef train(x1, x2, y, val_x1, val_x2,\n val_y, embeddings, stamp, exp_num, **hyperparams):\n\n val_weights = np.ones(len(val_y))\n val_weights *= 0.472001959\n val_weights[val_y == 0] = 1.309028344\n\n n_tokens = embeddings.shape[0]\n embedding_size = embeddings.shape[1]\n 
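The ABCPATH solution above searches level by level: every 'A' cell is a starting point, each BFS level may only step onto the next letter of the alphabet among the eight neighbours, and the number of levels reached is the answer. A compact, self-contained restatement of that idea on a made-up 2x2 grid; it marks cells as seen when they are enqueued, which is equivalent for this problem:

```python
from collections import deque

def longest_abc_path(grid):
    queue = deque((i, j) for i, row in enumerate(grid)
                  for j, ch in enumerate(row) if ch == 'A')
    seen, level = set(queue), 0
    while queue:
        level += 1
        for _ in range(len(queue)):
            y, x = queue.popleft()
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    ny, nx = y + dy, x + dx
                    if (0 <= ny < len(grid) and 0 <= nx < len(grid[0])
                            and (ny, nx) not in seen
                            and ord(grid[ny][nx]) == ord(grid[y][x]) + 1):
                        seen.add((ny, nx))
                        queue.append((ny, nx))
    return level

print(longest_abc_path(["AB",
                        "DC"]))   # 4, via A -> B -> C -> D
```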
maxlen = x1.shape[1]\n\n lstm_size = hyperparams['lstm_size']\n dense_size = hyperparams['dense_size']\n dropout_p = hyperparams['dropout_p']\n rec_dropout_p = hyperparams['rec_dropout_p']\n batch_size = hyperparams['batch_size']\n activation = hyperparams['activation']\n bidirectional = hyperparams['bidirectional']\n lstm_depth = hyperparams['lstm_depth']\n dense_depth = hyperparams['dense_depth']\n batchnorm = hyperparams['batchnorm']\n lr = hyperparams['lr']\n\n embedding = Embedding(n_tokens, embedding_size,\n input_length=maxlen, weights=[embeddings],\n trainable=False, mask_zero=False)\n\n lstms = []\n for i in range(lstm_depth):\n return_sequences = i != lstm_depth - 1\n lstm = LSTM(\n lstm_size, dropout=rec_dropout_p,\n recurrent_dropout=rec_dropout_p, return_sequences=return_sequences)\n if bidirectional:\n lstm = Bidirectional(lstm)\n lstms.append(lstm)\n\n q2_in = Input(shape=[maxlen], dtype='int32')\n q2_vec = embedding(q2_in)\n for lstm in lstms:\n q2_vec = lstm(q2_vec)\n\n q1_in = Input(shape=[maxlen], dtype='int32')\n q1_vec = embedding(q1_in)\n for lstm in lstms:\n q1_vec = lstm(q1_vec)\n\n q2_in = Input(shape=[maxlen], dtype='int32')\n q2_vec = embedding(q2_in)\n for lstm in lstms:\n q2_vec = lstm(q2_vec)\n\n net = concatenate([q1_vec, q2_vec])\n net = Dropout(dropout_p)(net)\n if batchnorm:\n net = BatchNormalization()(net)\n\n for i in range(dense_depth):\n net = Dense(dense_size, activation=activation)(net)\n net = Dropout(dropout_p)(net)\n if batchnorm:\n net = BatchNormalization()(net)\n\n y_hat = Dense(1, activation='sigmoid')(net)\n\n model = Model(inputs=[q1_in, q2_in], outputs=y_hat)\n optim = Nadam(lr=lr)\n model.compile(loss='binary_crossentropy', optimizer=optim, metrics=['acc'])\n\n early_stopping = EarlyStopping(monitor='val_loss', patience=3)\n ckpt_path = 'params/exp_{}.ckpt'.format(exp_num)\n ckpt = ModelCheckpoint(\n ckpt_path, save_best_only=True, save_weights_only=True)\n\n hist = model.fit([x1, x2], y,\n validation_data=([val_x1, val_x2], val_y, val_weights),\n epochs=200, batch_size=batch_size, shuffle=True,\n callbacks=[early_stopping, ckpt],\n class_weight={0: 1.309028344, 1: 0.472001959})\n\n best_val = min(hist.history['val_loss'])\n return best_val, {'model_fnm': ckpt_path, 'hist': hist.history}\n","sub_path":"models/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"471009331","text":"import discord\nimport datetime\nimport requests\nimport typing as t\nimport time\nfrom typing import Optional\nfrom discord.ext import commands\nfrom discord import Member\nfrom config import *\n\nclass Info(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.command()\n async def ping(self, ctx):\n\n time1 = time.time()\n\n msg = await ctx.message.reply(embed=discord.Embed(title = \"Pinging...\", color=0x00FFFF))\n\n embed = discord.Embed(\n title = \"Pong!\",\n description = f\"\"\"\nAPI Latency: **{round(self.client.latency * 1000)}ms**\nBot Latency: **{round((time.time() - time1) * 1000)}ms**\n \"\"\",\n color = 0x00FFFF\n )\n embed.set_footer(text=\"Experiencing lag issues? 
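The train() function in the lstm.py record above builds a siamese network: a single Embedding and a single stack of LSTM layers are instantiated once and applied to both question inputs, so the two branches share weights before being concatenated. A minimal sketch of that sharing pattern with placeholder sizes (20, 5000, 100 and 64 are not the tuned hyperparameters from the record):

```python
from keras.layers import Input, Embedding, LSTM, Dense
from keras.layers.merge import concatenate
from keras.models import Model

maxlen, n_tokens, embedding_size = 20, 5000, 100   # placeholder dimensions

embedding = Embedding(n_tokens, embedding_size, input_length=maxlen)
lstm = LSTM(64)                       # one shared recurrent encoder

q1_in = Input(shape=(maxlen,), dtype='int32')
q2_in = Input(shape=(maxlen,), dtype='int32')
q1_vec = lstm(embedding(q1_in))       # same layer objects, so shared weights
q2_vec = lstm(embedding(q2_in))

merged = concatenate([q1_vec, q2_vec])
y_hat = Dense(1, activation='sigmoid')(merged)

model = Model(inputs=[q1_in, q2_in], outputs=y_hat)
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['acc'])
model.summary()
```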
Join our support server!\")\n\n await msg.edit(embed=embed)\n\n @commands.command()\n async def credits(self, ctx):\n embed = discord.Embed(\n title = \"Credits\",\n description = \"This bot wouldn't have been possible without them!\",\n color = MAIN_COLOR\n )\n embed.add_field(\n name=\"Contributors\",\n value=\"\"\"\n- [`Nirlep_5252_`](https://github.com/Nirlep5252) - Owner\n- [`TheUndeadBowman`](https://github.com/TheUndeadBowman) - Supporter, Helper\n- [`CAT`](https://github.com/KittyKart) - Supporter, Helper\n- [`Craftzman7`](https://github.com/Craftzman7) - Helper\n- [`Motzumoto`](https://github.com/Motzumoto) - Helper\n- [`WindowsCmd`](https://github.com/WindowsCmd) - Supporter, Helper\n- [`imkrvishal`](https://github.com/imkrvishal) - Helper\n- [`ELEXR`](https://github.com/ELEXR) - Supporter, Helper\n \"\"\",\n inline=False\n )\n embed.add_field(\n name=\"Other Credits\",\n value=\"\"\"\n- [`Hexbot`](https://github.com/1Prototype1/HexBot) - Game Commands\n \"\"\",\n inline=False\n )\n embed.set_footer(text=\"They are amazing! 💖\")\n\n await ctx.message.reply(embed=embed)\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.command(aliases = ['guildinfo', 'server_info', 'guild_info'])\n async def serverinfo(self, ctx):\n\n embed = discord.Embed(title = f\"Server Information about **{ctx.guild}**\", color = 0x00FFFF)\n embed.set_author(name = ctx.guild, icon_url = ctx.guild.icon_url)\n embed.add_field(name = \"<:EpicOwner:794075390980653106> Owner:\", value = f\"{ctx.guild.owner.mention}\", inline = True)\n embed.add_field(name = \"🌏 Region:\", value = f\"{str(ctx.guild.region).title()}\", inline = True)\n embed.add_field(name = \"⏰ Created At:\", value = ctx.guild.created_at.strftime(\"%d/%m/%y | %H:%M:%S\"), inline = True)\n embed.add_field(name = \"<:EpicMembers:794075799422238720> Members:\", value = len(ctx.guild.members), inline = True)\n embed.add_field(name = \"👨 Humans:\", value = len(list(filter(lambda m: not m.bot, ctx.guild.members))), inline = True)\n embed.add_field(name = \"🤖 Bots:\", value = len(list(filter(lambda m: m.bot, ctx.guild.members))), inline = True)\n embed.add_field(name = \"<:EpicTextChannel:794076501208465469> Text Channels:\", value = f\"{len(ctx.guild.text_channels)}\", inline = True)\n embed.add_field(name = \"<:EpicVoiceChannel:794076949541814302> Voice Channels:\", value = len(ctx.guild.voice_channels), inline = True)\n try:\n embed.add_field(name = \"💤 AFK Channel\", value = ctx.guild.afk_channel, inline = True)\n embed.add_field(name = \"💤 AFK Timeout\", value = f\"{ctx.guild.afk_timeout}s\", inline = True)\n embed.add_field(name = \"🛡️ Moderation Level:\", value = str(ctx.guild.verification_level).title())\n embed.add_field(name = \"<:EpicInvite:794081254156140554> Invites\", value = len(await ctx.guild.invites()))\n embed.add_field(name = \"<:EpicRules:794079278639349781> Rules Channel:\", value = ctx.guild.rules_channel.mention, inline = True)\n except:\n pass\n try:\n embed.add_field(name = \"🟠 Roles\", value = len(ctx.guild.roles), inline = True)\n except:\n pass\n try:\n embed.add_field(name = \"😊 Emojis\", value = len(ctx.guild.emojis), inline = True)\n except:\n pass\n embed.add_field(name = \"<:EpicBoost:794078431721291797> Server Boosts\", value = ctx.guild.premium_subscription_count, inline = True)\n try:\n embed.add_field(name = \"<:EpicBoost:794078431721291797> Server Boosters\", value = len(ctx.guild.premium_subscribers), inline = True)\n except:\n pass\n embed.add_field(name = \"🔗 Server Icon\", value = f\"[Click 
Here]({ctx.guild.icon_url})\", inline = True)\n embed.add_field(name = \"🆔 Server ID:\", value = f\"{ctx.guild.id}\", inline = True)\n\n embed.add_field(name = \"‎\",\n value = \"[Invite Bot](https://discord.com/oauth2/authorize?client_id=751100444188737617&scope=bot&permissions=2146958847) | [Discord Server](https://discord.gg/Zj7h8Fp) | [Bug Report](https://docs.google.com/forms/d/1PYkQSB0rMSfZePp7o_iqC1cfecnvlys62GGhfHt9OYo)\",\n inline = False)\n embed.set_footer(text=f\"{ctx.author.guild}\", icon_url=f\"{ctx.guild.icon_url}\")\n embed.timestamp = datetime.datetime.utcnow()\n await ctx.send(embed = embed)\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.command(aliases = ['profile', 'pfp'])\n async def avatar(self, ctx, target: Optional[Member]):\n target = target or ctx.author\n\n embed = discord.Embed(title = f\"**Avatar of {target.name}#{target.discriminator}**\", color = 0x00FF0C)\n embed.set_image(url = target.avatar_url)\n embed.set_footer(text=f\"{ctx.guild}\", icon_url=f\"{ctx.guild.icon_url}\")\n embed.timestamp = datetime.datetime.utcnow()\n\n await ctx.send(embed = embed)\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.command()\n async def botinfo(self, ctx):\n embed = discord.Embed(title = \"**Bot Info**\", description = f\"I was built by `Nirlep_5252_` on 3rd September. My help command is `e!help`. I am currently in `{len(self.client.guilds)}` servers, and i have more than `{len(set(self.client.get_all_members()))}` users. I have a total of `103` commands.\", color = 0x00FFFF)\n embed.set_thumbnail(url='https://media.discordapp.net/attachments/757168151141285929/763336446328438784/bot_profile.png')\n embed.add_field(name = \"**Invite EpicBot**\",\n value = f\"[Click Here](https://discord.com/oauth2/authorize?client_id=751100444188737617&scope=bot&permissions=2146958847)\",\n inline = True)\n embed.add_field(name = \"**Support Server**\",\n value = f\"[Click Here](https://discord.gg/Zj7h8Fp)\",\n inline = True)\n embed.add_field(name = \"**Bug Report**\",\n value = f\"[Click Here](https://docs.google.com/forms/d/1PYkQSB0rMSfZePp7o_iqC1cfecnvlys62GGhfHt9OYo/edit?usp=sharing)\",\n inline = True)\n embed.add_field(name = \"**Vote EpicBot**\",\n value = f\"[Click Here](https://botrix.cc/vote/751100444188737617/)\",\n inline = True)\n embed.add_field(name = \"**Our Website**\",\n value = f\"[Click Here](https://epicbot.gq)\",\n inline = True)\n embed.set_footer(text=f\"{ctx.guild}\", icon_url=f\"{ctx.guild.icon_url}\")\n embed.timestamp = datetime.datetime.utcnow()\n\n await ctx.send(embed = embed)\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.command()\n async def vote(self, ctx):\n\n embed = discord.Embed(\n title = \"Vote EpicBot! \\💖\",\n description = \"Thank you so much!\",\n color = 0x00FFFF,\n url = \"https://top.gg/bot/751100444188737617/vote\"\n )\n embed.set_footer(text=\"I love you! 
✨\")\n\n await ctx.send(embed=embed)\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.command(aliases = ['role-info', 'role_info'])\n async def roleinfo(self, ctx, role: discord.Role):\n embed = discord.Embed(\n title = f\"**Role Information**\",\n color = role.colour)\n embed.add_field(name = \"ID:\", value = f\"{role.id}\", inline = False)\n embed.add_field(name = \"Name:\", value = f\"{role.name}\", inline = False)\n embed.add_field(name = \"Position:\", value = f\"{role.position}\", inline = False)\n embed.add_field(name = \"Created At:\", value = f\"{role.created_at.strftime('%I : %M : %S %p')} | {role.created_at.day} / {role.created_at.month} / {role.created_at.year}\", inline = False)\n embed.add_field(name = \"Hoisted:\", value = f\"{bool(role.hoist)}\")\n embed.add_field(name = \"Members with this role:\", value = f\"{role.members}\", inline = False)\n embed.add_field(name = \"Permissions:\", value = f\"{role.permissions}\", inline = False)\n embed.set_footer(text=f\"{ctx.guild}\", icon_url=f\"{ctx.guild.icon_url}\")\n embed.timestamp = datetime.datetime.utcnow()\n\n await ctx.send(embed = embed)\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.command()\n async def invite(self, ctx):\n try:\n\n url = \"https://api.statcord.com/v3/751100444188737617\"\n response = requests.get(url)\n yes = response.json()\n\n embed = discord.Embed(\n title = 'Invite EpicBot',\n description = f\"EpicBot is currently in `{len(self.client.guilds)}` Servers and has over `{yes['data'][0]['users']}` Users.\",\n color = 0x00FFFF\n )\n embed.add_field(name = \"**Invite**\",\n value = f\"**[Click Here](https://discord.com/oauth2/authorize?client_id=751100444188737617&scope=bot&permissions=2146958847)**\",\n inline = False)\n embed.set_thumbnail(url='https://media.discordapp.net/attachments/757168151141285929/763336446328438784/bot_profile.png')\n embed.set_footer(text=f\"{ctx.guild}\", icon_url=f\"{ctx.guild.icon_url}\")\n embed.timestamp = datetime.datetime.utcnow()\n\n await ctx.send(embed=embed)\n except:\n embed = discord.Embed(\n title = 'Invite EpicBot',\n description = f\"EpicBot is currently in `{len(self.client.guilds)}` Servers and has over `{len(set(self.client.get_all_members()))}` Users.\",\n color = 0x00FFFF\n )\n embed.add_field(name = \"**Invite**\",\n value = f\"**[Click Here](https://discord.com/oauth2/authorize?client_id=751100444188737617&scope=bot&permissions=2146958847)**\",\n inline = False)\n embed.set_thumbnail(url='https://media.discordapp.net/attachments/757168151141285929/763336446328438784/bot_profile.png')\n embed.set_footer(text=f\"{ctx.guild}\", icon_url=f\"{ctx.guild.icon_url}\")\n embed.timestamp = datetime.datetime.utcnow()\n\n await ctx.send(embed=embed)\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.command()\n async def bug_report(self, ctx):\n user = ctx.author\n\n embed = discord.Embed(title=str(\"**Bug Report**\"), color=0x00FF0C, description=f\"Hey {user.mention}! 
In order to report a bug [Click Here](https://docs.google.com/forms/d/1PYkQSB0rMSfZePp7o_iqC1cfecnvlys62GGhfHt9OYo/edit?usp=sharing).\")\n embed.set_thumbnail(url=\"https://media.discordapp.net/attachments/749996055369875459/751648770185494548/EP2.png\")\n embed.add_field(name = \"**Support Server**\", value = f\"You should join out [Support Server](https://discord.gg/Zj7h8Fp) if you need help.\")\n embed.set_footer(text=f\"{user.guild}\", icon_url=f\"{user.guild.icon_url}\")\n embed.timestamp = datetime.datetime.utcnow()\n\n await ctx.send(embed=embed)\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.command(aliases = ['discord'])\n async def support(self, ctx):\n embed = discord.Embed(title = \"<:EpicDiscord:770889292746194964> Support Server\", description = f\"Hey {ctx.author.mention}! You can join our discord support server by [Clicking Here](https://discord.gg/Zj7h8Fp), OR by using this link - https://discord.gg/Zj7h8Fp\", color = 0x008080)\n embed.set_footer(text = f\"{ctx.author.guild}\", icon_url = ctx.author.guild.icon_url)\n embed.timestamp = datetime.datetime.utcnow()\n await ctx.send(embed = embed)\ndef setup(client):\n client.add_cog(Info(client))\n","sub_path":"Bot/cogs/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":12741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"153557188","text":"from datetime import datetime\r\n\r\nclass Guest:\r\n def __init__(self, Name, FamilyName, Car, controlboard,\r\n CarRotationManager, ID=0, linkedplatform=None,Start=0): # --Initializing Guest credentials/info---\r\n self.Name = Name\r\n self.FamilyName = FamilyName\r\n self.Car = Car\r\n self.controlboard = controlboard\r\n self.CarRotationManager = CarRotationManager\r\n if ID == 0: # In this case, the guest would be a new guest, so when we register him as a guest we don't give him an ID, and we ask the controlboard to generate the ID\r\n self.uniqueID = controlboard.set_id() # ----calling controlboard class to set ID---unique ID given by control board/decision engine\r\n else: # In this case, the guest would have already parked before and he would already have an ID, so instead of generating a new ID we just give him his old one\r\n self.uniqueID = ID\r\n self.parked = False # Boolean variable which indicates if guest is parked or not\r\n self.linkedplatform = None # Variable containing the platform where the guest's car is parked\r\n self.Start=Start # This is the time when the guest parks\r\n\r\n def parked_and_linkedplatform_value(self): # This function checks if the guest is parked and sets the values of linkedplatform and parked accordingly\r\n (boolean, linkedplatform) = self.CarRotationManager.check_if_guest_parked(self)\r\n if boolean == True:\r\n self.parked = True\r\n self.linkedplatform = linkedplatform\r\n else:\r\n self.parked = False\r\n self.linkedplatform = None\r\n\r\n def request_car(self): # Function that releases the car if it is parked\r\n self.parked_and_linkedplatform_value()\r\n if self.parked == False:\r\n print(\"Your car is not parked!\\n\")\r\n return\r\n pos = self.CarRotationManager.get_platform_position(self) # Get the car's current position in the parking\r\n if (pos == -1):\r\n print(\"Your car is not parked!\\n\")\r\n return\r\n self.CarRotationManager.return_platform_to_base(pos) # Move the car to the base position\r\n self.CarRotationManager.release_car(self.linkedplatform) # Release the car\r\n self.parked = False\r\n self.CarRotationManager.occupiedPlatforms 
= self.CarRotationManager.occupiedPlatforms - 1\r\n print(\"Your \" + self.Car.model + \" has been released.\")\r\n print(\"Have a great day \" + self.Name + \"!\\n\")\r\n self.controlboard.remove_guest_from_file(self) # We remove the guest from the file once his car is not parked anymore\r\n\r\n def park_car(self): # Function that parks the guest's car if it's not already parked\r\n self.parked_and_linkedplatform_value()\r\n if (self.parked == True):\r\n print(\"Your car is already parked!\\n\")\r\n return\r\n platform = self.CarRotationManager.return_empty_platform() # FOUND CLOSEST EMPTY PLATFORM\r\n if (platform == None):\r\n return -1 # PARKING IS FULL\r\n self.CarRotationManager.return_platform_to_base(platform.Position)\r\n platform.link(self) # NOW USER'S CAR IS PARKED ON BASE PLATFORM\r\n self.linkedplatform = platform\r\n self.parked = True\r\n self.CarRotationManager.occupiedPlatforms = self.CarRotationManager.occupiedPlatforms + 1\r\n print(\"Your \" + self.Car.model + \" has been parked!\\n\")\r\n now = datetime.now() # Get the current time, i.e when the user parks his car\r\n array = str(now).split()\r\n string_into_file = array[0] + \"@\" + array[1]\r\n self.controlboard.add_guest_to_file(self,string_into_file) # Add the current time (when the user parked) next to his information in the guest file\r\n self.Start=string_into_file\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Guest.py","file_name":"Guest.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"297429509","text":"from game_engine.play import Play\r\nfrom game_engine.player_human import PlayerHuman\r\n\r\n\r\nclass GameEngine:\r\n \"\"\"\r\n This is temporary class for the CLI testing and presentation\r\n Author(s): Adam Ross; Gustav From\r\n Last-edit-date: 14/02/2019\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n The GameEngine CLI test class constructor\r\n \"\"\"\r\n self.play = Play() # initiates a Play class instance\r\n self.introduction() # Prints to terminal an introduction to the game\r\n\r\n def introduction(self):\r\n print(\"\\n*** Welcome to GameEngine ***\")\r\n\r\n def declare_available_pieces(self):\r\n \"\"\"\r\n Declares to the players the pieces available for selection\r\n Temporary for the CLI testing\r\n \"\"\"\r\n print(\"\\nGame pieces status:\")\r\n print(list(self.play.game.pieces.items())[:int((len(self.play.game.\r\n pieces) + 1) / 2)])\r\n\r\n if len(self.play.game.pieces) > 1:\r\n print(list(self.play.game.pieces.\r\n items())[int((len(self.play.game.pieces) + 1) / 2):])\r\n\r\n def declare_board_status(self):\r\n \"\"\"\r\n Declares to the players the current status of the game board\r\n Temporary for the CLI testing\r\n \"\"\"\r\n print(\"\\nGame board status:\")\r\n print(*(row for row in self.play.game.board), sep=\"\\n\")\r\n\r\n def declare_current_player(self):\r\n \"\"\"\r\n Temporary printing of current player for CLI testing and presenting\r\n \"\"\"\r\n print(\"\\nCurrent player: '\" + self.play.current_player.name + \"'\")\r\n\r\n def declare_selected_piece(self):\r\n \"\"\"\r\n Temporary printing of selected piece for CLI testing and presenting\r\n \"\"\"\r\n print(\"\\nCurrent piece: \" + self.play.selected_piece)\r\n\r\n def game_mode_selection(self):\r\n while True:\r\n print(\"\\nSelect a following game mode (enter number 1 - 3):\")\r\n n = input(\"1: Player vs Player\\n2: Player vs AI\\n3: AI vs AI\\n\")\r\n\r\n if n == \"2\" or n == \"3\":\r\n print(\"\\nSelect a 
following difficulty (enter number 1 - 3):\")\r\n d = input(\"1: easy\\n2: medium\\n3: hard\\n\")\r\n\r\n if d == \"1\" or d == \"2\" or d == \"3\":\r\n self.play.init_players(int(n), int(d)) # initializes play\r\n break\r\n elif n == \"1\":\r\n self.play.init_players(int(n), None) # initializes players\r\n break\r\n self.declare_current_player() # prints the starting player turn\r\n\r\n def play_game(self):\r\n self.declare_available_pieces() # prints game board status\r\n self.declare_board_status() # prints available pieces status\r\n\r\n if isinstance(self.play.current_player, PlayerHuman):\r\n while True:\r\n pce = input(\"\\nEnter number 0-15 of piece selection: \")\r\n\r\n if self.play.play_selection(pce):\r\n break\r\n else:\r\n self.play.play_selection()\r\n self.declare_selected_piece() # prints the selected piece\r\n self.declare_current_player() # prints the current player turn\r\n\r\n if isinstance(self.play.current_player, PlayerHuman):\r\n while True:\r\n try:\r\n y, x = input(\"\\nEnter 2 ints 0-3 separated by a space: \").\\\r\n split()\r\n\r\n if self.play.play_placement(y, x):\r\n break\r\n except:\r\n continue\r\n else:\r\n self.play.play_placement()\r\n\r\n if self.play.game.has_won_game(self.play.selected_piece):\r\n self.declare_board_status() # prints final status of board\r\n print(\"game won by \" + self.play.current_player.name)\r\n elif not self.play.game.has_next_play(): # checks if turns remaining\r\n self.declare_board_status() # prints final status of board\r\n else:\r\n self.play_game() # plays the next turn\r\n\r\n\r\nif __name__ == '__main__':\r\n app = GameEngine()\r\n app.game_mode_selection()\r\n app.play_game()\r\n","sub_path":"communication_platform/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"56603468","text":"import csv\nimport os \n\nfile_to_load = os.path.join('C:/Users/kayan/UCBWork/python-challenge/PyPoll/raw_data/election_data.csv')\nfile_to_output = os.path.join('analysis','election_data_analysis.txt')\n\nwith open(file_to_load, 'r') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n header = next(csvreader)\n unique_list = []\n total_votes = 0\n\n for row in csvreader:\n if row[2] not in unique_list: \n unique_list.append(str(row[2])) \n total_votes = total_votes +1 \n\nwith open(file_to_load, 'r') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n header = next(csvreader)\n\n vote1 = 0\n vote2 = 0\n vote3 = 0\n vote4 = 0 \n\n for row in csvreader: #count votes\n if row[2] == unique_list[0]:\n vote1 = vote1 + 1 \n elif row[2] == unique_list[1]:\n vote2 = vote2 +1 \n elif row[2] == unique_list[2]:\n vote3 = vote3 +1 \n elif row[2] == unique_list[3]:\n vote4 = vote4 +1 \n percent1 = float((vote1/total_votes)*100) #calculate percentages\n percent2 = float((vote2/total_votes)*100)\n percent3 = float((vote3/total_votes)*100)\n percent4 = float((vote4/total_votes)*100)\n\n summ_list = [\n {'name': unique_list[0], 'percent': percent1, 'vote': vote1},\n {'name': unique_list[1], 'percent': percent2, 'vote': vote2},\n {'name': unique_list[2], 'percent': percent3, 'vote': vote3},\n {'name': unique_list[3], 'percent': percent4, 'vote': vote4}\n ]\n\n find = [i['vote'] for i in summ_list] #find the highest vote in list of dicts\n highest_vote = max (find)\n \n for i in summ_list: #find winner\n if i['vote'] == highest_vote:\n winner = i['name']\n\noutput = (\n f\"\\nElection Results\\n\"\n 
f\"-----------------------\\n\"\n f\"Total Votes: {total_votes}\\n\"\n f\"-----------------------\\n\" \n f\"{unique_list[0]}: {percent1:.3f}% ({vote1})\\n\"\n f\"{unique_list[1]}: {percent2:.3f}% ({vote2})\\n\"\n f\"{unique_list[2]}: {percent3:.3f}% ({vote3})\\n\"\n f\"{unique_list[3]}: {percent4:.3f}% ({vote4})\\n\"\n f\"------------------------\\n\"\n f\"Winner: {winner}\\n\"\n f\"------------------------\\n\" )\nprint(output)\n\nwith open(file_to_output, \"a\") as txt_file:\n txt_file.write(output)\n \n","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"236402225","text":"from numpy import *\nfrom matplotlib.pyplot import *\nimport matplotlib.dates as mdates\n\ndef show_all(datas, times, frequencies, location = ''):\n\n for i, frequency in enumerate(frequencies):\n data = datas[i]\n\n x_lims = mdates.date2num(times)\n plot_date(x_lims, data, fmt = 'b,', linestyle = '-', tz = None, xdate = True)\n date_format = mdates.DateFormatter('%H:%M:%S')\n gca().xaxis.set_major_formatter(date_format)\n gcf().autofmt_xdate()\n title('SAT1\\nOriginal data, without moving average applied', fontsize = 12)\n xlabel('Time [UTC]', fontsize = 18)\n ylabel('DigitHot, in digits', fontsize = 18)\n savefig(location + '_' + str(abs(frequency)) + '.pdf')\n clf()\n","sub_path":"AstroWeek/Output/show_all.py","file_name":"show_all.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"360680805","text":"import unittest\nimport os\nimport requests\nfrom jsonpath import jsonpath\nfrom unittestreport import ddt, list_data\nfrom handle_conf import conf\nfrom handle_excel import Handle_excel\nfrom handle_log import my_log\nfrom handle_mysql import Handle_DB\nfrom handle_path import DATAS_DIR\nfrom handle_tools import replace_data\n\n\n@ddt\nclass TestWithdraw(unittest.TestCase):\n excel = Handle_excel(os.path.join(DATAS_DIR, 'test02.xlsx'), 'withdraw')\n res = excel.read_excel()\n base_url = conf.get('env', 'base_url')\n db = Handle_DB()\n\n @classmethod\n def setUp(cls):\n url = conf.get('env', 'base_url') + '/member/login'\n params = {\n \"mobile_phone\": conf.get('test_data', 'mobile'),\n \"pwd\": conf.get('test_data', 'pwd')\n }\n headers = eval(conf.get('env', 'headers'))\n response = requests.post(url=url, json=params, headers=headers)\n res = response.json()\n token = jsonpath(res, '$..token')[0]\n headers['Authorization'] = token\n cls.headers = headers\n member_id = jsonpath(res, '$..id')[0]\n cls.member_id = member_id\n\n @list_data(res)\n def test_withdraw(self, item):\n url = self.base_url + item['url']\n method = item['method']\n if '#member_id#' in item['data']:\n item['data'] = item['data'].replace('#member_id#', str(self.member_id))\n\n params = eval(item['data'])\n expected = eval(item['expected'])\n sql = 'select leave_amount from futureloan.member where mobile_phone=\"{}\"'.format(\n conf.get(\"test_data\", \"mobile\"))\n start_amout = self.db.find_one(sql)[0]\n print('用户执行之前,用户的余额:', start_amout)\n\n response = requests.request(url=url, method=method, json=params, headers=self.headers)\n res = response.json()\n end_amout = self.db.find_one(sql)[0]\n print('用户执行之后,用户的余额:', end_amout)\n\n try:\n self.assertEquals(expected['code'], res['code'])\n self.assertEquals(expected['msg'], res['msg'])\n if res['msg'] == 'OK':\n self.assertEquals(float(start_amout - end_amout), 
params['amount'])\n print(params, params['amount'])\n else:\n self.assertEquals(float(end_amout - start_amout), 0)\n except AssertionError as e:\n my_log.exception(e)\n raise e\n else:\n my_log.info('quxiansuccess')\n","sub_path":"testcases/test_withdraw.py","file_name":"test_withdraw.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"310720692","text":"from data.data import loadData\nfrom ex1.crossvalidation import splitDataForXValidation\nfrom copy import deepcopy\nfrom eval.rmse import rmseEval\nfrom sklearn.ensemble.bagging import BaggingRegressor\nfrom sklearn.svm.classes import SVR\nfrom sklearn.preprocessing.data import StandardScaler\n\nOUTPUT_DATA_FILE = \"/experiments/ex1/ex1_svm2.csv\"\n\nparametersList = []\nfor c in range(1,100):\n for samples in [i * 50 for i in range(1,201)]:\n parametersList.append({\"C\": c, \"max_samples\": samples, \"n_estimators\": 10})\n\nlocations = [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]\n\n# load the data\ndata = {}\ncolumns = []\nloadData(\"/data/york_hour_2013.csv\", [\"timestamp\", \"atc\"], data, columns)\n\nall_features = deepcopy(columns)\nall_features.remove(\"target\")\nall_features.remove(\"location\")\n\noutput = open(OUTPUT_DATA_FILE, 'w')\noutput.write(\"n_estimators,C,max_samples,rmse\\n\")\n\ndef evalOne(parameters):\n all_obs = []\n all_pred = []\n for location in locations:\n trainX, testX, trainY, testY = splitDataForXValidation(location, \"location\", data, all_features, \"target\")\n normalizer_X = StandardScaler()\n trainX = normalizer_X.fit_transform(trainX)\n testX = normalizer_X.transform(testX)\n normalizer_Y = StandardScaler()\n trainY = normalizer_Y.fit_transform(trainY)\n testY = normalizer_Y.transform(testY)\n model = BaggingRegressor(base_estimator=SVR(kernel='rbf', C=parameters[\"C\"], cache_size=5000), max_samples=parameters[\"max_samples\"],n_estimators=parameters[\"n_estimators\"], verbose=0, n_jobs=-1)\n model.fit(trainX, trainY)\n prediction = model.predict(testX)\n prediction = normalizer_Y.inverse_transform(prediction)\n testY = normalizer_Y.inverse_transform(testY)\n all_obs.extend(testY)\n all_pred.extend(prediction)\n \n return rmseEval(all_obs, all_pred)[1]\n\nfor p in parametersList:\n print(str(p))\n rmse = evalOne(p)\n print(\"\\t\" + str(rmse))\n output.write(str(p[\"n_estimators\"]) + \",\" + str(p[\"C\"]) + \",\" + str(p[\"max_samples\"]) + \",\" + str(rmse) + \"\\n\")\n output.flush()\n \noutput.close()\n","sub_path":"experiments/src/ex1/ex1_svm2.py","file_name":"ex1_svm2.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"28408871","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\nif os.name == \"nt\":\n extra_deps = []\nelse:\n extra_deps = [\n \"python-Levenshtein\",\n ]\n\nquiet = '--quiet' in sys.argv or '-q' in sys.argv\n\ntry:\n with open(\"paperwork_backend/_version.py\", \"r\") as file_descriptor:\n version = file_descriptor.read().strip()\n version = version.split(\" \")[2][1:-1]\n if not quiet:\n print(\"Paperwork-backend version: {}\".format(version))\n if \"-\" in version:\n version = version.split(\"-\")[0]\nexcept FileNotFoundError:\n print(\"ERROR: _version.py file is missing\")\n print(\"ERROR: Please run 'make version' first\")\n sys.exit(1)\n\nsetup(\n name=\"paperwork-backend\",\n version=version,\n description=(\n \"Paperwork's backend\"\n 
),\n long_description=\"\"\"Paperwork is a GUI to make papers searchable.\n\nThis is the backend part of Paperwork. It manages:\n- The work directory / Access to the documents\n- Indexing\n- Searching\n- Suggestions\n- Import\n- Export\n\nThere is no GUI here. The GUI is\n.\n \"\"\",\n keywords=\"documents\",\n url=(\n \"https://gitlab.gnome.org/World/OpenPaperwork/paperwork/tree/master/\"\n \"paperwork-backend\"\n ),\n download_url=(\n \"https://gitlab.gnome.org/World/OpenPaperwork/paperwork/-\"\n \"/archive/{}/paperwork-{}.tar.gz\".format(version, version)\n ),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: End Users/Desktop\",\n (\"License :: OSI Approved ::\"\n \" GNU General Public License v3 or later (GPLv3+)\"),\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Multimedia :: Graphics :: Capture :: Scanners\",\n \"Topic :: Multimedia :: Graphics :: Graphics Conversion\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Text Processing :: Filters\",\n \"Topic :: Text Processing :: Indexing\",\n ],\n license=\"GPLv3+\",\n author=\"Jerome Flesch\",\n author_email=\"jflesch@openpaper.work\",\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n 'paperwork-shell = paperwork_backend.shell_cmd:main',\n ],\n },\n zip_safe=True,\n install_requires=[\n \"distro\",\n \"natsort\",\n \"Pillow\",\n \"pycountry\",\n \"pyocr\",\n \"simplebayes\",\n \"termcolor\", # used by paperwork-chkdeps\n \"Whoosh\",\n # paperwork-shell chkdeps take care of all the dependencies that can't\n # be handled here. Mainly, dependencies using gobject introspection\n # (libpoppler, etc)\n ] + extra_deps\n)\n\nif quiet:\n sys.exit(0)\n\nprint(\"============================================================\")\nprint(\"============================================================\")\nprint(\"|| IMPORTANT ||\")\nprint(\"|| Please run 'paperwork-shell chkdeps paperwork_backend' ||\")\nprint(\"|| to find any missing dependency ||\")\nprint(\"============================================================\")\nprint(\"============================================================\")\n","sub_path":"pypi_install_script/paperwork-backend-1.3.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"471360135","text":"import time\nfrom datetime import datetime as dt\n\n\nhost_file = 'hosts'\n\nredirect = '127.0.0.1'\n\nwebsite_list = ['vk.com', 'mail.ru']\n\n\ndef blocker_process():\n while True:\n ymd = (dt.now().year, dt.now().month, dt.now().day)\n if dt(*ymd, 8) < dt.now() < dt(*ymd, 16):\n print('Rihanna')\n when_work()\n else:\n print('Drake')\n when_chill()\n time.sleep(5)\n\n\ndef when_work():\n file = open(host_file, '+r')\n content = file.read()\n for website in website_list:\n if website in content:\n pass\n else:\n file.write(redirect + ' ' + website + '\\n')\n\n\ndef when_chill():\n file = open(host_file, '+r')\n content = file.readlines()\n file.seek(0)\n for line in content:\n if not any(website in line for website in website_list):\n file.write(line)\n file.truncate()\n\n\nblocker_process()\n","sub_path":"blocker.py","file_name":"blocker.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"483151673","text":"###### Hall C Software Source SConscript Build File #####\n###### 
Author: Edward Brash (brash@jlab.org) June 2013\n\nimport os\nimport re\nimport SCons.Util\nImport('pbaseenv')\n\nlist = Glob('*.cxx', exclude=['main.C'])\n\npbaseenv.Object('main.C')\n\nsotarget = 'HallC'\n\n#srclib = pbaseenv.SharedLibrary(target = sotarget, source = list+['HallCDict.so'],SHLIBVERSION=['$VERSION'],LIBS=[''])\nsrclib = pbaseenv.SharedLibrary(target = sotarget, source = list+['HallCDict.so'],SHLIBPREFIX='../lib',LIBS=[''])\nprint ('Source shared library = %s\\n' % srclib)\n\nlinkbase =pbaseenv.subst('$SHLIBPREFIX')+sotarget\n\ncleantarget = linkbase+'.so.'+pbaseenv.subst('$VERSION')\nlocalmajorcleantarget = '../'+linkbase+'.so'\n\nprint('cleantarget = %s\\n' % cleantarget)\nprint('localmajorcleantarget = %s\\n' % localmajorcleantarget)\ntry:\n os.symlink(cleantarget,localmajorcleantarget)\nexcept:\n print (\" Continuing ... \")\n\nClean(srclib,cleantarget)\nClean(srclib,localmajorcleantarget)\n\n#baseenv.Install('../',srclib)\n#baseenv.Alias('install',['../'])\n","sub_path":"src/SConscript.py","file_name":"SConscript.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"348926024","text":"\nimport argparse\nimport os\nimport pickle\nimport time\nimport logging\nfrom logging import handlers\nimport numpy as np\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom utils import *\nfrom sklearn.cluster import KMeans\nfrom model.vgg import *\nimport pandas as pd\nfrom SSL_model_and_data import *\nfrom torch.utils.data.sampler import WeightedRandomSampler\n\n\nargs={'seed':4396,\n 'batch':64,\n 'num_workers':0,\n 'lr':0.001,'wd':-4,'momentum':0.9,'pca':64,'n_init':5,'lr_classifier':0.05,\n 'known_label_size':500,\n 'nmb_cluster':100,\n 'batches_print':100,\n 'frozen_model_path':'./model_save_Dec16_07_05/parameter_epoch_80.pkl',\n 'frozen_model_conv':5,\n 'T1':10, # 假标签部分何时开始有权重\n 'T2':60, # 权重最大到此时为止\n 'epoch': 100,\n 'max_alpha':0.6, #权重最大为\n 'confidence_threshold':0.4, #达到多少置信度才将这个样本加入计算loss\n 'use_additional_features':False, #是否要在冻住的特征层之外,再加一个特征层。\n\n\n 'batch_print':200 #每过多少个batch print一次,一共是700个batch。\n }\n\n\n\n\n\ndef GetLabeledIndex(truth_label): #有一些样本是know label的,得到他们的序号\n\n # 抽出的样本必须和之前用的是一样的,所以set一样的seed\n\n np.random.seed(seed=args['seed'])\n known_label_size = args['known_label_size'] # 有多少个已知标签\n assert known_label_size % 10 == 0, \"known_label_size must be 10的倍数\"\n\n chosen_index = []\n\n for classed_label in range(10):\n this_class_index = np.flatnonzero(truth_label == classed_label)\n tmp = np.random.choice(this_class_index, int(known_label_size / 10), replace=False)\n chosen_index.extend(tmp.tolist())\n #chosen_index[classed_label] = tmp\n\n return chosen_index #是一个list\n\ndef My_read_model(dir):\n checkpoint = torch.load(dir)\n\n def rename_key(key):\n if not 'module' in key:\n return key\n return ''.join(key.split('.module'))\n\n checkpoint = {rename_key(key): val\n for key, val\n in checkpoint.items()}\n\n model = vgg16(out=args['nmb_cluster'])\n model.load_state_dict(checkpoint)\n model.cuda()\n return model\n\ndef get_unlabeled_weight(now_epoch):\n alpha = 0.0\n T1=args['T1']\n T2=args['T2']\n max_alpha=args['max_alpha']\n if now_epoch > T1:\n alpha = (now_epoch - T1) / (T2 - T1) * max_alpha\n if now_epoch > T2:\n 
alpha = max_alpha\n return alpha\n\n# def cal_weight_confidence(prob):\n# a=args['confidence_threshold']\n#\n# result = np.ones(len(prob))\n# y=torch.tensor(result, device=torch.device('cuda'), dtype=torch.float)\n#\n# #y=nn.Parameter( torch.ones(len(prob),dtype=torch.float) )\n# y[prob13:\n # break\n\n logger.info('************* EPOCH %d/%d ***************************\\n'%(big_epoch_iter,args['epoch']) )\n\n\n\n #这一步相当重要,get data loader\n dataloader=get_DATASET_and_LOADER(big_epoch_iter)\n print(\"length of dataloader %d\"%len(dataloader))\n #\n\n\n model.train()\n\n loss_known_array=[] #记录已知label部分的loss\n loss_unknown_array=[] #位置label部分的loss\n total_loss_array=[] #总loss,等于 loss_known+alpha*loss_unknown\n known_acc_array=[] #已知label的acc\n unknown_acc_array=[] #未知label的 acc\n num_of_known_label_array=[] #一个batch里面有多少个已知label\n prob_for_pseudo_label_array=[] #那些伪标签,他们的最大类的概率是多少\n\n for step, batch_data in enumerate(dataloader):\n\n\n\n batch_num=len(dataloader)\n data,label,is_known=map(lambda x: x.to(DEVICE), batch_data)\n model.zero_grad() #梯度清零\n\n outputs= model(data)\n #print(\"outpus\",outputs[0:3,:])\n #print(\"labels\",label)\n #为真标签计算loss\n loss_known = crossentropyloss(outputs, label)\n loss_known=loss_known*is_known\n #print(\"loss_known1\", loss_known)\n loss_known= torch.sum( loss_known) /(1e-5+torch.sum(is_known))\n #print(\"loss_known2\", loss_known)\n\n #为假标签计算loss\n with torch.no_grad():\n softmax_output = softmax_func(outputs) #这样得到的就是概率\n prob=torch.max(softmax_output,dim=1)[0] #最大的那一个概率\n\n weight_by_confidence=prob.clone()\n weight_by_confidence[weight_by_confidence length of list raise IndexError.\n \"\"\"\n if not self.head:\n return\n\n list_size = 1\n current = self.head\n while current.next and current.next.next:\n current = current.next.next\n list_size += 2\n if current.next:\n list_size += 1\n\n if (k > 0 and abs(k) >= list_size) or (k < 0 and abs(k) > list_size):\n raise IndexError\n\n if k < 0:\n k += list_size\n\n current = self.head\n while k:\n if not current.next:\n return\n current = current.next\n k -= 1\n\n return current.data\n\n","sub_path":"ex8/skipi_list.py","file_name":"skipi_list.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"241735269","text":"# stdlib imports\n\nimport os\nimport numpy as np\nimport urllib\nimport json\nfrom datetime import timedelta\nimport collections\n\n# local imports\nfrom mapio.shake import getHeaderData\nfrom libcomcat.search import get_event_by_id, search\nfrom libcomcat.classes import VersionOption\nfrom mapio.basemapcity import BasemapCities\nfrom mapio.multihaz import MultiHazardGrid\n\nimport matplotlib.cm as cm # Don't delete this, it's needed in an eval function\n\n\ndef is_grid_point_source(grid):\n \"\"\"Was the shakemap grid constructed with a point source?\n\n This makes use of the 'urat' layer, which is the ratio of the predicted ground motion\n standard deviation to the GMPE standard deviation. 
The only reason this could ever be\n greater than 1.0 is if the uncertainty of the prediction is inflated due to the point\n source approxmiation; further, if a point source was used, there will always be some\n locations with 'urat' > 1.0.\n\n Args:\n grid (ShakeGrid): A ShakeGrid object from MapIO.\n\n Returns:\n bool: True if point rupture.\n \"\"\"\n data = grid.getData()\n urat = data['urat'].getData()\n max_urat = np.max(urat)\n if max_urat > (1 + np.finfo(float).eps):\n return True\n else:\n return False\n\n\ndef get_event_comcat(shakefile, timewindow=60, degwindow=0.3, magwindow=0.2):\n \"\"\"\n Find an event in comcat, searching first by event id and if that\n fails searching by magnitude, time, and location.\n\n Args:\n shakefile (str): path to shakemap .xml file of event to find\n timewindow (float): width of time window to search around time defined\n in shakefile (in seconds)\n degwindow (float): width of area to search around location specified in\n shakefile (in degrees).\n magwindow (float): width of magnitude window to search around the\n magnitude specified in shakefile.\n\n Returns:\n None if event not found, else tuple (info, detail, shakemap) where,\n * info: json formatted dictionary of info.json for the event\n * detail: event detail from comcat\n * shakemap: shakemap of event found (from comcat)\n\n \"\"\"\n header_dicts = getHeaderData(shakefile)\n grid_dict = header_dicts[0]\n event_dict = header_dicts[1]\n version = grid_dict['shakemap_version']\n try:\n eid = event_dict['event_id']\n net = 'us'\n if 'event_network' in event_dict:\n net = event_dict['event_network']\n if not eid.startswith(net):\n eid = net + eid\n detail = get_event_by_id(eid, includesuperseded=True)\n except:\n lat = event_dict['lat']\n lon = event_dict['lon']\n mag = event_dict['magnitude']\n time = event_dict['event_timestamp']\n starttime = time - timedelta(seconds=timewindow)\n endtime = time + timedelta(seconds=timewindow)\n minlat = lat - degwindow\n minlon = lon - degwindow\n maxlat = lat + degwindow\n maxlon = lon + degwindow\n minmag = max(0, mag - magwindow)\n maxmag = min(10, mag + magwindow)\n events = search(starttime=starttime,\n endtime=endtime,\n minmagnitude=minmag,\n maxmagnitude=maxmag,\n minlatitude=minlat,\n minlongitude=minlon,\n maxlatitude=maxlat,\n maxlongitude=maxlon)\n if not len(events):\n return None\n detail = events[0].getDetailEvent()\n allversions = detail.getProducts('shakemap', version=VersionOption.ALL)\n # Find the right version\n vers = [allv.version for allv in allversions]\n idx = np.where(np.array(vers) == version)[0][0]\n shakemap = allversions[idx]\n infobytes, url = shakemap.getContentBytes('info.json')\n info = json.loads(infobytes.decode('utf-8'))\n return info, detail, shakemap\n\n\ndef parseMapConfig(config, fileext=None):\n \"\"\"\n Parse config for mapping options.\n\n Args:\n config (ConfigObj): ConfigObj object.\n fileext (str): File extension to add to relative filepaths, will be\n prepended to any file paths in config.\n\n Returns:\n dict: Dictionary of map options pulled from config file.\n \"\"\"\n topofile = None\n roadfolder = None\n cityfile = None\n roadcolor = '6E6E6E'\n countrycolor = '177F10'\n watercolor = 'B8EEFF'\n ALPHA = 0.7\n oceanfile = None\n #oceanref = None\n #roadref = None\n #cityref = None\n\n if fileext is None:\n fileext = '.'\n if 'dem' in config:\n topofile = os.path.join(fileext, config['dem']['file'])\n if os.path.exists(topofile) is False:\n print('DEM not valid - hillshade will not be possible\\n')\n if 'ocean' 
in config:\n oceanfile = os.path.join(fileext, config['ocean']['file'])\n #try:\n # oceanref = config['ocean']['shortref']\n #except:\n # oceanref = 'unknown'\n if 'roads' in config:\n roadfolder = os.path.join(fileext, config['roads']['file'])\n if os.path.exists(roadfolder) is False:\n print('roadfolder not valid - roads will not be displayed\\n')\n roadfolder = None\n #try:\n # roadref = config['roads']['shortref']\n #except:\n # roadref = 'unknown'\n if 'cities' in config:\n cityfile = os.path.join(fileext, config['cities']['file'])\n #try:\n # cityref = config['cities']['shortref']\n #except:\n # cityref = 'unknown'\n if os.path.exists(cityfile):\n try:\n BasemapCities.loadFromGeoNames(cityfile=cityfile)\n except Exception as e:\n print(e)\n print('cities file not valid - cities will not be displayed\\n')\n cityfile = None\n else:\n print('cities file not valid - cities will not be displayed\\n')\n cityfile = None\n if 'roadcolor' in config['colors']:\n roadcolor = config['colors']['roadcolor']\n if 'countrycolor' in config['colors']:\n countrycolor = config['colors']['countrycolor']\n if 'watercolor' in config['colors']:\n watercolor = config['colors']['watercolor']\n if 'alpha' in config['colors']:\n ALPHA = float(config['colors']['alpha'])\n\n countrycolor = '#'+countrycolor\n watercolor = '#'+watercolor\n roadcolor = '#'+roadcolor\n\n mapin = {'topofile': topofile, 'roadfolder': roadfolder,\n 'cityfile': cityfile, 'roadcolor': roadcolor,\n 'countrycolor': countrycolor, 'watercolor': watercolor,\n 'ALPHA': ALPHA, 'oceanfile': oceanfile} # 'roadref': roadref, 'cityref': cityref, 'oceanref': oceanref\n\n return mapin\n\n\ndef parseConfigLayers(maplayers, config, keys=None):\n \"\"\"\n Parse things that need to coodinate with each layer (like lims, logscale,\n colormaps etc.) from config file, in right order, where the order is from\n maplayers.\n\n Args:\n maplayers (dict): Dictionary containing model output.\n config (ConfigObj): Config object describing options for specific\n model.\n keys (list): List of keys of maplayers to process, e.g. 
``['model']``.\n\n Returns:\n tuple: (plotorder, logscale, lims, colormaps, maskthreshes) where:\n * plotorder: maplayers keys in order of plotting.\n * logscale: list of logscale options from config corresponding to\n keys in plotorder (same order).\n * lims: list of colorbar limits from config corresponding to keys\n in plotorder (same order).\n * colormaps: list of colormaps from config corresponding to keys\n in plotorder (same order),\n * maskthreshes: list of mask thresholds from config corresponding\n to keys in plotorder (same order).\n\n \"\"\"\n #TODO:\n # - Add ability to interpret custom color maps.\n\n # get all key names, create a plotorder list in case maplayers is not an\n # ordered dict, making sure that anything called 'model' is first\n if keys is None:\n keys = list(maplayers.keys())\n plotorder = []\n\n configkeys = list(config.keys())\n\n try:\n limits = config[configkeys[0]]['display_options']['lims']\n lims = []\n except:\n lims = None\n limits = None\n\n try:\n colors = config[configkeys[0]]['display_options']['colors']\n colormaps = []\n except:\n colormaps = None\n colors = None\n\n try:\n logs = config[configkeys[0]]['display_options']['logscale']\n logscale = []\n except:\n logscale = False\n logs = None\n\n try:\n masks = config[configkeys[0]]['display_options']['maskthresholds']\n maskthreshes = []\n except:\n maskthreshes = None\n masks = None\n\n try:\n default = \\\n config[configkeys[0]]['display_options']['colors']['default']\n default = eval(default)\n except:\n default = None\n\n for i, key in enumerate(keys):\n plotorder += [key]\n if limits is not None:\n found = False\n for l in limits:\n getlim = None\n if l in key:\n if type(limits[l]) is list:\n getlim = np.array(limits[l]).astype(np.float)\n else:\n try:\n getlim = eval(limits[l])\n except:\n getlim = None\n lims.append(getlim)\n found = True\n if not found:\n lims.append(None)\n\n if colors is not None:\n found = False\n for c in colors:\n if c in key:\n getcol = colors[c]\n colorobject = eval(getcol)\n if colorobject is None:\n colorobject = default\n colormaps.append(colorobject)\n found = True\n if not found:\n colormaps.append(default)\n\n if logs is not None:\n found = False\n for g in logs:\n getlog = False\n if g in key:\n if logs[g].lower() == 'true':\n getlog = True\n logscale.append(getlog)\n found = True\n if not found:\n logscale.append(False)\n\n if masks is not None:\n found = False\n for m in masks:\n if m in key:\n getmask = eval(masks[m])\n maskthreshes.append(getmask)\n found = True\n if not found:\n maskthreshes.append(None)\n\n # Reorder everything so model is first, if it's not already\n if plotorder[0] != 'model':\n indx = [idx for idx, key in enumerate(plotorder) if key == 'model']\n if len(indx) == 1:\n indx = indx[0]\n firstpo = plotorder.pop(indx)\n plotorder = [firstpo] + plotorder\n firstlog = logscale.pop(indx)\n logscale = [firstlog] + logscale\n firstlim = lims.pop(indx)\n lims = [firstlim] + lims\n firstcol = colormaps.pop(indx)\n colormaps = [firstcol] + colormaps\n\n return plotorder, logscale, lims, colormaps, maskthreshes\n\n\ndef text_to_json(input1):\n \"\"\"Simplification of text_to_json from shakelib.rupture.factory\n\n Args:\n input1 (str): url or filepath to text file\n\n Returns:\n json formatted stream of input1\n \"\"\"\n if os.path.exists(input1):\n with open(input1, 'r') as f:\n lines = f.readlines()\n else:\n with urllib.request.urlopen(input1) as f:\n lines = f.readlines()\n\n x = []\n y = []\n z = []\n reference = ''\n # convert to geojson\n for 
line in lines:\n sline = line.strip()\n if sline.startswith('#'):\n reference += sline.strip('#').strip('Source: ')\n continue\n if sline.startswith('>'):\n if len(x): # start of new line segment\n x.append(np.nan)\n y.append(np.nan)\n z.append(np.nan)\n continue\n else: # start of file\n continue\n if not len(sline.strip()):\n continue\n parts = sline.split()\n\n y.append(float(parts[0]))\n x.append(float(parts[1]))\n if len(parts) >= 3:\n z.append(float(parts[2]))\n else:\n print('Fault file has no depths, assuming zero depth')\n z.append(0.0)\n coords = []\n poly = []\n for lon, lat, dep in zip(x, y, z):\n if np.isnan(lon):\n coords.append(poly)\n poly = []\n else:\n poly.append([lon, lat, dep])\n if poly != []:\n coords.append(poly)\n\n d = {\n \"type\": \"FeatureCollection\",\n \"metadata\": {\n 'reference': reference\n },\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {\n \"rupture type\": \"rupture extent\"\n },\n \"geometry\": {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [coords]\n }\n }\n ]\n }\n return json.dumps(d)\n\n\ndef write_floats(filename, grid2d):\n \"\"\"Create a binary (with acc. header file) version of a Grid2D object.\n\n Args:\n filename (str): String filename to write (i.e., 'probability.flt')\n grid2d (Grid2D): MapIO Grid2D object.\n\n Returns:\n Given a filename input of \"probability.flt\", this function will\n create that file, plus a text file called \"probability.hdr\".\n \"\"\"\n geodict = grid2d.getGeoDict().asDict()\n array = grid2d.getData().astype('float32')\n np.save(filename, array)\n npyfilename = filename + '.npy'\n os.rename(npyfilename, filename)\n fpath, fname = os.path.split(filename)\n fbase, _ = os.path.splitext(fname)\n hdrfile = os.path.join(fpath, fbase + '.hdr')\n f = open(hdrfile, 'wt')\n for key, value in geodict.items():\n if isinstance(value, int):\n fmt = '%s = %i\\n'\n elif isinstance(value, float):\n fmt = '%s = %.4f\\n'\n else:\n fmt = '%s = %s\\n'\n f.write(fmt % (key, value))\n f.close()\n\n\ndef savelayers(grids, filename):\n \"\"\"\n Save ground failure layers object as a MultiHazard HDF file, preserving\n metadata structures. 
All layers must have same geodictionary.\n\n Args:\n grids: Ground failure layers object.\n filename (str): Path to where you want to save this file.\n\n Returns:\n .hdf5 file containing ground failure layers\n \"\"\"\n layers = collections.OrderedDict()\n metadata = collections.OrderedDict()\n for key in list(grids.keys()):\n layers[key] = grids[key]['grid'].getData()\n metadata[key] = {\n 'description': grids[key]['description'],\n 'type': grids[key]['type'],\n 'label': grids[key]['label']\n }\n origin = {}\n header = {}\n mgrid = MultiHazardGrid(layers, grids[key]['grid'].getGeoDict(),\n origin,\n header,\n metadata=metadata)\n mgrid.save(filename)\n\n\ndef loadlayers(filename):\n \"\"\"\n Load a MultiHazard HDF file back in as a ground failure layers object in\n active memory (must have been saved for this purpose).\n Args:\n filename (str): Path to layers file (hdf5 extension).\n\n Returns:\n Ground failure layers object\n \"\"\"\n mgrid = MultiHazardGrid.load(filename)\n grids = collections.OrderedDict()\n for key in mgrid.getLayerNames():\n grids[key] = {\n 'grid': mgrid.getData()[key],\n 'description': mgrid.getMetadata()[key]['description'],\n 'type': mgrid.getMetadata()[key]['type'],\n 'label': mgrid.getMetadata()[key]['label']\n }\n\n return grids\n","sub_path":"gfail/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":16128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"411519782","text":"\"\"\"ROSARIO VALERO MIRANDA - 1º DAW - PRACTICA5 - EJERCICIO 7\r\nEscriu un programa que demani un nombre (límit) i després et demani nombres\r\nfins a què la suma dels nombres introduits superi un nombre inicial.\r\nEl programa termina escribint la llista de nombres\"\"\"\r\n\r\nprint(\"Introduce el número limite\")\r\nnum=int(input())\r\n\r\nprint(\"Introduce un valor\")\r\nnum1=int(input())\r\n\r\nnum2=num1\r\nlista=[num1]\r\n\r\nwhile num>num2:\r\n print(\"Introduce otro valor\")\r\n num1=int(input())\r\n num2=num2+num1\r\n lista.append(num1)\r\n\r\nlista.pop() \r\nprint(\"El limite a superar es\", num, \".La lista creada és\", lista)\r\n\r\n","sub_path":"Ejercicios-Pr5/ejercicio7.py","file_name":"ejercicio7.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"42357694","text":"import cv2\nimport os\n\n# 이미지 찾기\n\n# 이미지 불러오기\nimg = cv2.imread(\"../img/suwon.jpg\")\n\n\n\n# gray함수로 변환\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# blur 처리\ngray = cv2.GaussianBlur(gray,(5,5),0)\n\nmser = cv2.MSER_create()\nregions,_ = mser.detectRegions(gray)\n\nclone = img.copy()\n\nhulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]\n\n# 사각형안에 다른사각형 지워주기\nremove1 = []\nfor i,c1 in enumerate(hulls):\n\n x, y, w, h = cv2.boundingRect(c1)\n r1_start = (x,y)\n r1_end = (x+w, y+h)\n\n for j,c2 in enumerate(hulls):\n\n if i == j:\n continue\n\n x, y, w, h = cv2.boundingRect(c2)\n r2_start = (x,y)\n r2_end = (x+w, y+h)\n\n # 큰사각형안에 작은사각형이 있는지\n if r1_start[0] >= r2_start[0] and r1_start[1] >= r2_start[1] and r1_end[0] <= r2_end[0] and r1_end[1] <= r2_end[1]:\n remove1.append(i)\n\n#count = 0\nfor j,cnt in enumerate(hulls):\n if j in remove1: continue\n x, y, w, h = cv2.boundingRect(cnt)\n margin = 10\n cv2.rectangle(clone, (x-margin, y-margin), (x + w + margin, y + h + margin), (0, 255, 0), 1)\n# count = count+1\n\n#print(count)\n\n\ncv2.imshow('mser', 
clone)\ncv2.waitKey(0)","sub_path":"crawling_translate/opencv.py","file_name":"opencv.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"142569851","text":"import VocConstants\nimport torch\n\n\ndef read_instances_from_file(inst_file, max_sent_len, keep_case):\n ''' Convert file into word seq lists and vocab '''\n\n word_insts = []\n trimmed_sent_count = 0\n with open(inst_file) as f:\n for sent in f:\n if not keep_case:\n sent = sent.lower()\n words = sent.split()\n if len(words) > max_sent_len:\n trimmed_sent_count += 1\n word_inst = words[:max_sent_len]\n\n if word_inst:\n word_insts += [[VocConstants.BOS_WORD] + word_inst + [VocConstants.EOS_WORD]]\n else:\n word_insts += [None]\n\n print('[Info] Get {} instances from {}'.format(len(word_insts), inst_file))\n\n if trimmed_sent_count > 0:\n print('[Warning] {} instances are trimmed to the max sentence length {}.'\n .format(trimmed_sent_count, max_sent_len))\n\n return word_insts\n\n\ndef build_vocab_idx(word_insts, min_word_count):\n ''' Trim vocab by number of occurence '''\n\n full_vocab = set(w for sent in word_insts for w in sent)\n print('[Info] Original Vocabulary size =', len(full_vocab))\n\n word2idx = {\n VocConstants.BOS_WORD: VocConstants.BOS,\n VocConstants.EOS_WORD: VocConstants.EOS,\n VocConstants.PAD_WORD: VocConstants.PAD,\n VocConstants.UNK_WORD: VocConstants.UNK}\n\n word_count = {w: 0 for w in full_vocab}\n\n for sent in word_insts:\n for word in sent:\n word_count[word] += 1\n\n ignored_word_count = 0\n for word, count in word_count.items():\n if word not in word2idx:\n if count > min_word_count:\n word2idx[word] = len(word2idx)\n else:\n ignored_word_count += 1\n\n print('[Info] Trimmed vocabulary size = {},'.format(len(word2idx)),\n 'each with minimum occurrence = {}'.format(min_word_count))\n print(\"[Info] Ignored word count = {}\".format(ignored_word_count))\n return word2idx\n\n\ndef convert_instance_to_idx_seq(word_insts, word2idx):\n ''' Mapping words to idx sequence. 
'''\n return [[word2idx.get(w, VocConstants.UNK) for w in s] for s in word_insts]\n\n\n# src = read_instances_from_file('./data/training/train.en', 10, keep_case=False)\n# tgt = read_instances_from_file('./data/training/train.de', 10, keep_case=False)\n# src, tgt = list(zip(*[(s, t) for s, t in zip(src, tgt) if s and t]))\n#\n# word2idx = build_vocab_idx(src, 0)\n# idx_seq = convert_instance_to_idx_seq(src, word2idx)\n#\n# print(idx_seq)\n\n\ndef main():\n max_token_seq_len = VocConstants.MAX_WORD_SEQ_LEN\n\n train_src_word_insts = read_instances_from_file(VocConstants.training_en, max_token_seq_len, False)\n train_tgt_word_insts = read_instances_from_file(VocConstants.training_de, max_token_seq_len, False)\n\n valid_src_word_insts = read_instances_from_file(VocConstants.validation_en, max_token_seq_len, False)\n valid_tgt_word_insts = read_instances_from_file(VocConstants.validation_de, max_token_seq_len, False)\n\n # test_src_word_insts = read_instances_from_file(VocConstants.test_en, max_token_seq_len, False)\n # test_tgt_word_insts = read_instances_from_file(VocConstants.test_de, max_token_seq_len, False)\n\n if len(train_src_word_insts) != len(train_tgt_word_insts):\n print('[Warning] The training set count is not equal.')\n minlen = min(len(train_src_word_insts), len(train_tgt_word_insts))\n train_tgt_word_insts = train_tgt_word_insts[:minlen]\n train_src_word_insts = train_src_word_insts[:minlen]\n\n train_src_word_insts, train_tgt_word_insts = list(\n zip(*[(s, t) for s, t in zip(train_src_word_insts, train_tgt_word_insts) if s and t]))\n\n if len(valid_src_word_insts) != len(valid_tgt_word_insts):\n print('[Warning] The validation set count is not equal.')\n minlen = min(len(valid_src_word_insts), len(valid_tgt_word_insts))\n valid_src_word_insts = valid_src_word_insts[:minlen]\n valid_tgt_word_insts = valid_tgt_word_insts[:minlen]\n\n valid_src_word_insts, valid_tgt_word_insts = list(\n zip(*[(s, t) for s, t in zip(valid_src_word_insts, valid_tgt_word_insts) if s and t]))\n\n # if len(test_src_word_insts) != len(test_tgt_word_insts):\n # print('[Warning] The training set count is not equal.')\n # minlen = min(len(test_src_word_insts), len(test_tgt_word_insts))\n # test_src_word_insts = test_src_word_insts[:minlen]\n # test_tgt_word_insts = test_tgt_word_insts[:minlen]\n #\n # test_src_word_insts, test_tgt_word_insts = list(\n # zip(*[(s, t) for s, t in zip(test_src_word_insts, test_tgt_word_insts) if s and t]))\n\n print('[Info] Build vocabulary for source.')\n src_word2idx = build_vocab_idx(train_src_word_insts,VocConstants.MIN_WORD_COUNT)\n\n print('[Info] Build vocabulary for target.')\n tgt_word2idx = build_vocab_idx(train_tgt_word_insts,VocConstants.MIN_WORD_COUNT)\n\n print('[Info] Convert source instances to index sequence.')\n train_src_insts = convert_instance_to_idx_seq(train_src_word_insts,src_word2idx)\n valid_src_insts = convert_instance_to_idx_seq(valid_src_word_insts,src_word2idx)\n\n print('[Info] Convert target instances to index sequence.')\n train_tgt_insts = convert_instance_to_idx_seq(train_tgt_word_insts,tgt_word2idx)\n valid_tgt_insts = convert_instance_to_idx_seq(valid_tgt_word_insts,tgt_word2idx)\n\n data = {\n 'dict':\n {'src': src_word2idx,\n 'tgt': tgt_word2idx},\n 'train':\n {'src': train_src_insts,\n 'tgt': train_tgt_insts},\n 'valid':\n {'src': valid_src_insts,\n 'tgt': valid_tgt_insts}\n }\n print('[Info] Dumping the processed data to pickle file ./data/data.chkpt')\n torch.save(data,'./data/data.chkpt')\n print('[Info] Finish.')\n\nif __name__ == 
'__main__':\n main()\n\n\n\n\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":5868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"187411212","text":"inFile = open(\"in2.txt\")\r\noutFile = open(\"out2.txt\", 'w')\r\n\r\nnumCases = int(inFile.readline())\r\nfor i in range(1, numCases+1):\r\n line = inFile.readline().split()\r\n \r\n points = [0]*int(line[0])\r\n s = int(line[1])\r\n p = int(line[2])\r\n for n in range(len(points)):\r\n points[n] = int(line[n+3])\r\n \r\n numNormal = 0\r\n numSurprising = 0 \r\n \r\n for g in points:\r\n isNormal = False\r\n if (p-1) < 0:\r\n if p <= g:\r\n numNormal += 1\r\n isNormal = True\r\n else:\r\n if ((3*p)-2) <= g:\r\n numNormal += 1\r\n isNormal = True\r\n \r\n if not isNormal:\r\n if (p-2) < 0:\r\n if p <= g:\r\n numSurprising += 1\r\n else:\r\n if ((3*p)-4) <= g:\r\n numSurprising += 1\r\n \r\n if numSurprising > s:\r\n numSurprising = s\r\n \r\n outFile.write(\"Case #\" + str(i) + \": \" + str(numNormal + numSurprising) + '\\n')","sub_path":"solutions_1595491_0/Python/stephsolis/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"271794071","text":"\"\"\"init file for restul api\"\"\"\nfrom fastapi import FastAPI\n\nfrom starlette.staticfiles import StaticFiles\n\nfrom app.adapters import database, redis\nfrom app.common import logger, settings\nfrom app.common.metrics import Metrics\n\nfrom app.api.rest import middlewares\n\n\ndef init_services(app):\n\n @app.on_event(\"startup\")\n async def init_db_conn_pool():\n app.db_conns = database.PoolConnections(\n write_dsn=settings.WRITE_DB_DSN,\n read_dsn=settings.READ_DB_DSN,\n min_pool_size=settings.DB_MIN_POOL_SIZE,\n max_pool_size=settings.DB_MAX_POOL_SIZE\n )\n await app.db_conns.connect()\n\n @app.on_event(\"startup\")\n async def init_redis_conn_pool():\n app.redis_conns = redis.RedisPoolConnections(\n settings.REDIS_HOST,\n settings.REDIS_PORT,\n min_pool_size=settings.REDIS_MIN_POOL_SIZE,\n max_pool_size=settings.REDIS_MAX_POOL_SIZE\n )\n await app.redis_conns.connect()\n \n @app.on_event(\"shutdown\")\n async def close_db_conn_pool():\n await app.db_conns.close()\n\n \n @app.on_event(\"shutdown\")\n async def close_redis_conn_pool():\n await app.redis_conns.close()\n await app.redis_conns.wait_closed()\n \n\ndef init_middleware(app):\n app.add_middleware(middlewares.DBSessionMiddleware)\n app.add_middleware(middlewares.RedisSessionMiddleware)\n app.add_middleware(middlewares.RestContextMiddleware)\n app.add_middleware(middlewares.LogRequestMiddleWare)\n app.add_middleware(middlewares.MetricsMiddleware,\n metrics=Metrics('restapi', settings.APP_ENV))\n\n\ndef init_routes(app):\n if settings.APP_ENV == 'local' or settings.UNIT_TEST:\n logger.info('Serving test coverage reports at /tests')\n app.mount('/tests', app=StaticFiles(\n directory='/srv/root/tests/htmlcov/',\n html=True,\n check_dir=False\n ))\n\n from . import v1\n app.include_router(v1.router, prefix=\"/v1\")\n\n from . 
import monitoring\n app.include_router(monitoring.router)\n\n\ndef bootstrap():\n logger.warning(\"rest api starting\")\n api = FastAPI(\n title=\"Tipstoday API\",\n description=\"\",\n docs_url=None,\n redoc_url=\"/docs\"\n )\n\n init_services(api)\n init_middleware(api)\n init_routes(api)\n\n return api\n\n\n\n\n","sub_path":"main-service-master/mount/app/api/rest/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"527975868","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\n\n# fix sys path so we don't need to setup PYTHONPATH\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\", \"..\"))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'userena.runtests.settings'\n\nimport django\n\nif django.VERSION >= (1, 7, 0):\n # starting from 1.7.0 we need to run setup() in order to populate\n # app config\n django.setup()\n\nfrom django.conf import settings\nfrom django.test.utils import get_runner\n\n\ndef usage():\n return \"\"\"\n Usage: python runtests.py [UnitTestClass].[method]\n\n You can pass the Class name of the `UnitTestClass` you want to test.\n\n Append a method name if you only want to test a specific method of that\n class.\n \"\"\"\n\n\ndef main():\n TestRunner = get_runner(settings)\n test_runner = TestRunner(verbosity=2, failfast=False)\n\n if len(sys.argv) > 1:\n test_modules = sys.argv[1:]\n elif len(sys.argv) == 1:\n test_modules = []\n else:\n print(usage())\n sys.exit(1)\n\n if (1, 6, 0) <= django.VERSION < (1, 9, 0):\n # this is a compat hack because in django>=1.6.0 you must provide\n # module like \"userena.contrib.umessages\" not \"umessages\"\n from django.db.models import get_app\n test_modules = [\n # be more strict by adding .tests to not run umessages tests twice\n # if both userena and umessages are tested\n get_app(module_name).__name__[:-7] + \".tests\"\n for module_name\n in test_modules\n ]\n elif django.VERSION >= (1, 9, 0):\n from django.apps import apps\n test_modules = [\n # be more strict by adding .tests to not run umessages tests twice\n # if both userena and umessages are tested\n apps.get_app_config(module_name).name + \".tests\"\n for module_name\n in test_modules\n ]\n\n if django.VERSION < (1, 7, 0):\n # starting from 1.7.0 built in django migrations are run\n # for older releases this patch is required to enable testing with\n # migrations\n from south.management.commands import patch_for_test_db_setup\n patch_for_test_db_setup()\n\n failures = test_runner.run_tests(test_modules or ['userena'])\n sys.exit(failures)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"userena/runtests/runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"410784905","text":"\"\"\"\nAuthor : Lily\nDate : 2018-09-20\nQQ : 339600718\nPULL&BEAR PULL&BEAR PullBear-s\n抓取思路:所有数据都在一个主页面上,解析页面上的数据即可。\n注意: 所有的数据都在p标签里。\nURL:https://mp.weixin.qq.com/s/aj7UPr9l9qnnhB_mDTHUIw\n\n注意\n\"\"\"\nimport re\nimport datetime\nimport requests\nfrom lxml import etree\n\n\nfilename = \"PullBear-s\" + re.sub('[^0-9]', '', str(datetime.datetime.now())) + \".csv\"\nf = open(filename, 'w', encoding='utf-8')\nf.write('name,address,phone,\\n')\nurl = \"https://mp.weixin.qq.com/s/aj7UPr9l9qnnhB_mDTHUIw\"\nhtml = requests.get(url).text\nhtml_lxml = etree.HTML(html)\naddresss = 
html_lxml.xpath('//*[@id=\"js_content\"]/section/section/section/section/p//text()')\n\nprint(addresss)\nprint(len(addresss))\nfor i in range(len(addresss)):\n\n if \"地址\" in addresss[i]:\n name = addresss[i-1]\n address = addresss[i].replace('地址:', '')\n if \"电话:\"== addresss[i+1]:\n phone = addresss[i + 2]\n print(addresss[i + 2])\n else:\n phone = addresss[i+1].replace('电话:', '')\n\n f.write(name + \",\" + address + \",\" + phone + \"\\n\")\n\n","sub_path":"lily/Brands_capture/PullBear-s/PullBear-s.py","file_name":"PullBear-s.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"86410963","text":"# pip install coolsms-python-sdk==2.0.3\nimport sys\nfrom sdk.api.message import Message\nfrom sdk.exceptions import CoolsmsException\nimport pandas as pd\n\ndef sendMessage(btn_name):\n # api_key, secret설정\n api_key = \"NCSSTF3LTXVTVZAE\"\n api_secret = \"RPNJEHR1MDK5EWOQUD7XVFDGX89EGJHP\"\n\n # 친구목록 중 번호를 불러와서 tolist(), 리스트에 있는 모든 번호에 문자\n friend = pd.read_csv('./file/friend.csv')\n f = friend['phone'].tolist()\n\n\n # 버튼에 따른 메시지 설정\n msg = '.'\n\n if btn_name == 'light_on_btn':\n msg = \"불 켜주세요\"\n\n elif btn_name == 'light_off_btn':\n msg = \"불 꺼주세요\"\n\n elif btn_name == 'cold_btn':\n msg = \"추워요\"\n\n elif btn_name == 'hot_btn':\n msg = \"더워요\"\n\n elif btn_name == 'big_btn':\n msg = \"대변 하고싶어요\"\n\n elif btn_name == 'small_btn':\n msg = \"소변 하고싶어요\"\n\n elif btn_name == 'water_btn':\n msg = \"물 주세요\"\n\n elif btn_name == 'out_btn':\n msg = \"나가고 싶어요\"\n\n elif btn_name == 'pose_btn':\n msg = '자세가 불편해요'\n\n elif btn_name == 'emergency_btn':\n msg = '비상호출'\n\n elif btn_name == 'hungry_btn':\n msg = \"배고파요\"\n\n elif btn_name == 'full_btn':\n msg = \"배불러요\"\n\n\n for i in f: # 친구 목록을 읽는 form 문 ~ 82 line\n params = dict()\n params['type'] = 'sms' # Message type ( sms, lms, mms, ata )\n phone = i.replace('-','') # 010-0000-0000 --> 01000000000\n params['to'] = phone # Recipients Number '01000000000,01000000001'\n print(phone)\n params['from'] = '01025914739' # Sender number - 계정에 등록된 번호\n params['text'] = msg # Message\n\n cool = Message(api_key, api_secret)\n\n #send 외엔 성공, 실패 여부 체크 없어도됨\n try:\n response = cool.send(params)\n print(\"Success Count : %s\" % response['success_count'])\n print(\"Error Count : %s\" % response['error_count'])\n print(\"Group ID : %s\" % response['group_id'])\n if \"error_list\" in response:\n print(\"Error List : %s\" % response['error_list'])\n except CoolsmsException as e:\n print(\"Error Code : %s\" % e.code)\n print(\"Error Message : %s\" % e.msg)\n","sub_path":"final/SendMessage.py","file_name":"SendMessage.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"282034823","text":"##########################################################################\n#\n# pgAdmin 4 - PostgreSQL Tools\n#\n# Copyright (C) 2013 - 2020, The pgAdmin Development Team\n# This software is released under the PostgreSQL Licence\n#\n##########################################################################\n\nimport os\nfrom flask import current_app\n\n\ndef _create_directory_if_not_exists(_path):\n if _path and not os.path.exists(_path):\n os.mkdir(_path)\n\n\ndef create_app_data_directory(config):\n \"\"\"\n Create the required directories (if not present).\n \"\"\"\n # Create the directory containing the configuration file (if not present).\n 
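    # _create_directory_if_not_exists() (defined above) creates only a single
    # missing directory level via os.mkdir(). If nested directories ever had to
    # be created in one call, a hypothetical alternative (an assumption, not
    # what this module does) would be:
    #
    #     os.makedirs(os.path.dirname(config.SQLITE_PATH), mode=0o700, exist_ok=True)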
_create_directory_if_not_exists(os.path.dirname(config.SQLITE_PATH))\n    # Try to set the permissions on the directory, but don't complain\n    # if we can't. This may be the case on a mounted directory, e.g. in\n    # OpenShift. We'll still secure the config database anyway.\n    if os.name != 'nt':\n        try:\n            os.chmod(os.path.dirname(config.SQLITE_PATH), 0o700)\n        except Exception as e:\n            # The flask app isn't setup yet, so we can't use the logger\n            print('WARNING: Failed to set ACL on the directory containing the '\n                  'configuration database: {}'.format(e))\n\n    # Create the directory containing the log file (if not present).\n    _create_directory_if_not_exists(os.path.dirname(config.LOG_FILE))\n\n    # Create the session directory (if not present).\n    _create_directory_if_not_exists(config.SESSION_DB_PATH)\n    if os.name != 'nt':\n        os.chmod(config.SESSION_DB_PATH, 0o700)\n\n    # Create the storage directory (if not present).\n    _create_directory_if_not_exists(config.STORAGE_DIR)\n","sub_path":"web/pgadmin/setup/data_directory.py","file_name":"data_directory.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"225729893","text":"# Practical 5\n# Galaxy Data Structures\ndata = {}\ngMap = []\nfor elements_in_row in range(5):\n    times = 5\n    gMap += [[0] * 5]\n\n\ndef recruit(name, coords):\n    row = gMap[coords[0]] # returns a list containing the row from the map\n    if row[coords[1]] == \"E\": # avoiding the case of overwriting names\n        overwritten_name = list(data.keys())[list(data.values()).index(coords)]\n        # https://stackoverflow.com/questions/8023306/get-key-by-value-in-dictionary\n        # Basically, it separates the dictionary's values in a list,\n        # finds the position of the value you have, and gets the key at that position\n        del data[overwritten_name]\n        print(f\"{overwritten_name} was removed from the galaxy, because a new member was added on these coordinates!\")\n    row[coords[1]] = \"E\" # updates the element on a specific position\n    data.update({name: coords}) # updating the dictionary with data (the same as data[name] = coords)\n\n\ndef retire(name):\n    if name not in data:\n        print(f\"Sorry {name} was not found in the database!\")\n    else:\n        coords = data[name]\n        row = gMap[coords[0]] # returns a list containing the row from the map\n        row[coords[1]] = 0 # updates the element on a specific position\n        del data[name]\n\n\ndef printMap(map):\n    for row in map:\n        for g in row:\n            print(g, end=\" \")\n        print()\n\n\ndef ships(data):\n    print(\"%d Ships.\" % len(data))\n\n\ndef crew(data):\n    if data:\n        print(f\"Crew: {', '.join([name for name in data.keys()])}\")\n    else:\n        print(\"We don't have any crew members yet\")\n\n\ndef crew_coords(data):\n    if data:\n        for x in data:\n            print(\"Name: %s\" % x)\n            coords = data[x]\n            coords = [x+1 for x in coords]\n            print(\"Coords: %s\\n\" % coords)\n    else:\n        print(\"We don't have any crew members yet\")\n\n\n# resetting the map to its default values and clearing all of the arrays\ndef resetting(map):\n    for lists in range(len(map)):\n        for elements_in_rows in range(len(map[lists])):\n            map[lists][elements_in_rows] = 0\n    # print()\n    # printMap(gMap)\n    data.clear()\n    # print(data) to show it works\n    printMap(gMap)\n\nprint('Here is the map of the galaxy:')\nprintMap(gMap)\n\nquit = False\nwhile not quit:\n    ans = input(\"What do you want to do? 
1.Recruit 2.Retire 3.PrintMap 4.PrintShips 5.PrintCrew 6.PrintCrewCoords 7.ResetTheMap 8.Quit (1, 2, 3, 4, 5, 6, 7 or 8): \")\n\n if ans == \"1\":\n name = input(\"Who do you want to recruit? \")\n coords = input(\"Where do you want them given in co-ords e.g. '1 1' or '5 5': \")\n try:\n coords = [int(x) for x in coords.split()] # coords.split() works the same\n x, y = int(coords[0]), int(coords[1])\n coords = (x-1, y-1) # creating a tuple for user's coords\n recruit(name, coords)\n except:\n print('Please enter valid coordinates.')\n elif ans == \"2\":\n name = input(\"Who do you want to retire? \")\n retire(name)\n elif ans == \"3\":\n printMap(gMap)\n elif ans == \"4\":\n ships(data)\n elif ans == \"5\":\n crew_coords(data)\n elif ans == \"6\":\n crew(data)\n elif ans == \"7\":\n resetting(gMap)\n elif ans == \"8\":\n print(\"Quitting Program.......\")\n quit = True\n else:\n print(\"Command not understood!\")\n\n print()\n","sub_path":"Python/Python Fundamentals/5.Data Structures and Built-In Functions/4.1Marked Exercise (Battleship).py","file_name":"4.1Marked Exercise (Battleship).py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"553411995","text":"# -*- coding: utf-8 -*-\n\"\"\"For updating + syncing the interwiki cache.\"\"\"\nimport errno\nimport os\nimport subprocess\n\nimport scap.cli as cli\nimport scap.lint as lint\nimport scap.main as main\nimport scap.utils as utils\n\n\n@cli.command('update-interwiki-cache')\nclass UpdateInterwikiCache(main.SyncFile):\n \"\"\"Scap sub-command to update and sync the interwiki cache.\"\"\"\n\n @cli.argument('--force', action='store_true', help='Skip canary checks')\n def main(self, *extra_args):\n \"\"\"Update the latest interwiki cache.\"\"\"\n self.arguments.message = 'Updating interwiki cache'\n self.arguments.file = os.path.join('wmf-config', 'interwiki.php')\n return super(UpdateInterwikiCache, self).main(*extra_args)\n\n def _before_cluster_sync(self):\n interwikifile = os.path.join(\n self.config['stage_dir'], self.arguments.file)\n if not os.path.exists(interwikifile):\n raise IOError(\n errno.ENOENT, 'File/directory not found', interwikifile)\n\n relpath = os.path.relpath(interwikifile, self.config['stage_dir'])\n self.include = relpath\n\n with open(interwikifile, 'w') as outfile:\n subprocess.check_call(\n ['/usr/local/bin/mwscript',\n 'extensions/WikimediaMaintenance/dumpInterwiki.php'],\n stdout=outfile\n )\n\n # This shouldn't happen, but let's be safe\n lint.check_valid_syntax(interwikifile)\n\n subprocess.check_call(['/usr/bin/git', 'add', interwikifile])\n subprocess.check_call(['/usr/bin/git', 'commit', '-q', '-m',\n self.arguments.message])\n\n subprocess.check_call(['/usr/bin/git', 'push', '-q', 'origin',\n 'HEAD:refs/for/master%l=Code-Review+2'])\n\n if not utils.confirm('Has your change merged yet?'):\n subprocess.check_call(['/usr/bin/git', 'reset', '--hard',\n 'origin/master'])\n raise RuntimeError('Aborting, you should not sync unmerged code')\n\n subprocess.check_call(['/usr/bin/git', 'pull', '-q'])\n","sub_path":"scap/plugins/updateinterwikicache.py","file_name":"updateinterwikicache.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"290458287","text":"# First Missing Positive\n\n# Given an unsorted integer array, find the smallest missing positive integer.\n\n# Example 1:\n# Input: [1,2,0]\n# Output: 3\n\n# Example 2:\n# 
Input: [3,4,-1,1]\n# Output: 2\n\n# Example 3:\n# Input: [7,8,9,11,12]\n# Output: 1\n\n# Note:\n# Your algorithm should run in O(n) time and uses constant extra space.\n\nclass Solution(object):\n def firstMissingPositive(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # 将值为i的数放在下标i-1的位置\n for i in range(len(nums)):\n while 0 < nums[i] <= len(nums) and nums[i] != nums[nums[i] - 1]:\n j = nums[i] - 1\n nums[i], nums[j] = nums[j], nums[i]\n\n for i in range(len(nums)):\n if nums[i] != i + 1:\n return i + 1\n return len(nums) + 1\n","sub_path":"41.py","file_name":"41.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"651176340","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, unicode_literals, print_function\nfrom urllib import request,parse\nfrom bs4 import BeautifulSoup\nimport copy\nimport random\nimport os\n\ndef AddAgent():\n # User-Agent 列表\n useragent_list = [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\"\n ]\n # 随机选择一个User-Agent字符串\n # random.choice()这个方法可以从列表中随机的取出一个元素\n return random.choice(useragent_list)\n\ndef Mkdir(path):\n \"\"\"创建目录\n\n Parameters\n ----------\n path : str or unicode\n 路径\n Returns\n -------\n success: True \n failed : False\n \"\"\"\n # 去除首位空格\n path=path.strip()\n # 去除尾部 \\ 符号\n path=path.rstrip(\"\\\\\")\n \n # 判断路径是否存在\n # 存在 True\n # 不存在 False\n isExists=os.path.exists(path)\n \n # 判断结果\n if not isExists:\n # 如果不存在则创建目录\n # 创建目录操作函数\n os.makedirs(path) \n \n print(path+'创建成功')\n return True\n else:\n # 如果目录存在则不创建,并提示目录已存在\n print(path+'目录已存在')\n return False \n\n\n\nclass BaseRequest(object):\n\n @staticmethod\n def GetUrlContent(url):\n \"\"\"获取当前网页的内容\n\n Parameters\n ----------\n url : str or unicode\n 需要下载的地址\n Returns\n -------\n success: str \n 当前网页的内容\n failed : None\n \"\"\"\n user_agent = AddAgent()\n try:\n # 构造请求对象\n req = request.Request(url)\n # 添加一个请求报头,添加的时候是User-Agent\n req.add_header(\"User-Agent\", user_agent)\n\n # 发送请求,返回服务器响应\n reseponse = request.urlopen(req)\n except Exception as e:\n print(e)\n return None \n else:\n if reseponse is None:\n print('没有找到网页')\n return None\n\n # 返回响应的html内容\n return reseponse.read()\n\n\n @staticmethod\n def GetUrlSoup(url):\n \"\"\"获取当前网页beautifulsoup解析后的内容\n\n Parameters\n ----------\n url : str or unicode\n 需要下载的地址\n Returns\n -------\n success: bs4 \n 当前网页的内容\n failed : None\n \"\"\"\n user_agent = 
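        # As in GetUrlContent() above, a User-Agent string is picked at random
        # from AddAgent() for each request, so repeated fetches do not present
        # an identical client fingerprint; it is attached with
        # req.add_header("User-Agent", user_agent) before urlopen() is called.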
AddAgent()\n        try:\n            # Build the request object\n            req = request.Request(url)\n            # Add a request header; here it is the User-Agent\n            req.add_header(\"User-Agent\", user_agent)\n\n            # Send the request and get the server response\n            reseponse = request.urlopen(req)\n        except Exception as e:\n            print(e)\n            return None \n        else:\n            if reseponse is None:\n                print('Page not found')\n                return None\n\n\n        try:\n            # Parse the page with BeautifulSoup\n            soup = BeautifulSoup(reseponse,\"html.parser\")\n        except AttributeError as e:\n            print(\"BeautifulSoup parser failed!\")\n            return None \n\n        # Return the parsed page content\n        return soup\n\n    @staticmethod\n    def DownloadData(download_url, download_path, file_name):\n        \"\"\"Download data\n\n        Parameters\n        ----------\n        download_url : str or unicode\n            URL to download from\n        download_path:str or unicode\n            directory to save the file into\n        Returns\n        -------\n        success: True \n        failed : False\n        \"\"\"\n\n        if not os.path.exists(download_path):\n            if not Mkdir(download_path):\n                print(\"path already exists\")\n\n        path = download_path + file_name\n        if os.path.isfile(path):\n            print(\"%s already exists\"%path)\n            return True\n\n        try:\n            request.urlretrieve(download_url, path)\n        except Exception as e:\n            print(e)\n            return False\n\n        return True","sub_path":"spider/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"339961860","text":"import webapp2\nfrom google.appengine.api import users\nfrom google.appengine.ext.webapp import template\n\n\nimport db\n\n\nclass SaveHandler(webapp2.RequestHandler):\n    def post(self):\n        config = self.request.get('config')\n        data = self.request.get('data')\n        \n        chart = db.StoreChart(users.get_current_user(),\n                               config,\n                               data)\n\n        self.response.out.write('id = %s
chart.data_sha224 = %s
config = %s\\n' % (\n chart.key().id(), chart.data_sha224, chart.options))\n\n\nclass ViewHandler(webapp2.RequestHandler):\n def get(self):\n id = self.request.get('id')\n chart = db.LoadChart(int(id))\n\n self.response.out.write('id = %s
chart.data_sha224 = %s
config = %s\\n' % (\n chart.key().id(), chart.data_sha224, chart.options))\n\n\nclass AdminListHandler(webapp2.RequestHandler):\n def get(self):\n template_values = {\n 'keys' : db.ListChartKeys()\n }\n self.response.out.write(template.render('templates/admin-list.html', template_values))\n\nclass FooHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n self.response.out.write('You are: %s\\n' % user)\n\n\napp = webapp2.WSGIApplication([\n ('/save', SaveHandler),\n ('/view', ViewHandler),\n ('/foo', FooHandler),\n ('/admin-list', AdminListHandler)\n], debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"380069632","text":"#!/bin/env python3\nfrom switchblade import Switchblade\n\nimport string\nimport random\n\nimport socket\nimport inspect\n\nimport base64\n\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.PublicKey import RSA\n\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\nimport hashlib\n\ndef get_socket_wrapper(args, cryptors=None):\n return lambda sock, _args=args, cryptors=cryptors: wrap_socket(sock, _args, cryptors=cryptors)()\n\ndef wrap_socket(sock, _args, cryptors=None):\n if _args.verbosity > 1:\n print(\"Wrapping socket: {}\".format(sock))\n class CryptSocket(object):\n def __init__(self, *args, **kwargs):\n if inspect.isclass(sock):\n self._cryptsock = sock(*args, **kwargs)\n else:\n self._cryptsock = sock\n\n self.args = _args\n self.cryptors = cryptors\n self._first_contact = False\n if self.args.verbosity > 2:\n print(\"Internal socket: {}\".format(self._cryptsock))\n\n def __getattribute__(self, name):\n try: \n x = super(CryptSocket,self).__getattribute__(name)\n except AttributeError as e: \n pass\n else:\n return x\n x = self._cryptsock.__getattribute__(name)\n if x.__name__ == \"send\":\n return self._send_encrypted\n elif x.__name__ == \"recv\":\n return self._recv_encrypted\n else:\n return x\n #if type(x) == type(self.__init__): # it is an instance method\n\n def _send_encrypted(self, _bytes, *args, **kwargs):\n if self.args.verbosity > 2:\n print(\"Encrypting: {}\".format(_bytes))\n if not self._first_contact:\n self._first_contact = True\n for c in self.cryptors:\n c.handshake(self._cryptsock)\n for c in self.cryptors:\n if self.args.verbosity > 2:\n print(\"Using {} cryptor\".format(type(c).__name__))\n _bytes = c.encrypt(_bytes)\n return self._cryptsock.send(_bytes, *args, **kwargs)\n\n def _recv_encrypted(self, *args, **kwargs):\n _bytes = self._cryptsock.recv(*args, **kwargs)\n if self.args.verbosity > 2:\n print(\"Decrypting: {}\".format(_bytes))\n if not self._first_contact:\n self._first_contact = True\n for c in self.cryptors[::-1]:\n c.handshake(self._cryptsock) \n \n for c in self.cryptors[::-1]:\n if self.args.verbosity > 2:\n print(\"Using {} cryptor\".format(type(c).__name__))\n _bytes = c.decrypt(_bytes)\n return _bytes\n\n return CryptSocket\n\n \nclass Cryptblade(Switchblade):\n def print_banner(self):\n self.print_local(\"\"\"\n\n*********************************************************************************************\n* *\n* ██████╗██████╗ ██╗ ██╗██████╗ ████████╗██████╗ ██╗ █████╗ ██████╗ ███████╗ *\n* ██╔════╝██╔══██╗╚██╗ ██╔╝██╔══██╗╚══██╔══╝██╔══██╗██║ ██╔══██╗██╔══██╗██╔════╝ *\n* ██║ ██████╔╝ ╚████╔╝ ██████╔╝ ██║ ██████╔╝██║ ███████║██║ ██║█████╗ *\n* ██║ ██╔══██╗ ╚██╔╝ ██╔═══╝ ██║ ██╔══██╗██║ ██╔══██║██║ ██║██╔══╝ *\n* ╚██████╗██║ ██║ ██║ ██║ 
██║ ██████╔╝███████╗██║ ██║██████╔╝███████╗ *\n* ╚═════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝╚═════╝ ╚══════╝ *\n* *\n*********************************************************************************************\n \n\"\"\")\n\n @staticmethod\n def rand_key(size=32, use_digits=True):\n digits = \"\"\n if use_digits:\n digits = string.digits \n return ''.join(random.choices(string.ascii_letters + digits, k=size))\n\n @staticmethod\n def get_parser():\n parser = Switchblade.get_parser()\n # TODO: consider adding mutliple keys\n parser.add_argument('--encrypt', help=\"Encrypt / decrypt communications using this algorithm. Must be one of: ['rc4', 'b64', 'b64_url', 'aes'].\\n\"+\n \"You can pass multiple options to add layers of encryption / encoding\", nargs=\"+\")\n parser.add_argument('-k', '--key', help=\"Encrypt communications using this key / keyfile (depending on the algoritm). Use with -e (--encrypt).\")\n parser.add_argument('--server_key', help=\"Encrypt communications using this key / keyfile (depending on the algoritm). Use with -e (--encrypt).\")\n parser.add_argument('--client_key', help=\"Encrypt communications using this key / keyfile (depending on the algoritm). Use with -e (--encrypt).\")\n\n return parser\n\n\n######################################################################################################\nclass CryptModule:\n def __init__(self, local_key=None, remote_key=None, state=None):\n self.local_key = local_key\n self.remote_key = remote_key\n self.state = state\n\n def encrypt(self, msg):\n return msg\n\n def decrypt(self, msg):\n return msg\n\n # If a handshake, key exchange is needed before communication can\n # begin, implement it here\n def handshake(self, sock):\n pass\n\nclass B64Encode(CryptModule):\n def __init__(self, *args, urlsafe=False, **kwargs):\n super().__init__(*args, **kwargs)\n\n if urlsafe:\n self._encode = base64.urlsafe_b64encode\n self._decode = base64.urlsafe_b64decode\n else:\n self._encode = base64.b64encode\n self._decode = base64.b64decode\n\n def encrypt(self, msg):\n return super().encrypt(self._encode(msg))\n\n def decrypt(self, msg):\n return self._decode(super().decrypt(msg))\n\n#####################################################\n# WIP: Need to make args for pub / private keys.\n#####################################################\n#class RSACrypt(CryptModule):\n# def __init__(self, *args, **kwargs):\n# super().__init__(*args, **kwargs)\n# with open(self.local_key, 'rb') as f:\n# self.pub_local = f.read()\n# with open(self.remote_key, 'rb') as f:\n# self.pub_remote = f.read()\n#####################################################\n\nclass AESCrypt(CryptModule):\n # https://stackoverflow.com/questions/12524994/encrypt-decrypt-using-pycrypto-aes-256\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.BS = 16\n\n def pad(self, s):\n return s + (self.BS - len(s) % self.BS) * bytes([self.BS - len(s) % self.BS])\n def unpad(self, s):\n return s[:-ord(s[len(s)-1:])] \n\n def encrypt(self, msg):\n msg = self.pad(msg)\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(self.local_key, AES.MODE_CBC, iv)\n return iv + cipher.encrypt(msg)\n\n def decrypt(self, enc):\n iv = enc[:16]\n cipher = AES.new(self.remote_key, AES.MODE_CBC, iv)\n return self.unpad(cipher.decrypt( enc[16:] ))\n\n# TODO: Fix bug - does not work with UDP\nclass RC4Crypt(CryptModule):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self.state:\n self.state = {True:{}, False:{}}\n\n def 
_rc4_setup(self, key, state):\n        try:\n            key = [ord(c) for c in key]\n        except TypeError:\n            # key was already a sequence of ints (e.g. bytes); keep it as-is\n            pass\n        S = list(range(256))\n        j = 0\n        for i in range(256):\n            j = (j + S[i] + key[i % len(key)]) % 256\n            S[i], S[j] = S[j], S[i]\n\n        # Store the current crypto state for later\n        state.update({'key':key, 'S':S, 'i':0, 'j':0})\n\n    def _rc4_prga(self, state):\n        # Restore state of the crypto algorithm\n        S, i, j = [state[x] for x in ['S','i','j']]\n        #print(S, i, j)\n        while True:\n            #print(S, i, j)\n            i = (i + 1) % 256\n            j = (j + S[i]) % 256\n            S[i], S[j] = S[j], S[i]\n            next_key = S[(S[i] + S[j]) % 256]\n            #print(next_key)\n            yield next_key\n\n    def rc4(self, msg, decrypt=False):\n        # Maintain two keystream states, one for each direction.\n        # Use convention that DEcrypt direction uses the reverse of the key\n        # to reduce key reuse. The peer must reverse the key for the ENcrypt direction\n        state = self.state[decrypt]\n        if not 'rc4_prga' in state:\n            if decrypt:\n                key = self.remote_key\n            else:\n                key = self.local_key\n\n            self._rc4_setup(key, state)\n            state['rc4_prga'] = self._rc4_prga(state)\n\n        msg = [ord(c) if not type(c) is int else c for c in msg]\n        return bytes(msgbyte ^ keybyte for msgbyte, keybyte in zip(msg, state['rc4_prga']))\n        #return bytes(b\"\".join([chr(ord(msgbyte) ^ keybyte) for msgbyte, keybyte in zip(msg, prga())]))\n\n    def encrypt(self, msg):\n        return super().encrypt(self.rc4(msg, decrypt=False))\n\n    def decrypt(self, msg):\n        return self.rc4(super().decrypt(msg), decrypt=True)\n\n###########################################################\n#\n# Driver\n#\n###########################################################\n\nif __name__==\"__main__\":\n    parser = Cryptblade.get_parser()\n    args = parser.parse_args()\n    supported = ['rc4', 'b64', 'b64_url', 'aes']\n    assert not bool(args.server_key) != bool(args.client_key), \"Either use just --key or both --client_key and --server_key\"\n    if args.key:\n        assert not bool(args.server_key or args.client_key), \"Either use just --key or both --client_key and --server_key\"\n\n    #TODO add validation that checks that keys are present when necessary\n    cryptors = []\n    if args.encrypt:\n        for alg in args.encrypt:\n            if alg not in supported:\n                raise Exception('Encryption algorithm {} not available. 
Try one of {}'.format(args.encrypt, supported))\n\n # assertions mean that if this is true, then only args.key is set\n if args.key:\n args.server_key = args.key\n args.client_key = args.key[::-1]\n \n if not args.listen:\n # every client internally things it is a server, but when we run the command, we want\n # the --server_key and --client_key args to match on both boxes\n args.server_key, args.client_key = args.client_key, args.server_key\n # now server_key and client_key are set\n\n # things that don't take keyfiles\n if alg in ['rc4', 'aes']:\n assert args.server_key and args.client_key, \"Keys are required for {}\".format(alg)\n if len(args.server_key) < 32:\n args.server_key = hashlib.sha256(args.server_key.encode()).digest()\n if len(args.client_key) < 32:\n args.client_key = hashlib.sha256(args.client_key.encode()).digest()\n\n cryptor = None \n if alg == \"rc4\":\n cryptor = RC4Crypt(local_key=args.server_key, remote_key=args.client_key)\n elif alg == \"b64\":\n cryptor = B64Encode(urlsafe=False)\n elif alg == \"b64_url\":\n cryptor = B64Encode(urlsafe=True)\n elif alg == \"aes\":\n cryptor = AESCrypt(local_key=args.server_key, remote_key=args.client_key)\n\n if not cryptor:\n raise Exception(\"Failed to get a Cryptor for {}\".format(alg))\n cryptors.append(cryptor)\n sb = Cryptblade(args, wrap_sock=get_socket_wrapper(args, cryptors=cryptors))\n sb.listener()\n\n","sub_path":"cryptblade.py","file_name":"cryptblade.py","file_ext":"py","file_size_in_byte":12721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"405399209","text":"\"\"\"\nProblem Statement #\nFind the minimum depth of a binary tree. The minimum depth is the number of nodes along the shortest path from the root node to the nearest leaf node.\n\"\"\"\nfrom collections import deque\n\ndef find_minimum_depth(root):\n q = deque()\n q.append(root)\n level = 0\n while q:\n level_size = len(q)\n level += 1\n for _ in range(level_size):\n curr = q.popleft()\n if curr.left is None and curr.right is None:\n return level\n if curr.left is not None:\n q.append(curr.left)\n if curr.right is not None:\n q.append(curr.right)\n","sub_path":"grokking/tree_bfs/find_minimum_depth/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"188415994","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mclr\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\ndef genData(size=500):\n data = np.random.rand(size, 2)*2 - 1\n label = np.zeros([size, 1])\n for i, p in enumerate(data):\n if (p[0]+0.2)**2 + (0.6*p[1])**2 >= 0.25:\n label[i] = 0.\n else:\n label[i] = 1.\n div = round(size*0.8)\n train_data = data[:div, :]\n test_data = data[div:, :]\n train_label = label[:div, :]\n test_label = label[div:, :]\n return (train_data, train_label), (test_data, test_label)\n\n\ndef drawResults(data, label, prediction):\n p_label = np.array([round(x[0]) for x in prediction])\n plt.scatter(data[:, 0], data[:, 1], s=30, c=label[:, 0], cmap=mclr.ListedColormap(['red', 'blue']))\n plt.scatter(data[:, 0], data[:, 1], s=10, c=p_label, cmap=mclr.ListedColormap(['red', 'blue']))\n plt.grid()\n plt.show()\n\n# рассказать про входной и выходной слои\n# про функции рассчета потерь\n# от числа нейронов в последнем слое зависит функция loss\n# в 
первом слое число зависит от единицы входных данных\n\n(train_data, train_label), (test_data, test_label) = genData()\nprint(train_data.shape)\n\nmodel = Sequential()\nmodel.add(Dense(2, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nH = model.fit(train_data, train_label, epochs=105, batch_size=10, validation_data=(test_data, test_label), verbose=1)\n\n#Получение ошибки и точности в процессе обучения\nloss = H.history['loss']\nval_loss = H.history['val_loss']\nacc = H.history['accuracy']\nval_acc = H.history['val_accuracy']\nepochs = range(1, len(loss) + 1)\n\n#Построение графика ошибки\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n\n#Построение графика точности\nplt.clf()\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\nplt.show()\n\n#Получение и вывод результатов на тестовом наборе\n\nresults = model.evaluate(test_data, test_label)\nprint(results)\n\n#Вывод результатов бинарной классификации\n\nall_data = np.vstack((train_data, test_data))\nall_label = np.vstack((train_label, test_label))\npred = model.predict(all_data)\ndrawResults(all_data, all_label, pred)","sub_path":"practise2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"241702695","text":"def run(sim, trainer):\n \"\"\"\n Analyze the behavior of your trained policies using the simulation and trainer\n from your RL experiment.\n\n Args:\n sim:\n Simulation Manager object from the experiment.\n trainer:\n Trainer that computes actions using the trained policies.\n \"\"\"\n # Run the simulation with actions chosen from the trained policies\n policy_agent_mapping = trainer.config['multiagent']['policy_mapping_fn']\n for episode in range(100):\n print('Episode: {}'.format(episode))\n obs = sim.reset()\n done = {agent: False for agent in obs}\n while True: # Run until the episode ends\n # Get actions from policies\n joint_action = {}\n for agent_id, agent_obs in obs.items():\n if done[agent_id]: continue # Don't get actions for done agents\n policy_id = policy_agent_mapping(agent_id)\n action = trainer.compute_action(agent_obs, policy_id=policy_id)\n joint_action[agent_id] = action\n # Step the simulation\n obs, reward, done, info = sim.step(joint_action)\n if done['__all__']:\n break\n","sub_path":"examples/analysis_prototype.py","file_name":"analysis_prototype.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"140211864","text":"from bs4 import BeautifulSoup, SoupStrainer\nfrom typing import Optional, Dict, Any\nfrom pyoembed import oEmbed, PyOembedException\n\ndef get_oembed_data(url: str,\n maxwidth: Optional[int]=640,\n maxheight: Optional[int]=480) -> Optional[Dict[str, Any]]:\n try:\n data = oEmbed(url, maxwidth=maxwidth, maxheight=maxheight)\n except PyOembedException:\n return None\n\n oembed_resource_type = data.get('type', '')\n image = 
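    # For a 'photo' resource the oEmbed payload itself carries the image URL
    # (under 'url' or 'image'), while a 'video' resource is only kept when both
    # an embeddable html snippet and a thumbnail_url are present; the branches
    # below mirror that distinction and set data['oembed'] = True so callers can
    # tell oEmbed metadata apart from other preview metadata.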
data.get('url', data.get('image'))\n thumbnail = data.get('thumbnail_url')\n html = data.pop('html', '')\n if oembed_resource_type == 'photo' and image:\n data['image'] = image\n # Add a key to identify oembed metadata as opposed to other metadata\n data['oembed'] = True\n\n elif oembed_resource_type == 'video' and html and thumbnail:\n data['html'] = get_safe_html(html)\n data['image'] = thumbnail\n # Add a key to identify oembed metadata as opposed to other metadata\n data['oembed'] = True\n\n return data\n\ndef get_safe_html(html: str) -> str:\n \"\"\"Return a safe version of the oEmbed html.\n\n Verify that the HTML:\n 1. has a single iframe\n 2. the src uses a schema relative URL or explicitly specifies http(s)\n\n \"\"\"\n if html.startswith(''):\n html = html[9:-3]\n soup = BeautifulSoup(html, 'lxml', parse_only=SoupStrainer('iframe'))\n iframe = soup.find('iframe')\n if iframe is not None and iframe.get('src').startswith(('http://', 'https://', '//')):\n return str(soup)\n return ''\n","sub_path":"zerver/lib/url_preview/oembed/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"550575894","text":"import webapp2\nimport urllib\nimport cgi\nimport time\nimport json\nimport unittest\n\nfrom google.appengine.ext import ndb\n\n# Settings\nTimeBlock = 5 # Minutes\nRequiredFields = ['Name','Email','CoverLetter','JobID']\nJobsAddKey = '12345'\n\n# Functions\ndef RequiredFieldControl(p):\n\tresult = True\n\tfor item in p:\n\t\tif ((item in RequiredFields) == True) and (p[item] == ''):\n\t\t\tresult = False\n\treturn result\n\n# Models\nclass Jobs(ndb.Model):\n\tjob_id \t\t= ndb.StringProperty(required=True)\n\tjob_desc\t= ndb.StringProperty(required=True)\n\ttimestamp \t= ndb.DateTimeProperty(auto_now_add=True)\n\nclass JobApplications(ndb.Model):\n\tname \t= ndb.StringProperty(required=True)\n\temail \t= ndb.StringProperty(required=True)\n\tcover\t= ndb.StringProperty(required=True)\n\tjob_id\t= ndb.StringProperty(required=True)\n\tgithub\t= ndb.StringProperty(required=False)\n\tcv_url\t= ndb.StringProperty(required=False)\n\tstatus\t= ndb.StringProperty(required=False, default='Waiting')\n\ttimestamp = ndb.DateTimeProperty(auto_now_add=True)\n\nclass IPList(ndb.Model):\n\tip_address\t= ndb.StringProperty(required=True)\n\ttimestamp\t= ndb.DateTimeProperty(auto_now_add=True)\n\n# Views\nclass JobOps(webapp2.RequestHandler):\n\tdef AddJob(self, jobid, jobdesc):\n\t\tJobs(\n\t\t\tjob_id = jobid,\n\t\t\tjob_desc = jobdesc\n\t\t).put()\n\n\tdef post(self):\n\t\tself.response.headers['Content-Type'] = 'application/json'\n\t\tp = self.request.POST\n\t\tresult = {}\n\t\tif(p['Key'] == JobsAddKey and len(p)>0):\n\t\t\tself.AddJob(p['JobID'], p['JobDesc'])\n\t\t\tresult[\"Status\"] = 'Job was added!'\n\t\telse:\n\t\t\tresult[\"Message\"] = 'Houston! We have a problem! 
Please look at API documentation.'\n\t\tself.response.write(json.dumps(result))\n\nclass Application(webapp2.RequestHandler):\n\tdef AddNew(self, p):\n\t\tif p.get('CVURL') == None:\n\t\t\tp['CVURL'] = \"Null\"\n\t\tif p.get('GitHubURL') == None:\n\t\t\tp['GitHubURL'] = \"Null\"\n\t\tjob_application = JobApplications(\n\t\t\tname = p['Name'],\n\t\t\temail = p['Email'],\n\t\t\tcover = p['CoverLetter'],\n\t\t\tgithub = p['GitHubURL'],\n\t\t\tcv_url = p['CVURL'],\n\t\t\tjob_id = p['JobID']\n\t\t)\n\n\t\tip_address = IPList(\n\t\t\tip_address = self.request.remote_addr\n\t\t)\n\t\tresult = {}\n\n\t\tip_address.put()\n\t\tjob_application.put()\n\t\tapplication_id = job_application.key.id()\n\n\t\tresult[\"Message\"] = 'Your application is successfull. You can learn the result of your application using API.'\n\t\tresult[\"ApplicationID\"] = application_id\n\t\tresult[\"Email\"] = p['Email']\n\t\tresult[\"Status\"] = 'Waiting'\n\n\t\treturn result\n\t\n\tdef ContolIP(self, ip_addr):\n\t\tip_check_query = IPList.query(\n\t\t\tIPList.ip_address == ip_addr\t\n\t\t).order(-IPList.timestamp)\n\t\tip_check = ip_check_query.fetch(1)\n\t\tif len(ip_check) == 1:\n\t\t\tif int(ip_check[0].timestamp.strftime('%s')) > int(time.time()-(int(TimeBlock)*60)):\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\t\telse:\n\t\t\treturn True\n\n\tdef ControlApplication(self, e_mail, jobid):\n\t\tcontrol_application_query = JobApplications.query(\n\t\t\tndb.AND(\n\t\t\t\tJobApplications.job_id == jobid,\n\t\t\t\tJobApplications.email == e_mail\n\t\t\t)\n\t\t)\n\n\t\tcontrol_application = control_application_query.fetch(1)\n\t\tif len(control_application) == 1:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\t\n\tdef ControlJob(self, jobid):\n\t\tcontrol_job_query = Jobs.query(\n\t\t\tJobs.job_id == jobid\n\t\t)\n\t\tcontrol_job = control_job_query.fetch(1)\n\t\tif len(control_job) == 0:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef ResultApplication(self,e_mail):\n\t\tcontrol_application_query = JobApplications.query(\n\t\t\tJobApplications.email == e_mail\n\t\t)\n\n\t\tcontrol_application = control_application_query.fetch()\n\t\tapplications = {}\n\t\tapplication = {}\n\t\ti = 0\n\n\t\tfor x in control_application:\n\t\t\tapplication['Name'] = x.name\n\t\t\tapplication['Email'] = x.email\n\t\t\tapplication['CoverLetter'] = x.cover\n\t\t\tapplication['JobID'] = x.job_id\n\t\t\tapplication['Status'] = x.status\n\t\t\tapplication['Date'] = x.timestamp.strftime('%e %B %Y')\n\n\t\t\tapplications[i] = application\n\t\t\ti = i+1\n\n\t\tself.response.write(json.dumps(applications))\n\n\tdef get(self):\n\t\tself.response.headers['Content-Type'] = 'application/json'\n\t\tp = self.request.GET\n\t\tfor item in p:\n\t\t\tp[item] = cgi.escape(p[item])\n\n\t\t\n\n\t\tself.ResultApplication(p['Email'])\n\n\tdef post(self):\n\t\tself.response.headers['Content-Type'] = 'application/json'\n\t\tp = self.request.POST\n\t\tfor item in p:\n\t\t\tp[item] = cgi.escape(p[item])\n\t\t\n\t\tRC = RequiredFieldControl(p)\n\t\tIC = self.ContolIP(self.request.remote_addr)\n\t\tCJ = self.ControlJob(p['JobID'])\n\t\tCA = self.ControlApplication(p['Email'],p['JobID'])\n\n\t\ttry:\n\t\t\tif( IC == True and RC == True and CA == True and CJ == True ):\n\t\t\t\tresult = self.AddNew(p)\n\t\t\telif(IC == False):\n\t\t\t\tresult = {}\n\t\t\t\tresult = {\"Message\": ('Please wait %d minutes!' 
% TimeBlock), \"Status\": \"False\"}\n\t\t\telif(CA == False):\n\t\t\t\tresult = {}\n\t\t\t\tresult[\"Message\"] = 'You\\'ve applied to this job already.'\n\t\t\telif(RC == False):\n\t\t\t\tresult = {}\n\t\t\t\tresult[\"Message\"] = 'Please fill required fields.'\n\t\t\telif(CJ == False):\n\t\t\t\tresult = {}\n\t\t\t\tresult[\"Message\"] = 'There is not a job like this.'\n\t\texcept:\n\t\t\tresult = {}\n\t\t\tresult[\"Message\"] = 'Houston! We have a problem! Please look at API documentation.'\n\n\t\tself.response.write(json.dumps(result))\n\napplication = webapp2.WSGIApplication([\n ('/Job',Application),\n ('/AddJob',JobOps),\n], debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"327485092","text":"import re, datetime, random\n\nfrom detectmobilebrowser import *\n\nDOMAIN = 'WordPicker.net'\nURL = 'www.' + DOMAIN.lower()\nWWW = 'http://' + URL\n\nTITLE = 'Word Picker'\nSUBTITLE = 'Word Finder for Popular Word Games and Crosswords'\nYEAR = datetime.date.today().year\n\nPROMO = 'Word Finder for Popular Word Games and Crosswords'\n\n#PUBLISHERS = ['bannerplay','amazon-us-1','amazon-us-2','amazon-us-3','revenuehits']\nPUBLISHERS = ['google']\n\ndef html_defaults(host,user_agent=None):\n return {\n 'domain': DOMAIN,\n 'www': WWW,\n 'title': TITLE,\n 'subtitle': SUBTITLE,\n 'year': YEAR,\n 'promo': PROMO,\n 'publisher1': PUBLISHERS[random.randint(0,len(PUBLISHERS)-1)],\n 'publisher2': PUBLISHERS[random.randint(0,len(PUBLISHERS)-1)],\n 'is_mobile': is_mobile_browser(user_agent)\n }\n","sub_path":"html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"616564105","text":"# -*- coding: utf-8 -*-\nfrom __future__ import annotations\n\nimport os\n\nfrom pygerber.API2D import render_from_json, render_from_toml, render_from_yaml\nfrom pygerber.mathclasses import format_bytes\nfrom pygerber.parser.pillow.api import _skip_next_render\n\n\ndef handle_pillow_cli(args):\n if args.dry is True:\n _skip_next_render()\n print(f\"Rendering {args.specfile['filepath']} as {args.specfile['type'].upper()}\")\n if args.specfile[\"type\"] == \"json\":\n image = render_from_json(args.specfile[\"filepath\"])\n elif args.specfile[\"type\"] == \"yaml\":\n image = render_from_yaml(args.specfile[\"filepath\"])\n elif args.specfile[\"type\"] == \"toml\":\n image = render_from_toml(args.specfile[\"filepath\"])\n else:\n raise NotImplementedError(\n f\"Rendering based on {args.specfile['type']} file format is not supported.\"\n )\n if args.dry is False:\n print(f\"Saving to {args.save}\")\n image.save(args.save)\n filesize = os.stat(args.save).st_size\n pretty_filesize = format_bytes(filesize)\n print(\n f\"Successfully saved image {image.width}x{image.height}, {pretty_filesize}.\"\n )\n else:\n print(\"Skipping, dry run.\")\n","sub_path":"src/pygerber/parser/pillow/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"269726121","text":"# COPYRIGHT: see https://github.com/x19290/py.lisp4u/blob/master/0rights.md\n\nfrom __future__ import unicode_literals\n\nfrom listgen.simplest_dict_list import SimplestDictList\n\n\nclass BasicEnv(SimplestDictList):\n slice_type = None\n oparen, cparen = r'{}'\n\n def __next__(self):\n link = self._link\n if 
link is None:\n raise StopIteration\n return\n\n def __getitem__(self, key):\n for env in self:\n try:\n return env.prim_get(key)\n except KeyError:\n continue\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n if self._need_not_set(key):\n return\n lu = self._lookup(key)\n if lu is None:\n return self.prim_set(key, value)\n if lu is self:\n return lu.prim_set(key, value)\n try:\n lu[key] = value\n except NotImplementedError:\n for li in self:\n if isinstance(li, BasicCowEnv):\n return li.prim_set(key, value)\n self.prim_set(key, value)\n\n def grow(self, slice_=None):\n ctor = self.slice_type\n return ctor(self.slice_or_default(slice_), self)\n\n def prim_get(self, key):\n return super(BasicEnv, self).__getitem__(key)\n\n def prim_set(self, key, value):\n super(BasicEnv, self).__setitem__(key, value)\n\n @staticmethod\n def _need_not_set(_key):\n return False\n\n def _lookup(self, key):\n for env in self:\n if key in env:\n return env\n return None\n\n def keys(self):\n seen = set()\n for y in self:\n for k in sorted(super(BasicEnv, y).keys()):\n if k not in seen:\n seen.add(k)\n yield k\n\n def nuke(self):\n self.clear()\n return self\n\n def write_to(self, ostream):\n ostream.write(self.oparen)\n keys = self.keys().__iter__()\n try:\n next(keys).write_to(ostream).write(r':')\n except StopIteration:\n pass\n else:\n for k in keys:\n ostream.write(r' ')\n k.write_to(ostream).write(r':')\n ostream.write(self.cparen)\n return ostream\n\n\nBasicEnv.slice_type = BasicEnv\n\n\nclass BasicCowEnv(BasicEnv):\n slice_type = BasicEnv\n\n def __setitem__(self, key, value):\n if self._need_not_set(key):\n return\n lu = self._lookup(key)\n if (lu is None or lu[key] != value):\n super(BasicCowEnv, self).__setitem__(key, value)\n\n\nclass BasicRoEnv(BasicEnv):\n slice_type = BasicCowEnv\n\n def __setitem__(self, key, value):\n raise NotImplementedError(key, value)\n\n\nclass WriteRo(object):\n def __init__(self, ro):\n self._ro = ro\n\n def __getitem__(self, key):\n return self._ro.prim_get(key)\n\n def __setitem__(self, key, value):\n self._ro.prim_set(key, value)\n\n def grow(self, *args, **kwargs):\n return self._ro.grow(*args, **kwargs)\n\n def __repr__(self):\n return self._ro.__repr__()\n\n\ndel unicode_literals, SimplestDictList\n","sub_path":"lib/basic_env.py","file_name":"basic_env.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"360865937","text":"import os\n\nimport requests\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0'\n}\n\n\ndef download_file_series(video_links):\n # for link in video_links:\n file_name = video_links.split('/')[-1]\n\n print(\"Downloading file:%s\" % file_name)\n r = requests.get(video_links, stream=True)\n\n # download started\n with open(file_name, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024 * 1024):\n if chunk:\n f.write(chunk)\n\n print(\"%s downloaded!\\n\" % file_name)\n\n # print(\"All videos downloaded!\")\n\n return\n\n\n## 下载pdf\ndef save_file(url, pdf_name, file_dir):\n response = requests.get(url, headers=headers, stream=True)\n ## 如果指定的文件夹,那么便新建\n if not os.path.exists(file_dir):\n os.makedirs(file_dir)\n ## os.path.join(a,b..)如果a字符串没有以/结尾,那么自动加上\\\\。(windows下)\n with open(os.path.join(file_dir, pdf_name), \"wb\") as pdf_file:\n for content in response.iter_content():\n pdf_file.write(content)\n\n\nif __name__ == '__main__':\n f2 = open(\"url.txt\", \"r\")\n lines = 
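    # url.txt is read below, one URL per line; each line is stripped of its
    # trailing newline and passed to save_file(), which streams the response and
    # writes it to disk piece by piece. A hypothetical variant with an explicit
    # chunk size (an assumption, not part of the original file) would look like:
    #
    #     for chunk in response.iter_content(chunk_size=8192):
    #         pdf_file.write(chunk)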
f2.readlines()\n for line3 in lines:\n print(line3)\n # download_file_series(line3.replace('\\n', ''))\n save_file(line3.replace('\\n', ''), line3.replace('\\n', '').split('/')[-1],\n \"/Users/shicongyang/books/paystudybook\")\n f2.close()\n","sub_path":"python-demo/down_file.py","file_name":"down_file.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"572459254","text":"from bs4 import BeautifulSoup\nimport requests\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.support.select import Select\nimport pandas as pd\nimport csv\ndef write_to_csv(qnlist):\n with open('Amazon SDET Questions.csv', mode='a+', encoding='utf-8') as qns:\n jobs_writer = csv.writer(qns, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n jobs_writer.writerow([qnlist])\n\n\ndriver = webdriver.Chrome(r'D:\\Software\\chromedriver_win32\\chromedriver.exe')\ndriver.get('https://www.geeksforgeeks.org/')\ndriver.maximize_window()\nsleep(3)\ndriver.find_element_by_xpath(\"//input[@name = 'search']\").send_keys(\"Amazon SDET Interview Questions\")\ndriver.find_element_by_xpath(\"//button[@class='gsc-search-button gsc-search-button-v2']\").click()\nsleep(2)\nurl = driver.current_url\nprint(url)\nhrefList = []\nactualList = []\n\nsleep(5)\nall_links = driver.find_elements_by_class_name(\"gs-title\")\nfor e in all_links:\n hrefList.append(e.get_attribute('href'))\n\nfor href in hrefList:\n if href is not None:\n actualList.append(href)\nquestions = []\np=[]\nfor link in actualList:\n driver.get(link)\n sleep(10)\n texts = []\n text1 = driver.find_elements_by_tag_name('p')\n plen = len(text1)\n print(plen)\n for each in text1:\n #print(each.text)\n texts.append(each.text)\n write_to_csv(texts)\n\n#\n#\n# driver.get(\"https://practice.geeksforgeeks.org/explore/?company%5B%5D=Amazon&problemType=full&page=1\")\n# driver.maximize_window()\n#\n# sleep(2)\n\ndriver.quit()","sub_path":"AmazonSDETInterviewQuestions.py","file_name":"AmazonSDETInterviewQuestions.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"178340763","text":"#!/usr/bin/env python3\n\nimport socket\nimport time\nimport umsgpack\n\nRPORT = 9999\nRHOST = \"127.0.0.1\"\nBYTES = 4096\n\ndate_good = {\"test_date\": \"01.01.1111\"}\ndate_wrong = {\"test_date\": \"01/01/1111\"}\n\n\ndef chatting(msg_dict):\n print(\"Sending \" + msg_dict['test_date'] + \" .......\")\n time.sleep(1)\n sock.send(umsgpack.dumps(msg_dict))\n\n print(\"Receiving .... 
\")\n time.sleep(1)\n rdata = sock.recv(BYTES)\n print(umsgpack.loads(rdata)[\"result\"])\n\n\nsock = socket.socket()\nsock.connect((RHOST, RPORT))\n\nchatting(date_good)\nchatting(date_wrong)\n\nsock.close()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"22014024","text":"import requests\nimport json\nfrom lib.bloom_filter import BloomFilter\nfrom lib.rabbitmq import Rabbit\nimport re\nfrom lxml import etree\nimport yaml\nfrom article import Article\nimport datetime\nimport time\nimport random\nimport pika\nfrom article_img.qiniu_fetch import qiniufetch\nfrom lib.log import LogHandler\n\nlog = LogHandler('wangyi')\nsetting = yaml.load(open('config_local.yaml'))\n\nclass Wangyi:\n def __init__(self):\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36\",\n # \"Cookie\": \"TEMP_USER_ID=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1aWQiOiI1YWYxNDk5OTY4ZDYzIiwidGltZSI6MTUyNTc2MjQ1N30.yT2cDnBOA7Zj9lFxI52f064z6zI4zxPv78HWjvXvwyc; city_redirected=2; prov=cn021; city=021; weather_city=sh; region_ip=116.247.70.x; region_ver=1.2; userid=1525762465015_d0klfz8748; Hm_lvt_2618c9646a4a7be2e5f93653be3d5429=1525762465; Hm_lpvt_2618c9646a4a7be2e5f93653be3d5429=1525762465; ifh_site=3066%2C\"\n }\n self.start_url = \"http://sh.house.163.com/news/\"\n self.proxies = [{\"http\": \"http://192.168.0.96:3234\"},\n {\"http\": \"http://192.168.0.93:3234\"},\n {\"http\": \"http://192.168.0.90:3234\"},\n {\"http\": \"http://192.168.0.94:3234\"},\n {\"http\": \"http://192.168.0.98:3234\"},\n {\"http\": \"http://192.168.0.99:3234\"},\n {\"http\": \"http://192.168.0.100:3234\"},\n {\"http\": \"http://192.168.0.101:3234\"},\n {\"http\": \"http://192.168.0.102:3234\"},\n {\"http\": \"http://192.168.0.103:3234\"}, ]\n self.bf = BloomFilter(host=setting['redies_host'],\n port=setting['redis_port'],\n key='article_toutiao_test',\n blockNum=1,\n db=0, )\n\n\n\n def connect(self):\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=setting['rabbitmq_host'],\n port=setting['rabbitmq_port']))\n self.channel = connection.channel()\n self.channel.exchange_declare('article', 'direct', durable=True)\n self.channel.queue_declare('wangyi_article', durable=True)\n self.channel.queue_bind(exchange='article',\n queue='wangyi_article',\n routing_key='white')\n\n def start_crawler(self):\n res = requests.get(self.start_url, headers=self.headers)\n res.encoding = 'gbk'\n html = etree.HTML(res.text)\n city_list = html.xpath(\"//div[@class='city']/a\")\n for city in city_list:\n city_name = city.xpath(\"./text()\")[0]\n city_url = city.xpath(\"./@href\")[0]\n city_news_url = city_url+'news'\n self.city_news(city_name,city_news_url)\n\n def city_news(self,city_name,city_url):\n while True:\n try:\n proxy = self.proxies[random.randint(0,9)]\n news_res = requests.get(city_url, headers=self.headers,proxies=proxy)\n break\n except Exception as e:\n log.error(e)\n continue\n news_res.encoding = 'gbk'\n news_html = etree.HTML(news_res.text)\n try:\n cate_list = news_html.xpath(\"//div[@class='importent-news']\")\n except Exception as e:\n log.info(e)\n return\n for cate in cate_list:\n cate_name = cate.xpath(\"./h2/a/text()\")[0]\n news_list = cate.xpath(\"./ul/li\")\n for news in news_list:\n url = news.xpath(\"./h3/a/@href\")[0]\n if self.bf.is_contains(url): # 过滤详情页url\n 
log.info('bloom_filter已经存在{}'.format(url))\n continue\n else:\n self.bf.insert(url)\n log.info('bloom_filter不存在,插入新的url:{}'.format(url))\n try:\n desc = news.xpath(\"./div[@class='news-detail']/p/text()\")[0]\n except:\n desc = None\n article = Article('网易')\n article.url = url\n article.desc = desc\n article.city = city_name\n article.category = cate_name\n message = json.dumps(article.to_dict())\n\n disconnected = True\n while disconnected:\n try:\n disconnected = False\n self.channel.basic_publish(exchange='article',\n routing_key='white',\n body=message,\n properties=pika.BasicProperties(delivery_mode=2))\n log.info('已经放入队列')\n except Exception as e:\n log.error(e)\n self.connect()\n disconnected = True","sub_path":"hilder_articles/backup/wangyi/wangyi_producer.py","file_name":"wangyi_producer.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"26406081","text":"import matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\ndef main():\n datasets = []\n wasp_jmlr = pd.read_csv(\"wasp_sequences/jmlr.csv\")\n wasp_iprg = pd.read_csv(\"wasp_sequences/iprg.csv\")\n clasp_jmlr = pd.read_csv(\"our_sequences/jmlr_almost_all.txt\")\n clasp_iprg = pd.read_csv(\"our_sequences/iprg.dat_sequence_runtimes.txt\")\n sns.set(font_scale=3.5)\n sns.set_style(\"whitegrid\")\n\n clasp_jmlr = clasp_jmlr[clasp_jmlr['pattern_type'] == \"maximal\"]\n clasp_iprg = clasp_iprg[clasp_iprg['pattern_type'] == \"maximal\"]\n\n wasp_jmlr = wasp_jmlr[wasp_jmlr['pattern_type'] == \"maximal\"]\n wasp_iprg = wasp_iprg[wasp_iprg['pattern_type'] == \"maximal\"]\n\n plt.plot(clasp_jmlr['threshold']/100,clasp_jmlr['time'], linestyle='-', marker=\"o\", markersize=30, color=\"red\",markeredgecolor='black',markeredgewidth=1.0)\n plt.plot(clasp_iprg['threshold']/100,clasp_iprg['time'], linestyle='-', marker=\"s\", markersize=30, color=\"red\",markeredgecolor='black',markeredgewidth=1.0)\n plt.plot(wasp_jmlr['threshold']/100,wasp_jmlr['time'], linestyle='-', marker=\">\", markersize=30, color=\"blue\",markeredgecolor='black',markeredgewidth=1.0)\n plt.plot(wasp_iprg['threshold']/100,wasp_iprg['time'], linestyle='-', marker=\"<\", markersize=30, color=\"blue\",markeredgecolor='black',markeredgewidth=1.0)\n \n\n# plt.xlim(0.040,0.160)\n plt.ylim(0.5, plt.ylim()[1]*1.20)\n plt.legend(labels=[\"Clasp: jmlr\", \"Clasp: iprg\",\"WASP: jmlr\",\"WASP: iprg\"],loc='best')\n plt.xlabel(\"Frequency\")\n plt.ylabel(\"Runtime in Seconds\")\n plt.yscale('log')\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"to_visualize/scripts/visualize_seq_wasp_vs_clasp_maximal.py","file_name":"visualize_seq_wasp_vs_clasp_maximal.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"437000467","text":"from torch import nn\r\nimport torch.nn.functional as TNF\r\n\r\nfrom ops.basic_ops import ConsensusModule, Identity\r\nfrom transforms import *\r\nimport torch.nn.init as init\r\nfrom torch.nn.init import normal_, constant_\r\nimport senet\r\nimport senet_branch\r\n\r\n\r\nBN_MOMENTUM = 0.1\r\n\r\ndef initialize_weights(net_l, scale=1.):\r\n if not isinstance(net_l, list):\r\n net_l = [net_l]\r\n for net in net_l:\r\n for m in net.modules():\r\n if isinstance(m, nn.Conv2d):\r\n init.kaiming_normal_(m.weight, a=0, mode='fan_in')\r\n m.weight.data *= scale # for residual block\r\n if m.bias is not 
None:\r\n m.bias.data.zero_()\r\n elif isinstance(m, nn.Linear):\r\n init.kaiming_normal_(m.weight, a=0, mode='fan_in')\r\n m.weight.data *= scale\r\n if m.bias is not None:\r\n m.bias.data.zero_()\r\n elif isinstance(m, nn.BatchNorm2d):\r\n init.constant_(m.weight, 1)\r\n init.constant_(m.bias.data, 0.0)\r\n\r\nclass STCNet(nn.Module):\r\n def __init__(self, num_class = 2, num_segments = 8, modality = \"RGB\",\r\n base_model='se_resnext50_32x4d',branch_model='se_resnext50_32x4d', new_length=None,\r\n consensus_type='avg', before_softmax=True,\r\n dropout=0.5,img_feature_dim=256,\r\n crop_num=1, partial_bn=True, print_spec=True):\r\n super(STCNet, self).__init__()\r\n self.modality = modality\r\n self.num_segments = num_segments\r\n self.reshape = True\r\n self.before_softmax = before_softmax\r\n self.dropout = dropout\r\n self.crop_num = crop_num\r\n self.consensus_type = consensus_type\r\n self.img_feature_dim = img_feature_dim # the dimension of the CNN feature to represent each frame\r\n if not before_softmax and consensus_type != 'avg':\r\n raise ValueError(\"Only avg consensus can be used after Softmax\")\r\n\r\n if new_length is None:\r\n self.new_length = 1 if modality == \"RGB\" else 5\r\n else:\r\n self.new_length = new_length\r\n if print_spec == True:\r\n print((\"\"\"\r\n Initializing STCNet with base model: {}.\r\n STCNet Configurations:\r\n input_modality: {}\r\n num_segments: {}\r\n new_length: {}\r\n consensus_module: {}\r\n dropout_ratio: {}\r\n img_feature_dim: {}\r\n \"\"\".format(base_model, self.modality, self.num_segments, self.new_length, consensus_type, self.dropout, self.img_feature_dim)))\r\n\r\n self._prepare_base_model(base_model)\r\n self._prepare_branch_model(branch_model) # initialize flow/residual branch\r\n\r\n self.conv_fuse_RGB = nn.Conv2d(2048, 256, 1, 1, bias=True)\r\n self.conv_fuse_FLOW = nn.Conv2d(2048, 256, 1, 1, bias=True)\r\n\r\n self.cross_down = [nn.Sequential(\r\n nn.Conv2d(channels, channels, 1, 1, bias=True),\r\n nn.BatchNorm2d(channels),\r\n nn.ReLU(inplace=False)).cuda()\r\n for channels in [256, 512, 1024, 2048]]\r\n self.cross_up = [nn.Sequential(\r\n nn.Conv2d(channels, channels, 1, 1, bias=True),\r\n nn.BatchNorm2d(channels),\r\n nn.ReLU(inplace=False)).cuda()\r\n for channels in [256, 512, 1024, 2048]]\r\n self.up_path = [nn.Sequential(\r\n nn.Conv2d(channels, channels, 1, 1, bias=True),\r\n nn.BatchNorm2d(channels),\r\n nn.ReLU(inplace=False)).cuda()\r\n for channels in [256, 512, 1024, 2048]]\r\n self.down_path = [nn.Sequential(\r\n nn.Conv2d(channels, channels, 1, 1, bias=True),\r\n nn.BatchNorm2d(channels),\r\n nn.ReLU(inplace=False)).cuda()\r\n for channels in [256, 512, 1024, 2048]]\r\n\r\n self.cls_module = nn.Sequential(\r\n nn.Conv2d(2048, 256, kernel_size=1, bias=False),\r\n nn.BatchNorm2d(256),\r\n nn.ReLU(),\r\n nn.AdaptiveAvgPool2d((1, 1)),\r\n )\r\n self.last_fc = nn.Linear(256, num_class)\r\n\r\n self._enable_pbn = partial_bn\r\n if partial_bn:\r\n self.partialBN(True)\r\n\r\n def forward(self, RGB_input, FLOW_input):\r\n sample_len = (3)\r\n\r\n #[64,56,56]\r\n feature_x0 = self.base_model.layer0(RGB_input.view((-1, sample_len) + RGB_input.size()[-2:]))\r\n branch_x0 = self.branch_model.layer0(FLOW_input.view((-1, sample_len) + FLOW_input.size()[-2:]))\r\n #[256,56,56]\r\n feature_x1 = self.base_model.layer1(feature_x0)\r\n branch_x1 = self.branch_model.layer1(branch_x0)\r\n #branch_x1_up = self.cross_up[0](branch_x1)\r\n #branch_x1_ = self.down_path[0](branch_x1)\r\n #feature_x1_down = 
self.cross_down[0](feature_x1)\r\n #feature_x1_ = self.up_path[0](feature_x1)\r\n feature_x1_merge = feature_x1 + branch_x1\r\n branch_x1_merge = branch_x1 + feature_x1\r\n #[512,28,28]\r\n feature_x2 = self.base_model.layer2(feature_x1_merge)\r\n branch_x2 = self.branch_model.layer2(branch_x1_merge)\r\n #branch_x2_up = self.cross_up[1](branch_x2)\r\n #branch_x2_ = self.down_path[1](branch_x2)\r\n #feature_x2_down = self.cross_down[1](feature_x2)\r\n #feature_x2_ = self.up_path[1](feature_x2)\r\n feature_x2_merge = feature_x2 + branch_x2\r\n branch_x2_merge = feature_x2 + branch_x2\r\n #[1024,14,14]\r\n feature_x3 = self.base_model.layer3(feature_x2_merge)\r\n branch_x3 = self.branch_model.layer3(branch_x2_merge)\r\n #branch_x3_up = self.cross_up[2](branch_x3)\r\n #branch_x3_ = self.down_path[2](branch_x3)\r\n #feature_x3_down = self.cross_down[2](feature_x3)\r\n #feature_x3_ = self.up_path[2](feature_x3)\r\n feature_x3_merge = feature_x3 + branch_x3\r\n branch_x3_merge = feature_x3 + branch_x3\r\n #[2048,7,7]\r\n feature_x4 = self.base_model.layer4(feature_x3_merge)\r\n branch_x4 = self.branch_model.layer4(branch_x3_merge)\r\n # branch_x4_up = self.cross_up[3](branch_x4)\r\n # feature_x4_down = self.cross_down[3](feature_x4)\r\n # feature_x4_ = feature_x4 + branch_x4_up\r\n # branch_x4_ = branch_x4 + feature_x4_down\r\n #[256,7,7]\r\n RGB_out = TNF.relu(self.conv_fuse_RGB(feature_x4 + branch_x4), inplace=False)\r\n B, C, H, W = RGB_out.size()\r\n RGB_out = RGB_out.view(-1, self.num_segments * C, H, W)\r\n\r\n cls_out = self.cls_module(RGB_out)\r\n cls_out = cls_out.view(-1, 256)\r\n cls_out = self.last_fc(cls_out)\r\n\r\n return cls_out#, ret_feature\r\n\r\n\r\n\r\n def _prepare_STCNet(self, num_class):\r\n feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features\r\n if self.dropout == 0:\r\n setattr(self.base_model, self.base_model.last_layer_name, nn.Linear(feature_dim, num_class))\r\n self.new_fc = None\r\n else:\r\n setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))\r\n if self.consensus_type in ['TRN','TRNmultiscale']:\r\n # create a new linear layer as the frame feature\r\n self.new_fc = nn.Linear(feature_dim, self.img_feature_dim)\r\n else:\r\n # the default consensus types in STCNet\r\n self.new_fc = nn.Linear(feature_dim, num_class)\r\n\r\n std = 0.001\r\n if self.new_fc is None:\r\n normal_(getattr(self.base_model, self.base_model.last_layer_name).weight, 0, std)\r\n constant_(getattr(self.base_model, self.base_model.last_layer_name).bias, 0)\r\n else:\r\n normal_(self.new_fc.weight, 0, std)\r\n constant_(self.new_fc.bias, 0)\r\n return feature_dim\r\n\r\n\r\n def _prepare_base_model(self, base_model):\r\n\r\n if 'resnet' in base_model or 'resnext' in base_model:\r\n self.base_model = getattr(senet, base_model)(num_classes=1000, pretrained='imagenet')\r\n self.base_model.last_layer_name = 'last_linear'\r\n self.input_size = 224\r\n self.input_mean = [0.485, 0.456, 0.406]\r\n self.input_std = [0.229, 0.224, 0.225]\r\n else:\r\n raise ValueError('Unknown base model: {}'.format(base_model))\r\n \r\n def _prepare_branch_model(self, branch_model):\r\n\r\n if 'resnet' in branch_model or 'resnext' in branch_model:\r\n self.branch_model = getattr(senet, branch_model)(num_classes=1000, pretrained='imagenet')\r\n self.branch_model.last_layer_name = 'last_linear'\r\n self.input_size = 224\r\n self.input_mean = [0.485, 0.456, 0.406]\r\n self.input_std = [0.229, 0.224, 0.225]\r\n else:\r\n raise ValueError('Unknown base model: 
{}'.format(branch_model))\r\n\r\n def train(self, mode=True):\r\n \"\"\"\r\n Override the default train() to freeze the BN parameters\r\n :return:\r\n \"\"\"\r\n super(STCNet, self).train(mode)\r\n count = 0\r\n if self._enable_pbn:\r\n print(\"Freezing BatchNorm2D except the first one.\")\r\n for m in self.base_model.modules():\r\n if isinstance(m, nn.BatchNorm2d):\r\n count += 1\r\n if count >= (2 if self._enable_pbn else 1):\r\n m.eval()\r\n\r\n # shutdown update in frozen mode\r\n m.weight.requires_grad = False\r\n m.bias.requires_grad = False\r\n\r\n def partialBN(self, enable):\r\n self._enable_pbn = enable\r\n\r\n def get_optim_policies(self):\r\n first_conv_weight = []\r\n first_conv_bias = []\r\n normal_weight = []\r\n normal_bias = []\r\n bn = []\r\n transpose = []\r\n\r\n conv_cnt = 0\r\n bn_cnt = 0\r\n for m in self.modules():\r\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d):\r\n ps = list(m.parameters())\r\n conv_cnt += 1\r\n if conv_cnt == 1:\r\n first_conv_weight.append(ps[0])\r\n if len(ps) == 2:\r\n first_conv_bias.append(ps[1])\r\n else:\r\n normal_weight.append(ps[0])\r\n if len(ps) == 2:\r\n normal_bias.append(ps[1])\r\n elif isinstance(m, torch.nn.Linear):\r\n ps = list(m.parameters())\r\n normal_weight.append(ps[0])\r\n if len(ps) == 2:\r\n normal_bias.append(ps[1])\r\n\r\n elif isinstance(m, torch.nn.BatchNorm1d):\r\n bn.extend(list(m.parameters()))\r\n elif isinstance(m, torch.nn.BatchNorm2d):\r\n bn_cnt += 1\r\n # later BN's are frozen\r\n if not self._enable_pbn or bn_cnt == 1:\r\n bn.extend(list(m.parameters()))\r\n elif isinstance(m, torch.nn.ConvTranspose2d):\r\n transpose.extend(m.parameters())\r\n elif len(m._modules) == 0:\r\n if len(list(m.parameters())) > 0:\r\n raise ValueError(\"New atomic module type: {}. 
Need to give it a learning policy\".format(type(m)))\r\n\r\n return [\r\n {'params': first_conv_weight, 'lr_mult': 5 if self.modality == 'Flow' else 1, 'decay_mult': 1,\r\n 'name': \"first_conv_weight\"},\r\n {'params': first_conv_bias, 'lr_mult': 10 if self.modality == 'Flow' else 2, 'decay_mult': 0,\r\n 'name': \"first_conv_bias\"},\r\n {'params': normal_weight, 'lr_mult': 1, 'decay_mult': 1,\r\n 'name': \"normal_weight\"},\r\n {'params': normal_bias, 'lr_mult': 2, 'decay_mult': 0,\r\n 'name': \"normal_bias\"},\r\n {'params': bn, 'lr_mult': 1, 'decay_mult': 0,\r\n 'name': \"BN scale/shift\"},\r\n {'params': transpose, 'lr_mult': 1, 'decay_mult': 0,\r\n 'name': 'conv transpose'}\r\n ]\r\n\r\n @property\r\n def crop_size(self):\r\n return self.input_size\r\n\r\n @property\r\n def scale_size(self):\r\n return self.input_size * 256 // 224\r\n\r\n def get_augmentation(self):\r\n #return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66]),\r\n # GroupRandomHorizontalFlip()])\r\n rrc = RandomResizedCrop(size=224, scale=(0.9, 1), ratio=(3./4., 4./3.))\r\n rp = RandomPerspective(anglex=3, angley=3, anglez=3, shear=3)\r\n # Improve generalization\r\n rhf = RandomHorizontalFlip(p=0.5)\r\n # Deal with dirts, ants, or spiders on the camera lense\r\n re = RandomErasing(p=0.5, scale=(0.003, 0.01), ratio=(0.3, 3.3), value=0)\r\n cj = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=(-0.1, 0.1), gamma=0.3)\r\n\r\n return torchvision.transforms.Compose([cj, rrc, rp, rhf])\r\n #return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .9,]),\r\n # GroupRandomHorizontalFlip()])\r\n\r\n\r\nif __name__ == '__main__':\r\n import torch\r\n RGB_input = torch.zeros([1,8,3,224,224])\r\n FLOW_input = torch.zeros([1,8,3,224,224])\r\n model = STCNet()\r\n model = model.cuda()\r\n print(model)\r\n RGB_input = RGB_input.cuda()\r\n FLOW_input = FLOW_input.cuda()\r\n res = model(RGB_input,FLOW_input)\r\n print(res.shape)\r\n","sub_path":"src/STCNet-se_resnext/stcnet.py","file_name":"stcnet.py","file_ext":"py","file_size_in_byte":13644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"610008978","text":"#! 
/usr/bin/env python\nclass Solution:\n # @param {integer} n\n # @return {boolean}\n def isPowerOfTwo(self, n):\n if n <= 0:\n return False\n while n > 0:\n if n == 1:\n return True\n if n % 2 != 0:\n return False\n n = n / 2\n# Note: only for n > 0 can be the power of two;\n# and n = 1 if the final step to check if n is the power of two;\n","sub_path":"Power231/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"36433","text":"from handler.base_plugin_command import CommandPlugin\nfrom vk.utils import EventType\n\nimport peewee, aiohttp, json, io\n\n\nclass LockChatPlugin(CommandPlugin):\n __slots__ = (\"pwmanager\", \"flags\", \"ChatLock\")\n\n def __init__(self, *commands, prefixes=None, strict=False, picture_flag=\"пикчу\", title_flag=\"название\", invite_flag=\"состав\"):\n \"\"\"Plugin allowing admins or moders to lock chat's state\"\"\"\n\n if not commands:\n commands = \"lock\",\n\n super().__init__(*commands, prefixes=prefixes, strict=strict)\n\n self.flags = [picture_flag, title_flag, invite_flag]\n\n self.pwmanager = None\n self.ChatLock = None\n\n self.description = [\"Оборона беседы\",\n f\"{self.prefixes[-1]}{self.commands[0]} [флаги через пробелы] - установить настройки защиты беседы. Если флаг присутствует, \"\n \"то изменение соответствующего атрибута запрещено.\",\n f\"Флаги: {picture_flag} (обложка не будет меняться), {title_flag} (название не будет меняться), \"\n \"{invite_flag} (нельзя покидать или вступать в беседу)\",\n \"Отсутствие флага разрешает изменение атрибута.\"]\n\n def initiate(self):\n if self.pwmanager is None:\n raise ValueError(\"Please, use PeeweePlugin with set_manager=True for this plugin to work or set pwmanager for plugin yourself.\")\n\n class ChatLock(peewee.Model):\n chat_id = peewee.BigIntegerField()\n\n old_pic = peewee.TextField(null=True)\n hold_pi = peewee.BooleanField(default=False)\n hold_ti = peewee.BooleanField(default=False)\n hold_en = peewee.BooleanField(default=False)\n\n class Meta:\n database = self.pwmanager.database\n indexes = (\n (('chat_id', ), True),\n )\n\n with self.pwmanager.allow_sync():\n ChatLock.create_table(True)\n\n self.ChatLock = ChatLock\n\n async def check_event(self, evnt):\n return evnt.type == EventType.ChatChange\n\n async def process_event(self, evnt):\n if evnt.user_id == self.api.get_current_id():\n return\n\n lock, _ = await self.pwmanager.get_or_create(self.ChatLock, chat_id=evnt.chat_id)\n\n if evnt.source_act == \"chat_invite_user\":\n if lock.hold_en:\n return await self.api.messages.removeChatUser(chat_id=evnt.chat_id, user_id=evnt.source_mid)\n\n elif evnt.source_act == \"chat_kick_user\":\n if lock.hold_en:\n return await self.api.messages.addChatUser(chat_id=evnt.chat_id, user_id=evnt.source_mid)\n\n elif evnt.source_act == \"chat_title_update\":\n if lock.hold_ti:\n return await self.api.messages.editChat(chat_id=evnt.chat_id, title=evnt.old_title)\n\n elif evnt.source_act == \"chat_photo_update\" or evnt.source_act == \"chat_photo_remove\":\n if lock.hold_pi:\n sender = self.api.get_default_sender(\"photos.getChatUploadServer\")\n\n resp = await self.api(sender).photos.getChatUploadServer(chat_id=evnt.chat_id)\n if not resp or not resp.get(\"upload_url\"): return\n upload_url = resp[\"upload_url\"]\n\n data = aiohttp.FormData()\n async with aiohttp.ClientSession() as sess:\n async with sess.get(lock.old_pic) as resp:\n data.add_field('file', io.BytesIO(await resp.read()), 
filename=\"picture.jpg\", content_type='multipart/form-data')\n\n async with sess.post(upload_url, data=data) as resp:\n result = json.loads(await resp.text())\n\n if not result or not result.get(\"response\"): return\n\n return await self.api(sender).messages.setChatPhoto(file=result[\"response\"])\n\n async def process_message(self, msg):\n if msg.chat_id == 0:\n return\n\n if \"is_admin\" in msg.meta and not msg.meta[\"is_admin\"] and \"is_moder\" in msg.meta and not msg.meta[\"is_moder\"]:\n return await msg.answer(\"Вы не имеете доступа к этой команде.\")\n\n lock, _ = await self.pwmanager.get_or_create(self.ChatLock, chat_id=msg.chat_id)\n\n flags = self.parse_message(msg)[1].lower().split()\n pi, ti, en = False, False, False\n\n for uf in flags:\n took = False\n\n for i, f in enumerate(self.flags):\n if not f.startswith(uf):\n continue\n\n if took:\n return await msg.answer(f\"Флаг не удалось опознать: \\\"{uf}\\\".\")\n took = True\n\n if i == 0:\n pi = True\n elif i == 1:\n ti = True\n elif i == 2:\n en = True\n\n if not took:\n return await msg.answer(f\"Флаг не удалось опознать: \\\"{uf}\\\".\")\n\n if pi:\n chat = await self.api.messages.getChat(chat_id=msg.chat_id)\n\n if not chat:\n return await msg.answer(\"Произошла ошибка. Попробуйте позже.\")\n\n lock.old_pic = chat.get(\"photo_200\")\n lock.hold_pi = True\n else:\n lock.hold_pi = False\n\n lock.hold_ti = ti\n lock.hold_en = en\n\n await self.pwmanager.update(lock)\n\n willbesaved = \", \".join(i for i in\n (\n \"обложка беседы\" if pi else \"\",\n \"название беседы\" if ti else \"\",\n \"состав беседы\" if en else \"\"\n ) if i\n )\n\n return await msg.answer (\n \"Будет сохранено: \" + (willbesaved if willbesaved else \"ничего\")\n )\n","sub_path":"plugins/chat/lock_chat.py","file_name":"lock_chat.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"483211879","text":"import matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.cm as cm\nimport numpy as np\n\n\nsver = ['lbfgs', 'sgd','adam']\nav,HL,HLN,TP,TN,FP,FN,prec1,recall1 = np.loadtxt('params_' + sver[0] + '.txt').T\nav,HL,HLN,TP,TN,FP,FN,prec2,recall2 = np.loadtxt('params_' + sver[1] + '.txt').T\nav,HL,HLN,TP,TN,FP,FN,prec3,recall3 = np.loadtxt('params_' + sver[2] + '.txt').T\n\nfig= plt.figure(figsize=(6, 8))\nn=16\nnrows=4\nncols=4\nmax_val =0.0\nvaly=[3,2,1,0]\ngs1 = gridspec.GridSpec(4, 3)\ngs1.update(wspace=0.0, hspace=0.0) # set the spacing between axes.\n \nfor i in np.array(range(4)):\n \n nstart =i*n\n nfin =(i+1)*n\n data_prec1 = prec1[nstart:nfin]\n grid_prec1 = data_prec1.reshape((nrows,ncols))\n data_prec2 = prec2[nstart:nfin]\n grid_prec2 = data_prec2.reshape((nrows,ncols))\n data_prec3 = prec3[nstart:nfin]\n grid_prec3 = data_prec3.reshape((nrows,ncols)) \n\n ax1 = plt.subplot(gs1[i*3])\n ax2 = plt.subplot(gs1[i*3 +1])\n ax3 = plt.subplot(gs1[i*3 +2])\n [[row1,col1]] = np.argwhere(grid_prec1 == np.max(grid_prec1))\n ym1 = valy[row1] \n xm1=col1 \n [[row2,col2]] = np.argwhere(grid_prec2 == np.max(grid_prec2))\n ym2 = valy[row2] \n xm2=col2\n [[row3,col3]] = np.argwhere(grid_prec3 == np.max(grid_prec3))\n ym3 = valy[row3] \n xm3=col3\n \n if (np.max(grid_prec1)>max_val):\n max_val =np.max(grid_prec1) \n [[rowm,colm]] = np.argwhere(grid_prec1 == np.max(grid_prec1))\n pmax = (i*3+1)\n if (np.max(grid_prec2)>max_val):\n max_val =np.max(grid_prec2)\n [[rowm,colm]] = np.argwhere(grid_prec2 == np.max(grid_prec2))\n pmax = (i*3+2)\n 
if (np.max(grid_prec3)>max_val):\n max_val =np.max(grid_prec3)\n [[rowm,colm]] = np.argwhere(grid_prec3 == np.max(grid_prec3))\n pmax = (i*3+3)\n im1= ax1.imshow(grid_prec1,interpolation='nearest', cmap=cm.YlGnBu,vmin=0.0, vmax=1.0, extent =[0,4,0,4]) \n im2= ax2.imshow(grid_prec2,interpolation='nearest', cmap=cm.YlGnBu,vmin=0.0, vmax=1.0, extent =[0,4,0,4])\n im3= ax3.imshow(grid_prec3,interpolation='nearest', cmap=cm.YlGnBu,vmin=0.0, vmax=1.0, extent =[0,4,0,4]) \n ax1.plot(xm1+0.5,ym1+0.5,'k*')\n ax2.plot(xm2+0.5,ym2+0.5,'k*')\n ax3.plot(xm3+0.5,ym3+0.5,'k*')\n plt.setp(ax1, yticks=[0.5,1.5, 2.5, 3.5], yticklabels=['4', '3', '2', '1'], ylabel='no. HL')\n if (i==3):\n plt.setp(ax1, xticks=[0.5, 1.5, 2.5, 3.5], xticklabels=['10', '20', '50', '100'], xlabel='no. HL nodes') \n plt.setp(ax2, xticks=[0.5, 1.5, 2.5, 3.5], xticklabels=['10', '20', '50', '100'], xlabel='no. HL nodes')\n plt.setp(ax3, xticks=[0.5, 1.5, 2.5, 3.5], xticklabels=['10', '20', '50', '100'], xlabel='no. HL nodes')\n ax2.set_yticks([])\n ax3.set_yticks([])\n else:\n ax1.set_xticks([])\n ax2.set_xticks([])\n ax2.set_yticks([])\n ax3.set_xticks([])\n ax3.set_yticks([]) \n\nymax = valy[rowm] \nxmax = colm \nlabel=max_val\nplt.subplot(gs1[pmax-1])\nplt.plot(xmax+0.5,ymax+0.5,'o',markersize=15, color='red', mfc='none')\nplt.annotate(label, xy=(xmax+0.5, ymax+0.5), xytext=(xmax-20, ymax+20),\n textcoords='offset points', ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.5),\n arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))\n \nplt.subplot(gs1[0]) \nplt.annotate('alpha',color='blue', xy=(10, 540), xycoords='figure pixels') \nplt.annotate('solver',color='red', xy=(205, 540), xycoords='figure pixels')\nplt.annotate('1e-1', color='blue', xy=(10, 505), xycoords='figure pixels') \nplt.annotate('1e-2', color='blue', xy=(10, 392), xycoords='figure pixels') \nplt.annotate('1e-3', color='blue', xy=(10, 275), xycoords='figure pixels') \nplt.annotate('1e-4', color='blue', xy=(10, 160), xycoords='figure pixels') \nplt.annotate(sver[0], color='red', xy=(100, 520), xycoords='figure pixels') \nplt.annotate(sver[1], color='red', xy=(210, 520), xycoords='figure pixels') \nplt.annotate(sver[2], color='red', xy=(320, 520), xycoords='figure pixels') \ncax = fig.add_axes([0.9, 0.1, 0.03, 0.8])\nfig.colorbar(im3, cax=cax)\npad = 5 \n \nfig.savefig('param_all.pdf')\n\n\n\n\n","sub_path":"plot_pixel_params.py","file_name":"plot_pixel_params.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"33341713","text":"#!/usr/bin/env python3\n# PYTHON_ARGCOMPLETE_OK\n\nimport sys\nimport os\nimport argcomplete\nimport argparse\n\nfrom errors import FatalError, NotSupportedError\nimport common\nimport partition\nimport binary\nimport gen_partition\nimport traces\n\n__version__ = \"0.1\"\n\nPYTHON2 = sys.version_info[0] < 3 # True if on pre-Python 3\nif PYTHON2:\n\tprint(\"Fatal error: gen_flash_image needs to be run with python version 3\")\n\texit(1)\n\n\nclass FlashImage(object):\n\t\n\tdef __init__(self, sectorSize, flashType):\n\t\t\n\t\tself.sectorSize = sectorSize\n\t\tif flashType not in ('hyper', 'spi'):\n\t\t\traise FatalError('Flash type %s not suported. 
ROM boot loader supports hyper and spi flash type' % flashType)\n\t\tself.flashType = flashType\n\t\t\n\t\tself.image = bytearray()\n\t\t\n\t\t# Partition\n\t\tself.partitionTable = None\n\t\tself.partitionTableOffset = None\n\t\t# Partition 0 : Flash header + Second Stage BootLoader or unic APP\n\t\tself.ssbl = None\n\t\n\tdef getCurrentSize(self):\n\t\treturn len(self.image)\n\t\n\tdef appendBootloader(self, elf, encrypt = False, aesKey = None, aesIv = None):\n\t\tself.ssbl = binary.SSBL(flashType = self.flashType, elf = elf, encrypt = encrypt, aesKey = aesKey,\n\t\t aesIv = aesIv)\n\t\tself.image = self.ssbl.dump()\n\t\tself.partitionTableOffset = self.ssbl.partitionTableOffset\n\t\treturn self.partitionTableOffset\n\t\n\tdef appendPartitionTable(self, partitionTable):\n\t\tself.partitionTable = partitionTable\n\t\t\n\t\t# Check if a boot loader has been added previously, if not, generating empty boot loader\n\t\tif len(self.image) == 0:\n\t\t\ttraces.info(\"Warning: Empty boot loader partition. GAP will not be able to boot from flash.\")\n\t\t\tself.ssbl = binary.SSBL(flashType = self.flashType)\n\t\t\tself.image += self.ssbl.dump()\n\t\t\n\t\tself.image += self.partitionTable.to_binary()\n\t\n\tdef writeRAWImage(self, outputPath):\n\t\t# Creating folders if necessary\n\t\ttry:\n\t\t\tos.makedirs(os.path.realpath(os.path.dirname(outputPath)))\n\t\texcept FileExistsError:\n\t\t\tpass\n\t\t\n\t\twith open(outputPath, 'wb') as file:\n\t\t\tfile.write(self.image)\n\t\n\tdef writeStimuli(self, outputPath):\n\t\t# Warning: not tested!\n\t\t# Creating folders if necessary\n\t\ttry:\n\t\t\tos.makedirs(os.path.dirname(outputPath))\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\twith open(outputPath, 'w') as file:\n\t\t\tif self.flashType == 'mram':\n\t\t\t\tlast_bytes = len(self.buff) & 0x7\n\t\t\t\tfor i in range(0, 8 - last_bytes):\n\t\t\t\t\tself.__appendByte(0)\n\t\t\t\tfor i in range(0, len(self.buff) >> 3):\n\t\t\t\t\tvalue = (self.buff[i * 8 + 7] << 56) + (self.buff[i * 8 + 6] << 48) + (\n\t\t\t\t\t\t\tself.buff[i * 8 + 5] << 40) + (self.buff[i * 8 + 4] << 32) + (\n\t\t\t\t\t\t\t self.buff[i * 8 + 3] << 24) + (self.buff[i * 8 + 2] << 16) + (\n\t\t\t\t\t\t\t self.buff[i * 8 + 1] << 8) + self.buff[i * 8]\n\t\t\t\t\tdumpLongToSlm(file, i, value)\n\t\t\telif self.flashType == 'hyper':\n\t\t\t\tif len(self.buff) & 1 != 0:\n\t\t\t\t\tself.__appendByte(0)\n\t\t\t\tfor i in range(0, len(self.buff) >> 1):\n\t\t\t\t\tvalue = (self.buff[i * 2 + 1] << 8) + self.buff[i * 2]\n\t\t\t\t\tdumpShortToSlm(file, i, value)\n\t\t\telif self.archi == 'vivosoc2' or self.archi == 'fulmine':\n\t\t\t\tif len(self.buff) % 4 != 0:\n\t\t\t\t\tfor i in range(0, 4 - (len(self.buff) % 4)):\n\t\t\t\t\t\tself.buff.append(0)\n\t\t\t\tfor i in range(0, len(self.buff), 4):\n\t\t\t\t\tdumpByteToSlm(file, i, self.buff[i + 3])\n\t\t\t\t\tdumpByteToSlm(file, i + 1, self.buff[i + 2])\n\t\t\t\t\tdumpByteToSlm(file, i + 2, self.buff[i + 1])\n\t\t\t\t\tdumpByteToSlm(file, i + 3, self.buff[i + 0])\n\t\t\telse:\n\t\t\t\tfor i in range(0, len(self.buff)):\n\t\t\t\t\tdumpByteToSlm(file, i, self.buff[i])\n\n\ndef appendArgs(parser: argparse.ArgumentParser) -> None:\n\t\"\"\"\n\tAppend specific module arguments.\n\n\t:param parser:\n\t:type parser: argparse.ArgumentParser\n\t\"\"\"\n\t\n\t# Bootloader\n\tparser.add_argument('--boot-loader',\n\t type = argparse.FileType('rb'), required = True,\n\t help = \"\"\"Boot loader or unic application in elf format.\n\t\t In case of Gap boot from flash, this program will be loaded into memories and will be 
executed by the FC.\"\"\")\n\t\n\t# Partition table\n\tparser.add_argument('--partition-table', '-t',\n\t default = None, type = argparse.FileType('rb'), required = False,\n\t dest = 'partitionTable',\n\t help = \"\"\"Partition table in binary or csv format.\n\t If the partition table is not mentioned, a typical partition table is generated with both filesystem partitions readfs and LittleFS.\"\"\")\n\t\n\t# Partition images\n\tparser.add_argument('--partition', '-p', metavar = 'partition-name partition-image',\n\t default = {},\n\t action = partition.PartitionNameFilenamePairAction,\n\t help = 'The name of the partition and its image file.')\n\t\n\t# Flash block size\n\tparser.add_argument('--block-size', '-b',\n\t required = True,\n\t type = common.argToInt, dest = 'blockSize',\n\t help = \"Erasable flash sectore size. useful for aligning partitions.\")\n\t\n\t# Flash size\n\tparser.add_argument('--flash-size', '-s',\n\t required = True,\n\t type = common.argToInt, dest = 'flashSize',\n\t help = \"Flash size.\")\n\t\n\t# Flash type\n\tparser.add_argument('--flash-type',\n\t required = True,\n\t choices = ('hyper', 'spi'), dest = 'flashType',\n\t help = \"Flash type.\")\n\t\n\t# Output\n\tparser.add_argument('-o',\n\t dest = 'output', default = 'flash.img',\n\t help = 'RAW image output')\n\n\ndef operationFunc(args):\n\ttraces.verbose = args.verbose\n\ttraces.info('Build flash image')\n\t\n\tflashImage = FlashImage(sectorSize = args.blockSize, flashType = args.flashType)\n\t\n\t#\n\t# Bootloader\n\t#\n\tendOfSSBLOffset = flashImage.appendBootloader(elf = args.boot_loader)\n\ttraces.info(\"Partition boot loader size: 0x%X\" % endOfSSBLOffset)\n\t\n\t#\n\t# Partition table\n\t#\n\ttraces.info(\"\\nGenerating partition table:\")\n\t# Check if the partition table is lower than free space in current SSBL flash sector\n\tif (endOfSSBLOffset + partition.MAX_PARTITION_TABLE_SIZE) > binary.align(endOfSSBLOffset, args.blockSize):\n\t\tpartitionTableOffset = binary.align(endOfSSBLOffset, args.blockSize)\n\t\ttraces.info(\n\t\t\t\"No free space to store partition table at the end of the SSBL partition, adding padding untill the next sector 0x%X\" % partitionTableOffset)\n\t\tflashImage.image.padToOffset(partitionTableOffset)\n\telse:\n\t\tpartitionTableOffset = endOfSSBLOffset\n\t\n\ttraces.info(\"Partition table offset: 0x%X\" % partitionTableOffset)\n\t\n\tif args.partitionTable:\n\t\ttraces.infoWithoutNewLine('Open partition table: ')\n\t\ttableInput = args.partitionTable.read()\n\t\tif gen_partition.isBinaryPartitionTable(tableInput):\n\t\t\t# Binary format\n\t\t\ttraces.info('Binary table format')\n\t\t\ttable = gen_partition.PartitionTable.from_binary(tableInput)\n\t\t\n\t\telse:\n\t\t\t# CSV Format\n\t\t\ttraces.info('CSV table format')\n\t\t\t\n\t\t\ttry:\n\t\t\t\ttableInput = tableInput.decode()\n\t\t\texcept UnicodeDecodeError:\n\t\t\t\traise InputError(\n\t\t\t\t\t'\"%s\" input file must be a CSV text file or partition table binary.' 
% args.partitionTable.name)\n\t\t\t\n\t\t\ttraces.info('Parsing CSV input...')\n\t\t\ttable = gen_partition.PartitionTable.from_csv(tableInput, partitionTableOffset = partitionTableOffset,\n\t\t\t sectorSize = args.blockSize, md5Sum = True)\n\t\n\telse:\n\t\t# Auto partitioning\n\t\ttraces.info('Partition table was not provided, generating generic table.')\n\t\ttable = gen_partition.PartitionTable(md5Sum = True)\n\t\toffset = partitionTableOffset + partition.MAX_PARTITION_TABLE_SIZE\n\t\t\n\t\tif \"readfs\" in args.partition.keys():\n\t\t\ttraces.info(\"Creating ReadFS partition\")\n\t\t\treadFSSize = os.path.getsize(args.partition['readfs'].name)\n\t\t\ttraces.info(\"ReadFS image size: 0x%X\" % readFSSize)\n\t\t\treadFSPartition = partition.PartitionDefinition(\n\t\t\t\tname = 'readfs',\n\t\t\t\ttype = partition.DATA_TYPE,\n\t\t\t\tsubtype = partition.READFS_SUBTYPE,\n\t\t\t\tsize = readFSSize,\n\t\t\t\toffset = binary.align(offset, args.blockSize))\n\t\t\ttable.append(readFSPartition)\n\t\t\toffset = readFSPartition.offset + readFSPartition.size\n\t\t\n\t\t#\n\t\t# LittleFS partition\n\t\t#\n\t\tlfsOffset = binary.align(offset, args.blockSize)\n\t\tif \"lfs\" in args.partition.keys():\n\t\t\t# LittleFS image from CLI\n\t\t\ttraces.info(\"Creating LittleFS partition\")\n\t\t\tlfsSize = os.path.getsize(args.partition['lfs'].name)\n\t\t\ttraces.info(\"LittleFS image size: 0x%X\" % lfsSize)\n\t\telse:\n\t\t\t# No LittleFS image, use of remaining free space\n\t\t\tlfsSize = args.flashSize - lfsOffset\n\t\t\ttraces.info(\"Creating an empty LittleFS partition, using the rest of the flash space: 0x%X\" % lfsSize)\n\t\t\n\t\tlfsPartition = partition.PartitionDefinition(\n\t\t\tname = 'lfs',\n\t\t\ttype = partition.DATA_TYPE,\n\t\t\tsubtype = partition.LFS_SUBTYPE,\n\t\t\tsize = lfsSize,\n\t\t\toffset = binary.align(offset, args.blockSize))\n\t\ttable.append(lfsPartition)\n\t\toffset = lfsPartition.offset + lfsPartition.size\n\n\ttraces.info('Verifying table...')\n\ttraces.info(table.to_csv(simple_formatting = False))\n\ttable.verify(partitionTableOffset = partitionTableOffset, flashSectorSize = args.blockSize, flashSize = args.flashSize)\n\n\tflashImage.appendPartitionTable(table)\n\t\n\t#\n\t# Writting partition images\n\t#\n\ttraces.info(\"Dumping partition image::\")\n\tfor p in sorted(table, key = lambda x: x.offset):\n\t\tif p.name in args.partition.keys():\n\t\t\ttraces.info(\"%s partition [%s]\" % (p.name, args.partition[p.name].name))\n\t\t\tflashImage.image.padToOffset(p.offset)\n\t\t\tflashImage.image += args.partition[p.name].read()\n\t\t\t\n\t\telse:\n\t\t\ttraces.info(\"%s partition [None]\" % p.name)\n\t\n\t# add padding to finish on 4 bytes align\n\tflashImage.image.padToOffset(binary.align(flashImage.getCurrentSize(), 4))\n\t\n\t#\n\t# Write output\n\t#\n\t\n\ttraces.info(\"\\nWritting output image to %s, size %uKB.\" %(args.output, flashImage.getCurrentSize() / 1024))\n\tflashImage.writeRAWImage(args.output)\n\n\ndef main(custom_commandline = None):\n\t\"\"\"\n\tMain\n\tfunction\n\tfor build Flash image\n\t\n\tcustom_commandline - Optional override for default arguments parsing (that uses sys.argv), can be a list of custom arguments\n\tas strings.Arguments and their values need to be added as individual items to the list e.g.\"-b 115200\" thus\n\tbecomes['-b', '115200'].\n\t\"\"\"\n\t\n\tparser = argparse.ArgumentParser(\n\t\tdescription = 'Build a Gap flash image from binary files, partition table and partition images - v%s' % __version__,\n\t\tprog = 
'gen_flash_image',\n\t\tfromfile_prefix_chars = '@')\n\t\n\tcommon.appendCommonOptions(parser)\n\tappendArgs(parser)\n\t\n\targcomplete.autocomplete(parser)\n\targs = parser.parse_args(custom_commandline)\n\t\n\ttry:\n\t\toperationFunc(args)\n\tfinally:\n\t\ttry: # Clean up AddrFilenamePairAction files\n\t\t\tfor address, argfile in args.addr_filename:\n\t\t\t\targfile.close()\n\t\texcept AttributeError:\n\t\t\tpass\n\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept FatalError as e:\n\t\tprint('\\nA fatal error occurred: %s' % e)\n\t\tsys.exit(2)\n","sub_path":"tools/gapy/gen_flash_image.py","file_name":"gen_flash_image.py","file_ext":"py","file_size_in_byte":10803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"423306082","text":"#!/bin/python\n\nfrom collections import deque\nimport numpy as np\nimport torch\n\ndef mini_batch_train(env, agent, max_episodes, max_steps, batch_size, eps_start=1.0, eps_end=0.1, eps_decay=0.995):\n episode_rewards = []\n rewards_mean = [] # list the mean of the window scores\n rewards_window = deque(maxlen=100)\n eps = eps_start\n\n for episode in range(max_episodes):\n state = env.reset()\n episode_reward = 0\n\n for step in range(max_steps):\n action = agent.act(state, eps)\n next_state, reward, done, _ = env.step(action)\n\n agent.step(state, action, reward, next_state, done)\n \n episode_reward += reward\n state = next_state\n \n if done:\n break\n \n episode_rewards.append(episode_reward)\n rewards_window.append(episode_reward)\n average_score = np.mean(rewards_window)\n rewards_mean.append(average_score)\n\n eps = max(eps_end, eps*eps_decay)\n\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\teps: {:.4f}\\tLR: {}'\n .format(episode, average_score, eps, agent.lr_scheduler.get_lr()), end=\"\")\n \n if episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\teps: {:.4f}\\tLR: {}'\n .format(episode, average_score, eps, agent.lr_scheduler.get_lr()))\n \n if average_score >= 13: # check if environment is solved\n print('\\nEnvironment solved in {: d} episodes!\\tAverage Score: {: .2f}'.format(episode - 100, average_score))\n \n torch.save(agent.qnetwork_local.state_dict(), '{}.pth'.format(agent.name))\n break \n\n return episode_rewards, rewards_mean\n","sub_path":"p1_navigation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"277351311","text":"# Copyright (C) 2008 Laurence Tratt http://tratt.net/laurie/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\nimport re\nfrom .import Results\n\n_RE_US_ZIP = re.compile(\"^[0-9]{5}$\")\n_RE_US_ZIP_PLUS4 = re.compile(\"^[0-9]{5}-[0-9]{4}$\")\n\n\ndef postcode_match(ft, i):\n for match, new_i in _sub_pc_match(ft, i):\n yield match, new_i\n\n\ndef _sub_pc_match(ft, i):\n us_id = ft.queryier.get_country_id_from_iso2(ft, \"US\")\n if _RE_US_ZIP_PLUS4.match(ft.split[i]):\n main, sup = ft.split[i].split('-')\n elif _RE_US_ZIP.match(ft.split[i]):\n main, sup = ft.split[i], None\n else:\n return\n\n if sup is not None:\n sup_txt = \" AND lower(sup)=%(sup)s \"\n else:\n sup_txt = \"\"\n\n c = ft.db.cursor()\n\n c.execute(\"SELECT postcode_id, osm_id, country_id, main, \"\n + ft.location_printer(\"location\") + \" as location \"\n \"FROM postcode \"\n \"WHERE lower(main)=%(main)s \"\n \"AND country_id=%(us_id)s\"\n + sup_txt,\n dict(main=main, sup=sup, us_id=us_id, ))\n\n cols_map = ft.queryier.mk_cols_map(c)\n for cnd in c.fetchall():\n postcode_id = cnd[cols_map[\"postcode_id\"]]\n country_id = cnd[cols_map[\"country_id\"]]\n pp = pp_place_id(ft, cnd[cols_map[\"main\"]], postcode_id)\n\n if us_id != ft.host_country_id:\n pp = \"{0}, {1}\".format(pp, ft.queryier.country_name_id(ft, country_id))\n\n match = Results.RPost_Code(postcode_id, cnd[cols_map[\"osm_id\"]], country_id, cnd[cols_map[\"location\"]], pp)\n yield match, i - 1\n\n\ndef pp_place_id(ft, pp, postcode_id):\n c = ft.db.cursor()\n\n c.execute(\"SELECT parent_id FROM postcode WHERE postcode_id=%(id)s\", dict(id=postcode_id))\n parent_id = c.fetchone()[0]\n\n if parent_id is not None:\n pp = \"{0}, {1}\".format(pp, ft.queryier.pp_place_id(ft, parent_id))\n\n return pp\n","sub_path":"Geo/US.py","file_name":"US.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"568561561","text":"# Adapted from https://github.com/karpathy/char-rnn Implementation\nimport numpy as np\nimport os\nimport sys\n\n# run thr model on GPUs\nos.environ['CUDA_VISIBLE_DEVICES'] = \"0\"\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers import TimeDistributed\nfrom keras import optimizers\n\n# ========================================================\n# training set used to trained the model\ntraining_set_original = sys.argv[1]\n\n# model to fine tune (this model was built using sys.argv[1])\nmodel_wts_tostart = sys.argv[2]\n\n# new training set on which model will be finetune\ntraining_set_fortuning = sys.argv[3]\n\n# number epochs\nnum_epoch = int(sys.argv[4])\n\n# batch size\nbatch_size = int(sys.argv[5])\n\n# learning rate\nlearning_rate = float(sys.argv[6])\n\n# output folder to store the fine tune models\noutput_folder = sys.argv[7]\n\n# ========================================================\nprint(\"input:\")\nprint(\"training_set_original\", training_set_original)\nprint(\"model_wts_tostart\", model_wts_tostart)\nprint(\"training_set_fortuning\", training_set_fortuning)\nprint(\"num_epoch\", num_epoch)\nprint(\"batch_size\", batch_size)\nprint(\"learning_rate\", learning_rate)\nprint(\"output_folder\", output_folder)\n# ========================================================\n\ntext = 
open(training_set_original, 'r').read()\nchar_to_idx = {ch: i for (i, ch) in enumerate(sorted(list(set(text))))}\nidx_to_char = {i: ch for (ch, i) in char_to_idx.items()}\nvocab_size = len(char_to_idx)\n\ntext = open(training_set_fortuning, 'r').read()\n\nprint('Working on %d characters (%d unique)' % (len(text), vocab_size))\n\n# ========================================================\nSEQ_LEN = 64\nBATCH_SIZE = batch_size\nBATCH_CHARS = int(len(text) / BATCH_SIZE)\nLSTM_SIZE = 512\nLAYERS = 3\n\n\n# For training, each subsequent example for a given batch index should be a\n# consecutive portion of the text. To achieve this, each batch index operates\n# over a disjoint section of the input text.\ndef read_batches(text):\n T = np.asarray([char_to_idx[c] for c in text], dtype=np.int32)\n X = np.zeros((BATCH_SIZE, SEQ_LEN, vocab_size))\n Y = np.zeros((BATCH_SIZE, SEQ_LEN, vocab_size))\n\n for i in range(0, BATCH_CHARS - SEQ_LEN - 1, SEQ_LEN):\n X[:] = 0\n Y[:] = 0\n for batch_idx in range(BATCH_SIZE):\n start = batch_idx * BATCH_CHARS + i\n for j in range(SEQ_LEN):\n X[batch_idx, j, T[start + j]] = 1\n Y[batch_idx, j, T[start + j + 1]] = 1\n\n yield X, Y\n\n\n# ========================================================\ndef build_model(infer):\n if infer:\n batch_size = seq_len = 1\n else:\n batch_size = BATCH_SIZE\n seq_len = SEQ_LEN\n model = Sequential()\n model.add(LSTM(LSTM_SIZE,\n return_sequences=True,\n batch_input_shape=(batch_size, seq_len, vocab_size),\n stateful=True))\n\n model.add(Dropout(0.2))\n for l in range(LAYERS - 1):\n model.add(LSTM(LSTM_SIZE, return_sequences=True, stateful=True))\n model.add(Dropout(0.2))\n\n model.add(TimeDistributed(Dense(vocab_size)))\n model.add(Activation('softmax'))\n\n adagrad = optimizers.Adagrad(lr=learning_rate)\n model.compile(loss='categorical_crossentropy', optimizer=adagrad)\n return model\n\n\n# ========================================================\nprint('Building model.')\ntraining_model = build_model(infer=False)\ntraining_model.load_weights(model_wts_tostart)\nprint(\"..done\")\n\nfor epoch in range(num_epoch):\n for i, (x, y) in enumerate(read_batches(text)):\n loss = training_model.train_on_batch(x, y)\n print(epoch, i, loss)\n training_model.save_weights(output_folder + '/keras_char_rnn.%d.h5' % epoch,\n overwrite=True)\nprint(\"END\")\n","sub_path":"build_models_finetune.py","file_name":"build_models_finetune.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"16498741","text":"import json\r\n\r\n#Load the json file\r\nfilename = 'json_donation_file.txt'\r\ni = 0\r\nflag100_set = 0\r\nwith open(filename,'r') as f:\r\n all_data = json.load(f)\r\n while i < len(all_data):\r\n data = all_data[i]\r\n #Parse into useful stuff\r\n #loop through each\r\n try:\r\n output_string = 'Donation ' + str(data['amount']) + ' by ' + str(data['displayName']) + ': '\r\n if str(data['message']) != \"None\":\r\n output_string += str(data['message'])\r\n if data['amount'] >= 100.0 and flag100_set:\r\n print(output_string + '\\n')\r\n elif not flag100_set:\r\n print(output_string + '\\n')\r\n\r\n except ValueError as err:\r\n print(\"Error parsing String! 
Sorry I'm lazy\\n\")\r\n #print(err)\r\n #print('\\n')\r\n try:\r\n useless = input(\"Press Enter for next 100$+ donation\\n\")\r\n except SyntaxError:\r\n pass\r\n i+= 1\r\n","sub_path":"extra_life_json_parser.py","file_name":"extra_life_json_parser.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"630305737","text":"#!/usr/bin/python3\n\"\"\"\ncreates new view for User object that handles all default RestFul API actions\n\"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, request, abort\nfrom models import storage\nfrom models.amenity import Amenity\nfrom models.city import City\nfrom models.place import Place\nfrom models.review import Review\nfrom models.state import State\nfrom models.user import User\n\n\n@app_views.route('/users', methods=['GET', 'POST'])\ndef users():\n \"\"\"GET request: retrieves the list of all User objects\n POST request: creates a new instance of the object\"\"\"\n if request.method == 'POST':\n if request.is_json is False:\n return jsonify(error='Not a JSON'), 400\n http_body = request.get_json()\n if 'email' not in http_body.keys():\n return jsonify(error='Missing email'), 400\n if 'password' not in http_body.keys():\n return jsonify(error='Missing password'), 400\n new_user = User(**http_body)\n storage.new(new_user)\n storage.save()\n return jsonify(new_user.to_dict()), 201\n return jsonify([user.to_dict() for user in storage.all(User).values()])\n\n\n@app_views.route('/users/', methods=['GET', 'DELETE', 'PUT'])\ndef user_id(user_id):\n \"\"\"GET request: retrieves a User object\n DELETE request: deletes a User object\"\"\"\n user = storage.get('User', user_id)\n if user is None:\n abort(404)\n if request.method == 'GET':\n return jsonify(user.to_dict())\n if request.method == 'DELETE':\n storage.delete(user)\n storage.save()\n return jsonify({}), 200\n if request.method == 'PUT':\n if request.is_json is False:\n return jsonify(error='Not a JSON'), 400\n new_attr = request.get_json()\n for attr in new_attr.keys():\n if attr not in ['id', 'created_at', 'updated_at']:\n setattr(user, attr, new_attr[attr])\n storage.save()\n return jsonify(user.to_dict()), 200\n","sub_path":"api/v1/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"371237584","text":"import typing\n\n\ndef bubble_sort(l: []) -> []:\n \"\"\"\n arguments: list\n returns: list (sorted)\n \"\"\"\n n = len(l)\n for i in range(n):\n for j in range(n):\n if l[j] > l[i]:\n l[i], l[j] = l[j], l[i]\n return l\n\n\nif __name__ == \"__main__\":\n l = [1, 4, 2, 3, 6, -1, 7, 10, 0]\n print(bubble_sort(l))\n","sub_path":"algorithms/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"380616947","text":"import re\n\nfrom flask import render_template, Blueprint, request, g\n\nimport app.service.config_service as config_service\nfrom ..service.auth_service import login_required\n\nrule_bp = Blueprint('rule', __name__)\n\n\n@rule_bp.route('/rules/validation', methods=['GET'], defaults={'provider': None, 'dataset': None})\n@rule_bp.route('/rules/validation/', methods=['GET', 'POST'], defaults={'dataset': None})\n@rule_bp.route('/rules/validation//', methods=['GET'])\n@login_required\ndef get_validation_rules(provider, dataset):\n 
success = request.args.get('success')\n if success is not None:\n if success == 'true':\n success = True\n else:\n success = False\n\n variables = []\n\n if provider is None:\n results = config_service.get_providers(g.user)\n elif dataset is None:\n results = config_service.get_datasets(g.user, provider)\n else:\n temp = config_service.get_validation_rules(g.user, provider, dataset)\n results = []\n variables = []\n\n for key, values in temp.items():\n for value in values:\n results.append((value[0], value[2], key, re.sub('(\\\\[\\')|(\\'\\\\])|(\\')+', '', str(value[1]))))\n\n if value[0] not in variables:\n variables.append(value[0])\n\n data = zip(range(1, len(results) + 1), results)\n\n return render_template('validation.html', provider=provider, dataset=dataset, variables=variables, data=data,\n update_success=success)\n\n\n@rule_bp.route('/rules/cleaning//', methods=['GET'])\n@login_required\ndef get_cleaning_rules(provider, dataset):\n success = request.args.get('success')\n if success is not None:\n if success == 'true':\n success = True\n else:\n success = False\n\n temp = config_service.get_cleaning_rules(g.user, provider, dataset)\n results = []\n\n for value in temp.values():\n results.append((value[2], value[3], value[0], value[1]))\n\n data = zip(range(1, len(results) + 1), results)\n\n return render_template('cleaning.html', provider=provider, dataset=dataset, data=data, update_success=success)\n\n\n@rule_bp.route('/rules/missing//', methods=['GET'])\n@login_required\ndef get_missing_rules(provider, dataset):\n success = request.args.get('success')\n if success is not None:\n if success == 'true':\n success = True\n else:\n success = False\n\n temp = config_service.get_missing_values_rules(g.user, provider, dataset)\n results = []\n\n for key, values in temp.items():\n for value in values:\n results.append((value[0], key, value[1]))\n\n data = zip(range(1, len(results) + 1), results)\n\n return render_template('missing.html', provider=provider, dataset=dataset, data=data, update_success=success)\n","sub_path":"app/views/rule_view.py","file_name":"rule_view.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"374176412","text":"import uuid\nfrom typing import Sequence, Union, Optional\n\nfrom django.utils import timezone\nfrom django.http import Http404\nfrom django.db.models.query import QuerySet\nfrom django.http import HttpResponse\nfrom django.conf import settings\n\nfrom rest_framework.exceptions import NotFound, ValidationError, PermissionDenied\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.status import HTTP_200_OK, HTTP_201_CREATED\nfrom rest_framework.response import Response\nfrom rest_framework.request import Request\nfrom rest_framework.generics import CreateAPIView, ListAPIView, get_object_or_404\n\nfrom . 
import models, serializers, utils\n\nclass UploadImageView(CreateAPIView):\n \"\"\" \n View for uploading an image and then \n return data about the image that provided by a plan\n \"\"\"\n\n permission_classes = [IsAuthenticated,]\n \n serializer_class = serializers.CreateImageSerializer\n\n def create(self, request: Request) -> Union[Response, ValidationError]:\n plan_obj: models.Plan = request.user.plan # type: ignore \n\n serializer = self.get_serializer(data=request.data)\n\n serializer.is_valid(raise_exception=True)\n\n img_obj: models.Image = serializer.save() # type: ignore \n\n data = utils.create_image_data_dict(\n plan_obj.thumbnail_heights,\n img_obj,\n plan_obj.is_expiring_link_generation_provided,\n plan_obj.is_link_to_original_provided,\n )\n\n return Response(data, status=HTTP_201_CREATED)\n \nclass ListImagesView(ListAPIView):\n \"\"\" \n View that return a list of data about the image that provided by a plan\n \"\"\"\n permission_classes = [IsAuthenticated,]\n\n serializer_class = serializers.ImageSerializer\n\n def get_queryset(self):\n user_id: int = self.request.user.id # type: ignore\n return models.Image.objects.filter(user_id=user_id).order_by('image')\n\n def list(self, request: Request) -> Union[Response, NotFound]:\n images_qs: QuerySet[models.Image] = self.get_queryset()\n plan_obj: models.Plan = request.user.plan # type: ignore \n\n if not images_qs:\n raise NotFound(\"You haven't sent any pictures yet\")\n\n paginated_images_qs: Sequence[models.Image] = self.paginate_queryset(images_qs) # type: ignore \n response_data = []\n for img_obj in paginated_images_qs:\n data = utils.create_image_data_dict(\n plan_obj.thumbnail_heights,\n img_obj,\n plan_obj.is_expiring_link_generation_provided,\n plan_obj.is_link_to_original_provided,\n )\n response_data.append(data)\n return Response(response_data, status=HTTP_200_OK)\n\nclass ImageView(APIView):\n \"\"\" \n View that return an original of image or thumbnail as file\n \"\"\"\n permission_classes = [IsAuthenticated,]\n\n def get(self, request: Request, image_title: str, **kwargs) -> Union[HttpResponse, Http404, PermissionDenied]:\n height: Optional[int] = kwargs.get(\"height\", None)\n plan_obj: models.Plan = request.user.plan # type: ignore\n user_id: int = request.user.id # type: ignore\n\n if height:\n if not height in range(1, 1001):\n raise ValidationError(\"thumbnail's height must be in range 1 to 1000\")\n if not height in plan_obj.thumbnail_heights:\n raise PermissionDenied(\"Your plan doesn't provide this thumbnail height\")\n else:\n if not plan_obj.is_expiring_link_generation_provided:\n raise PermissionDenied(\"Your plan doesn't provide fetch original image\")\n\n image_queryset: QuerySet[models.Image] = models.Image.objects.all()\n image_obj = get_object_or_404(queryset=image_queryset, user_id=user_id, image=\"static/images/\" + image_title)\n\n height: Optional[int] = kwargs.get(\"height\", None)\n\n image_file = image_obj.get_image_file(height=height)\n\n response = HttpResponse(content_type=\"image/\" + image_file.format)\n response['Content-Disposition'] = 'filename=\"{}.{}\"'.format(\n image_obj.image_name, \n image_file.format\n )\n image_file.save(response, image_file.format)\n # print(response.code)\n return response\n\n\n\nclass FetchExpiringLinkView(APIView):\n \"\"\" \n View that return a link to expiring a originall of image or thumbnail\n \"\"\"\n permission_classes = [IsAuthenticated,]\n\n def get(\n self, request: Request, image_title: str, seconds: int, **kwargs\n ) -> Union[Response, 
Http404, PermissionDenied, ValidationError]:\n\n height: Optional[int] = kwargs.get(\"height\", None)\n plan_obj: models.Plan = request.user.plan # type: ignore\n user_id: int = request.user.id # type: ignore\n \n if height:\n if not height in range(1, 1001):\n raise ValidationError(\"thumbnail's height must be in range 1 to 1000\")\n if not height in plan_obj.thumbnail_heights:\n raise PermissionDenied(\"Your plan doesn't provide this thumbnail height\")\n else:\n if not plan_obj.is_link_to_original_provided:\n raise PermissionDenied(\"Your plan doesn't provide fetch original image\")\n \n if not plan_obj.is_expiring_link_generation_provided:\n raise PermissionDenied(\"Your plan doesn't provide generation expiring links\")\n\n queryset: QuerySet[models.Image] = models.Image.objects.all()\n image_obj = get_object_or_404(queryset=queryset, user_id=user_id, image=\"static/images/\" + image_title)\n\n exp_link_ser = serializers.CreateExpiringLinkSerializer(\n data={\n \"id\": uuid.uuid4,\n \"link_to_image\": settings.HOSTNAME + \"/uploads/\" + image_obj.image_name,\n \"seconds\": seconds\n } \n )\n exp_link_ser.is_valid(raise_exception=True)\n\n exp_link_obj: models.ExpiringLink = exp_link_ser.save() # type: ignore\n\n return Response({\"url\": exp_link_obj.get_expiring_link_to_image(height)}, status=HTTP_200_OK)\n \nclass ExpiringLinkView(APIView):\n\n permission_classes = [IsAuthenticated,]\n\n def get(self, request: Request, image_title: str, uuid: str, **kwargs) -> Union[HttpResponse, Http404, PermissionDenied]:\n height: Optional[int] = kwargs.get(\"height\", None)\n plan_obj: models.Plan = request.user.plan # type: ignore\n user_id: int = request.user.id # type: ignore\n \n if height:\n if not height in range(1, 1001):\n raise ValidationError(\"thumbnail's height must be in range 1 to 1000\")\n if not height in plan_obj.thumbnail_heights:\n raise PermissionDenied(\"Your plan doesn't provide this thumbnail height\")\n else:\n if not plan_obj.is_link_to_original_provided:\n raise PermissionDenied(\"Your plan doesn't provide fetch original image\")\n \n if not plan_obj.is_expiring_link_generation_provided:\n raise PermissionDenied(\"Your plan doesn't provide generation expiring links\")\n\n image_queryset: QuerySet[models.Image] = models.Image.objects.all()\n image_obj = get_object_or_404(queryset=image_queryset, user_id=user_id, image=\"static/images/\" + image_title)\n\n exp_link_queryset: QuerySet[models.ExpiringLink] = models.ExpiringLink.objects.all()\n exp_link_obj = get_object_or_404(queryset=exp_link_queryset, id=uuid)\n \n if exp_link_obj.expired_at < timezone.now():\n raise PermissionDenied(\"Link is expired\")\n\n image_file = image_obj.get_image_file(height=height)\n\n response = HttpResponse(content_type=\"image/\" + image_file.format)\n response['Content-Disposition'] = 'filename=\"{}.{}\"'.format(\n image_obj.image_name, \n image_file.format\n )\n image_file.save(response, image_file.format)\n return response\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"61891995","text":"#encoding=gbk\n\nfrom cj.web import *\n\nsponsor = request('sponsor')\nmonth_begin = request('month_begin')\nmonth_end = request('month_end')\ncount_begin = request('count_begin')\ncount_end = request('count_end')\nflag = request('flag')\n\nurl = \"saleout_profit_detail.py?sponsor=%s&count_begin=%s&count_end=%s&flag=%s\" % 
(sponsor,count_begin,count_end,flag)\nif(flag==\"MRFS\"):\n if(month_begin):\n url = \"buy_profit_detail.py?sponsor=%s&month_begin=%s&month_end=%s\" % (sponsor,month_begin,month_end)\n else:\n url = \"now_profit_detail_buy.py?sponsor=%s&flag=%s\" % (sponsor,flag)\nelif(flag==\"MCHG\"):\n if(month_begin):\n url = \"sale_profit_detail.py?sponsor=%s&month_begin=%s&month_end=%s\" % (sponsor,month_begin,month_end)\n else:\n url = \"now_profit_detail_buy.py?sponsor=%s&flag=%s\" % (sponsor,flag)\nelif(not flag):\n category = request('category')\n expiring = request('expiring')\n\n if(category=='MRFS'):\n url = \"pj_buy.py?expiring=\" + expiring\n elif(category=='MCHG'):\n url = \"pj_sale.py?expiring=\" + expiring\n else:\n url = \"view_double_sale.py?expiring=\" + expiring\n#log.info(\"redirect to %s\" % url)\n\n\next.Page(js.Script(ext.main(ext.ViewPort(), \"window.location.href='%s'\" % url)))","sub_path":"pjtz/pj_redirect.py","file_name":"pj_redirect.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"144053117","text":"#!/usr/bin/env python2.7\n#--coding:utf-8 --\n\"\"\"\nbedpe2bed.py\n\"\"\"\n\n__author__ = \"CAO Yaqiang\"\n__date__ = \"2015-05-26\"\n__modified__ = \"\"\n__email__ = \"caoyaqiang0410@gmail.com\"\n\n#general library\nimport glob, os, time, sys, random\n\n#3rd library\n\n#my own library\nfrom Biolibs.rel.General.logger import getlogger\n\n#global settings\n#logger\ndate = time.strftime(' %Y-%m-%d', time.localtime(time.time()))\nlogger = getlogger(fn=os.getcwd() + \"/\" + date.strip() + \"_\" +\n os.path.basename(__file__) + \".log\")\n\n\ndef call_sys(cmds):\n \"\"\"\n Call systematic commands without return.\n \"\"\"\n for c in cmds:\n logger.info(c)\n try:\n os.system(c)\n except:\n logger.error(c)\n\n\ndef flush(i):\n if i % 1000 == 0:\n report = \"\\r%dk reads parsed\" % (i / 1000)\n sys.stdout.write(report)\n sys.stdout.flush()\n\n\ndef bedpe2bed(bedpe):\n fn = os.path.splitext(bedpe)[0]\n bed = fn + '.bed'\n with open(bed, \"w\") as f:\n for i, line in enumerate(open(bedpe)):\n flush(i)\n line = line.split(\"\\n\")[0].split(\"\\t\")\n if len(line) < 7:\n continue\n r1 = [line[0], line[1], line[2], line[6]]\n r2 = [line[3], line[4], line[5], line[6]]\n r1 = \"\\t\".join(r1) + \"\\n\"\n r2 = \"\\t\".join(r2) + \"\\n\"\n f.write(r1 + r2)\n print\n tmp = str(random.random())\n bedgz = bed + \".gz\"\n c1 = \"sortBed -i %s > %s\" % (bed, tmp)\n c2 = \"mv %s %s\" % (tmp, bed)\n c3 = \"bgzip %s\" % bed\n c4 = \"tabix -p bed %s\" % bedgz\n call_sys([c1, c2, c3, c4])\n\n\ndef main():\n bedpe = \"H3K27ac_chr22.bedpe\"\n bedpe2bed(bedpe)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ChIP-seq/bedpe2bed.py","file_name":"bedpe2bed.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"225084531","text":"#Random UAVs\n\nimport numpy as np\n\ndef add_UAVs_random(n,cls,szs,shps,img):\n\th,w=img.shape\n\tcolors=np.random.choice(cls,n)\n\tsizes=np.random.choice(szs,n)\n\tshapes=np.random.choice(shps,n)\n\tlocy=np.random.randint(h,size=n)\n\tlocx=np.random.randint(w,size=n)\n\n\tshape_pts={'cross':np.array([[0,0],[-1,0],[1,0],[0,-1],[0,1]]),\n\t\t\t'square':np.array([[0,0]]),\n\t\t\t'x':np.array([[0,0],[1,1],[1,-1],[-1,-1],[-1,1]]),\n\t\t\t'arrow':np.array([[0,0],[1,-1],[2,-2],[1,1],[2,2]])}\n\n\tfor i,(cl,sz,shape,x,y) in 
enumerate(zip(colors,sizes,shapes,locy,locx)):\n\t\ttile_pts=shape_pts[shape]\n\t\ttile_size=np.shape(tile_pts)[0]\n\n\t\tfor k,(c,d) in enumerate(zip(tile_pts[:,0],tile_pts[:,1])):\n\t\t\tfor a in np.arange(sz):\n\t\t\t\tfor b in np.arange(sz):\n\t\t\t\t\tpty=y+b+c*sz\n\t\t\t\t\tptx=x+a+d*sz\n\t\t\t\t\tif (pty>=0) and (pty=0) and (ptx> \" + file)\n","sub_path":"tests/executor/plugins/verify-input/lbvs-entry.py","file_name":"lbvs-entry.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"185395238","text":"class Settings():\r\n \r\n def __init__(self, s_l):\r\n \r\n #Общие\r\n self.screen_width = int(s_l['screen_width'])\r\n self.screen_height = int(s_l['screen_height'])\r\n self.bg_color = tuple(int(item) for item in s_l['bg_color'].split(' '))\r\n self.ship_limit = int(s_l['ship_limit'])\r\n \r\n #Пуля\r\n self.bullet_width = int(s_l['bullet_width'])\r\n self.bullet_height = int(s_l['bullet_height'])\r\n self.bullet_color = tuple(int(item) for item in s_l['bullet_color'].split(' '))\r\n self.bullets_allowed = int(s_l['bullets_allowed'])\r\n \r\n #Пришельцы\r\n self.fleet_drop_speed = int(s_l['fleet_drop_speed'])\r\n \r\n self.speedup_scale = float(s_l['speedup_scale'])\r\n self.score_scale = float(s_l['score_scale'])\r\n \r\n self.initialize_dynamic_settings(s_l)\r\n \r\n def initialize_dynamic_settings(self, s_l):\r\n self.ship_speed_factor = float(s_l['ship_speed_factor'])\r\n \r\n self.bullet_speed_factor = float(s_l['bullet_speed_factor'])\r\n \r\n self.alien_points = 50\r\n \r\n self.alien_speed_factor = 1\r\n self.fleet_direction = 1 # 1 is right -1 is left\r\n \r\n def increase_speed(self):\r\n self.ship_speed_factor *= self.speedup_scale\r\n self.bullet_speed_factor *= self.speedup_scale\r\n self.alien_speed_factor *= self.speedup_scale\r\n self.alien_points = int(self.alien_points * self.score_scale)","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"76111347","text":"import os\nimport sys\nimport argparse\nimport shutil\n\nfrom glob import glob\nfrom tqdm import tqdm\n\nfrom robosat_pink.tiles import tiles_from_csv\nfrom robosat_pink.web_ui import web_ui\n\n\ndef add_parser(subparser):\n parser = subparser.add_parser(\n \"subset\",\n help=\"filter images in a slippy map dir using a csv tiles cover\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\"--mode\", type=str, default=\"copy\", choices={\"copy\", \"move\", \"delete\"}, help=\"filtering mode\")\n parser.add_argument(\"--web_ui\", action=\"store_true\", help=\"activate web ui output\")\n parser.add_argument(\"--web_ui_base_url\", type=str, help=\"web ui alternate base url\")\n parser.add_argument(\"--web_ui_template\", type=str, help=\"path to an alternate web ui template\")\n parser.add_argument(\"--dir\", type=str, required=True, help=\"directory to read slippy map tiles from for filtering\")\n parser.add_argument(\"--cover\", type=str, required=True, help=\"csv cover to filter tiles by\")\n parser.add_argument(\"--out\", type=str, help=\"directory to save filtered tiles to (on copy or move mode)\")\n\n parser.set_defaults(func=main)\n\n\ndef main(args):\n tiles = set(tiles_from_csv(args.cover))\n extension = \"\"\n\n for tile in tqdm(tiles, desc=\"Subset\", unit=\"tiles\", ascii=True):\n\n paths = glob(os.path.join(args.dir, str(tile.z), 
str(tile.x), \"{}.*\".format(tile.y)))\n if len(paths) != 1:\n print(\"Warning: {} skipped.\".format(tile))\n continue\n src = paths[0]\n\n try:\n if args.mode in [\"copy\", \"move\"]:\n assert args.out\n if not os.path.isdir(os.path.join(args.out, str(tile.z), str(tile.x))):\n os.makedirs(os.path.join(args.out, str(tile.z), str(tile.x)), exist_ok=True)\n\n extension = os.path.splitext(src)[1][1:]\n dst = os.path.join(args.out, str(tile.z), str(tile.x), \"{}.{}\".format(tile.y, extension))\n\n if args.mode == \"move\":\n assert os.path.isfile(src)\n shutil.move(src, dst)\n\n if args.mode == \"copy\":\n shutil.copyfile(src, dst)\n\n if args.mode == \"delete\":\n assert os.path.isfile(src)\n os.remove(src)\n\n except:\n sys.exit(\"Error: Unable to process {}\".format(tile))\n\n if args.web_ui:\n template = \"leaflet.html\" if not args.web_ui_template else args.web_ui_template\n base_url = args.web_ui_base_url if args.web_ui_base_url else \"./\"\n web_ui(args.out, base_url, tiles, tiles, extension, template)\n","sub_path":"robosat_pink/tools/subset.py","file_name":"subset.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"330422674","text":"# Import packages\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import MetaData, Table\nfrom sqlalchemy import select\nfrom sqlalchemy import order_by\n\n# Create engine to connect to file\nengine = create_engine('sqlite:///census_nyc.sqlite')\n\nmetadata = MetaData()\n\n# Reflect census table via engine: census\ncensus = Table('census', metadata, autoload=True, autoload_with=engine)\n\n\n\n# Build a query to select state and age: stmt\nstmt = select([\n census.columns.state,\n census.columns.age\n])\n\n# Append order by to ascend by state and descend by age\nstmt = stmt.order_by(\n census.columns.state,\n desc(census.columns.age)\n)\n\n# Execute the statement and store all of the records: results\nresults = connection.execute(stmt).fetchall()\n\n# Print the first 20 results\nprint(results[:20])\n","sub_path":"DB/sql_order_multiple_columns.py","file_name":"sql_order_multiple_columns.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"342667396","text":"#!/usr/bin/env python3\nimport pydanmaku as pd\nimport random\nimport time\nimport math\nfrom math import pi, sin, cos\nfrom player import Moveable\n\n# import pkgutil\n# a =pkgutil.get_data('pydanmaku', 'shaders/frag.shader')\n# print(a)\n\nFPS = 60\nwait = 1/FPS\nprevious = time.time()\n\ndef framerate():\n global previous, wait, FPS\n now = time.time()\n delta = now - previous\n to_wait = wait - delta\n previous += wait\n if to_wait < 0:\n print('bad'); return\n time.sleep(to_wait)\n\nx = pd.DanmakuGroup(\"images/rice.png\")\ny = pd.DanmakuGroup(\"images/rice.png\", True)\ni = 0\n\nplayer = Moveable(100, 100, 2, \"images/amulet.png\")\n\nviolin_freqs = [\n 0.995, 0.940, 0.425, 0.480, 0.0, 0.365, 0.040, 0.085, 0.0, 0.09\n]\n\ndef violin(x):\n return sum (\n s*sin((n+1)*x) if n%2==0 else s*cos((n+1)*x)\n for n, s in enumerate(violin_freqs)\n )\n\n@pd.modifier\ndef wavy(step, b):\n if b.life < 30:\n b = b._replace(ang=b.ang + 0.1*sin((step+b.life)/10), acc=b.acc+0.001)\n return b\n\nx.add_modifier(wavy)\n\n\n\"\"\"\nimport matplotlib.pyplot as plt\nins = [2*pi*x/1000 for x in range(1001)]\nouts = list(map(violin, ins))\nplt.plot(ins, outs)\nplt.show()\n\nimport sys\nsys.exit(0)\n\"\"\"\ntry:\n pd.init()\n 
start = time.time()\n i = 0\n pos = 100\n for _ in range(60000):\n i+=1\n \n if i % 4 == 1:\n for j in range(10):\n x.add_bullet(\n pos+0*100*violin(i/100), 0, True, 10, 15,\n angle=i/30 + j*pi/5, speed=5\n )\n if i % 4 == 3:\n for j in range(10):\n y.add_bullet(\n -pos+0*-100*violin(i/100), 0, False, 5, 100,\n angle=2*pi*violin(i/100) + j*pi/5, speed=5\n )\n\n player.run()\n x.run()\n x.render()\n y.run()\n y.render()\n keys = pd.get_keys()\n if keys[263]: pos += 2\n if keys[262]: pos -= 2\n pd.render()\n #framerate()\n\nfinally:\n pd.close()\n\n\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"546572189","text":"import json\nimport logging\nimport os\n\nfrom discord import Embed\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\n\nimport api\nimport interface\nfrom api import request\n\nload_dotenv(verbose=True, override=True)\n\nwith open(\"src/resources/box.json\", encoding=\"utf-8\") as f:\n box_types = json.load(f)\n\n\ndef describe_box(box: dict, is_premium: bool) -> str:\n key = box.get(\"key_premium\", box[\"key\"]) if is_premium else box[\"key\"]\n max_point = max(p[\"point\"] for p in box[\"probabilities\"])\n return \"{emoji} **{name}** (열쇠 {key}개 필요, 최대 {max_point}P)\".format(\n **dict(box, key=key, max_point=max_point)\n )\n\n\ndef box_open_view(box: dict, is_premium: bool, key_count: int) -> Embed:\n embed = Embed(title=box[\"name\"]).set_thumbnail(url=box[\"image\"])\n\n # 필요 열쇠량\n description = f\"열쇠 {box['key']}개 필요\"\n if \"key_premium\" in box.keys():\n description += f\" (프리미엄: {box['key_premium']}개 필요)\"\n\n # 확률 분포\n description += \"\\n\\n이 상자를 열면..\\n\"\n description += \"\\n\".join(\n [\n f\"{100 * prob['prob']:.0f}%의 확률로 {prob['point']}P 획득\"\n for prob in box[\"probabilities\"]\n ]\n )\n\n # 사용자에게 안내\n key = box.get(\"key_premium\", box[\"key\"]) if is_premium else box[\"key\"]\n description += f\"\\n\\n열쇠 {key}개를 사용해서 **{box['name']}**를 열어볼까요?\\n\"\n description += f\"(현재 열쇠 **{key_count}개**를 가지고 있어요!)\"\n\n embed.description = description\n return embed\n\n\nclass User(commands.Cog):\n guild_whitelist = [\n int(guild_id.strip()) for guild_id in os.getenv(\"GUILD_WHITELIST\").split(\",\")\n ]\n logger = logging.getLogger(\"lara.user\")\n\n def __init__(self, bot):\n self.bot = bot\n\n premium_role_env = os.getenv(\"PREMIUM_ROLE\")\n if premium_role_env is None:\n raise ValueError(\"Environment variable PREMIUM_ROLE is not defined\")\n self.premium_role = int(premium_role_env)\n\n subscriber_role_env = os.getenv(\"SUBSCRIBER_ROLE\")\n if subscriber_role_env is None:\n raise ValueError(\"Environmant variable SUBSCRIBER_ROLE is not defined\")\n self.subscriber_role = int(subscriber_role_env)\n\n async def cog_check(self, ctx):\n return ctx.guild is not None and ctx.guild.id in self.guild_whitelist\n\n async def cog_command_error(self, ctx, error):\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\"⚠️ **팀 크레센도 디스코드**에서만 사용 가능한 명령어입니다.\")\n return\n\n self.logger.error(str(error))\n\n def is_premium(self, ctx: commands.Context) -> bool:\n return ctx.guild.get_role(self.premium_role) in ctx.author.roles\n\n @commands.command(\n \"출석\", aliases=[\"출석체크\", \"출첵\", \"ㅊ\"], brief=\"팀 크레센도 디스코드 서버에 출석하고 열쇠를 얻습니다.\",\n )\n async def attend(self, ctx):\n user, _ = await request(\"get\", f\"/discords/{ctx.author.id}\")\n if len(user) == 0:\n return await ctx.send(\n f\"\"\"{ctx.author.mention}, ⚠️ 팀 
크레센도 FORTE에 가입하지 않은 계정입니다.\n출석체크 보상으로 POINT를 지급받기 위해선 FORTE 가입이 필요합니다.\n하단의 링크에서 Discord 계정 연동을 통해 가입해주세요.\n> https://forte.team-crescendo.me/login/discord\"\"\"\n )\n\n try:\n key_count = await api.post_attendace(ctx.author.id)\n except api.AttendanceError as e:\n self.logger.log(e.level, f\"{ctx.author.id} attend failure, {e.status}\")\n return await ctx.send(f\"{ctx.author.mention}, {e}\")\n\n self.logger.info(f\"{ctx.author.id} attend success, key_count = {key_count}\")\n progress = key_count * \"🔑\" + (10 - key_count) * \"❔\"\n return await ctx.send(\n f\"\"\"{ctx.author.mention}, ⚡ **출석 체크 완료!**\n\n{progress}\n\n모은 열쇠로 상자를 열면 POINT를 받을 수 있습니다. (`라라야 상자` 입력)\n\n※ `💎Premium` 역할을 갖고 있으면 상자를 열 때 필요한 열쇠가 줄어듭니다. (<#748566390528671774> 확인)\"\"\"\n )\n\n async def select_box(self, ctx: commands.Context) -> str:\n \"\"\"\n 사용자의 입력에 따라 TimeoutError 또는 KeyError가 발생할 수 있습니다.\n \"\"\"\n is_premium = self.is_premium(ctx)\n description = \"\\n\".join(\n describe_box(box, is_premium) for box in box_types.values()\n )\n embed = Embed(title=\"어떤 상자를 열어볼까요?\", description=description)\n\n prompt = await ctx.send(ctx.author.mention, embed=embed)\n emoji_map = {box[\"emoji\"]: key for key, box in box_types.items()}\n user_input = await interface.input_emojis(ctx, prompt, [*emoji_map.keys(), \"❌\"])\n return emoji_map[user_input]\n\n @commands.command(\"상자\", brief=\"열쇠를 사용하여 상자를 열고 확률적으로 포인트를 받습니다.\")\n async def unpack_box(self, ctx):\n key_count = await api.get_key_count(ctx.author.id)\n if key_count == 0:\n await ctx.send(\n f\"{ctx.author.mention}, 상자를 열 수 있는 열쇠가 없습니다.\\n\"\n + \"`라라야 출석` 명령어로 매일 열쇠를 하나씩 얻을 수 있습니다.\"\n )\n return\n\n box_type = await self.select_box(ctx)\n\n prompt = await ctx.send(\n ctx.author.mention,\n embed=box_open_view(box_types[box_type], self.is_premium(ctx), key_count),\n )\n if await interface.is_confirmed(ctx, prompt):\n await prompt.edit(\n content=f\"{ctx.author.mention}, **{box_types[box_type]['name']}**를 여는 중...\",\n embed=None,\n )\n try:\n point, remaining_keys = await api.unpack_box(\n ctx.author.id, box_type, self.is_premium(ctx)\n )\n self.logger.info(\n f\"{ctx.author.id} unpack success, {box_type}, point = {point}, key_count = {remaining_keys}\"\n )\n await ctx.send(\n f\"{ctx.author.mention}, 상자를 열어 **{point}P**를 얻었습니다! 
(남은 열쇠: **{remaining_keys}개**)\"\n )\n except api.AttendanceError as e:\n self.logger.log(e.level, f\"{ctx.author.id} unpack failure, {e.status}\")\n return await ctx.send(f\"{ctx.author.mention}, {e}\")\n\n @commands.command(\"구독\", brief=\"전용 구독자 역할을 지급받거나 반환합니다.\")\n async def subscribe(self, ctx):\n role = ctx.guild.get_role(self.subscriber_role)\n if role is None:\n return await ctx.send(\"⚠️ 구독자 역할을 찾을 수 없습니다.\")\n\n if role not in ctx.author.roles:\n await ctx.author.add_roles(role)\n await ctx.send(f\"{ctx.author.mention}, 구독자 역할을 지급했습니다.\")\n else:\n await ctx.author.remove_roles(role)\n await ctx.send(f\"{ctx.author.mention}, 구독자 역할을 회수했습니다.\")\n\n\ndef setup(bot):\n bot.add_cog(User(bot))\n","sub_path":"src/extensions/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":7295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"361816290","text":"def programa():\n # entrada\n num1 = int(input('Número 1: '))\n num2 = int(input('Número 2: '))\n\n # processamento\n soma = num1 + num2\n diferenca = num1 - num2\n produto = num1 * num2\n quociente = num1 // num2\n resto = num1 % num2\n potencia = num1 ** num2\n\n # saída\n print('Soma dos números é', soma)\n print('A diferença entre os números é {}'.format(diferenca))\n print(f'O produto dos número é {produto}')\n print('O quociente exato dos números é %d' % (quociente))\n print(f'O resto da divisao inteira é {resto}')\n print(f'A potencia é {potencia}')\n\n\nprograma()\n","sub_path":"programacao_estruturada/20192_186/resolucao_prova_final/operacoes.py","file_name":"operacoes.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"493999447","text":"import socket\nimport entity_pb2\nimport numpy as np\nimport struct\nimport sprite\n\nclass Eye(object):\n \"\"\"Connects to perception system and writes data to python\n friendly containers\"\"\"\n def __init__(self,ipAddress, port):\n self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.targetIP = ipAddress\n self.targetPort = port\n self.sock.connect((self.targetIP,self.targetPort))\n return\n\n def read(self, size):\n ## Read until the complete data is received\n recvList = ''\n recvSize = 0\n while(recvSize < size):\n recvPiece = self.sock.recv(min(2048,size - recvSize))\n if(recvPiece == ''):\n print('Connection broken')\n self.disconnect()\n return ''\n else:\n recvList += recvPiece\n recvSize += len(recvPiece)\n\n return recvList\n\n def write(self, message):\n try:\n ## Send the length of data\n numBytes = socket.htonl(len(message))\n self.sock.sendall(struct.pack('I',numBytes))\n ## Send until all of the data is sent\n self.sock.sendall(message)\n except socket.error:\n print('Connection broken')\n self.disconnect()\n return\n\n def getMessage(self):\n numBytes = int(socket.ntohl(struct.unpack('I', self.read(4))[0]))\n messageString = self.read(numBytes)\n entityList = entity_pb2.EntityList()\n entityList.ParseFromString(messageString)\n return entityList\n\n def getSpriteList(self, screenSize):\n entityList = self.getMessage()\n spriteList = []\n for entity in entityList.entityList:\n patchSize = (entity.image.height,entity.image.width,4)\n if not min(patchSize) <= 0:\n patch = np.fromstring(entity.image.data,dtype='uint8').reshape(patchSize)\n spriteList.append(sprite.Sprite(patch,[entity.pose.position.x, entity.pose.position.y, entity.pose.angle],screenSize))\n return spriteList,entityList.command\n\n def 
sendMessage(self, message):\n messageString = message.SerializeToString()\n self.write(messageString)\n return\n\n def disconnect(self):\n self.sock.shutdown()\n self.sock.close()","sub_path":"procamPy/src/eye.py","file_name":"eye.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"247826277","text":"from networks import networks\nimport ipaddress\n\nif __name__ == '__main__':\n # python3 ./026/003_ipcheck/app.py > ./026/003_ipcheck/table.md\n hosts = dict()\n print(f'Сегмент сети | имя подсети | IP-адресация | broadcast-IP')\n print(f'--- | --- | --- | ---')\n for segment_name, subnetworks in networks.items():\n for subnetwork_ip, subnetwork_name in subnetworks.items():\n subnetwork_obj = ipaddress.ip_network(subnetwork_ip)\n print(f'{segment_name} | {subnetwork_name} | {subnetwork_ip} | {subnetwork_obj.broadcast_address} ')\n","sub_path":"026/003_ipcheck/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"279390475","text":"\"\"\"\nmodule implementing the match subcommand;\nmatching patterns against the index\n\"\"\"\n\nfrom . import names\nfrom . import utils\nfrom . import seqs\nfrom . import suffix\n\ndef buildparser(p):\n p.add_argument(\"pattern\", help=\"string to match against the index\")\n p.add_argument(\"indexname\", help=\"name of the index\")\n p.add_argument(\"--mode\", \"-m\", default=\"full\",\n choices={\"full\",\"suffix\",\"prefix\",\"bms\",\"mum\",\"mm\"},\n help=\"search mode (default: full)\")\n p.add_argument(\"--minlen\", type=int, default=0,\n help=\"minimum length of matches to report\")\n p.add_argument(\"--errors\", \"-e\", type=int, default=0,\n help=\"maximum number of allowed errors (mismatches+indels)\")\n p.add_argument(\"--nolcp\", action=\"store_true\",\n help=\"never use the lcp array (save memory, waste time)\")\n p.add_argument(\"--occrate\", default=0, type=int,\n help=\"specify which *.occ file should be used [0: choose fastest]\")\n\n\ndef main(args):\n clock = utils.TicToc()\n iname = args.indexname\n \n # encode the pattern according to index alphabet\n alphabet = seqs.get_alphabet_from_indexname(iname)\n pattern = list(alphabet.encoded(args.pattern, terminate=False))\n #print(\"pattern is {} with length {}\".format(pattern,len(pattern)))\n \n mode = args.mode\n if mode in {\"full\", \"suffix\", \"prefix\", \"mum\", \"mm\"}:\n do_matching(pattern, iname, args, clock)\n elif mode in {\"bms\"}:\n do_bms(pattern, iname, args, clock)\n else:\n raise NotImplementedError(\"Method / mode {} not implemented\".format(mode))\n # done\n print(clock.toc(),\"done\")\n\n\ndef _get_suffix_array(iname, args, clock):\n mode = args.mode\n # get an empty suffix array\n sa = suffix.SuffixArray.from_indexname(iname)\n # we need bwt and occ in any case\n print(clock.toc(),\"reading {}, {}...\".format(names.bwt(iname),names.occ(iname,\"*\")))\n sa.bwt_from_indexname(iname)\n sa.occ_from_indexname(iname, args.occrate)\n print(clock.toc(),\" occrate =\", sa.occ.occrate)\n # in some cases, we need lcp1 as well\n if mode in {\"prefix\",\"mm\",\"mum\",\"bms\"} and not args.nolcp:\n print(clock.toc(),\"reading {}...\".format(names.lcp1(iname)))\n sa.lcp_from_indexname(iname,1)\n return sa\n \n\ndef do_matching(pat, iname, args, clock):\n sa = _get_suffix_array(iname, args, clock)\n mode = args.mode; minlen = args.minlen; errors = args.errors;\n 
print(clock.toc(),\"matching backwards...\")\n if mode in {\"full\", \"suffix\"}:\n matches = sa.backward_search(pat, minlen=minlen, errors=errors)\n elif mode in {\"prefix\"}:\n matches = sa.prefix_search(pat, minlen=minlen, errors=errors)\n elif mode in {\"mm\", \"mum\"}:\n matches = sa.mms(pat, minlen=minlen, errors=errors, unique=(mode==\"mum\"))\n matches = list(matches)\n # now, matches is a list with (i, matchlen, L, R) tuples\n # to report, we need the sequence and the pos array\n sa.bwt = None; sa.occ = None\n print(clock.toc(),\"reading {}, {}...\".format(names.seq(iname),names.pos(iname)))\n sa.seq_from_indexname(iname)\n sa.pos_from_indexname(iname)\n m = len(pat)\n for (i, j, mlen, L, R) in matches:\n if mlen < m and args.mode == \"full\": continue\n for r in range(L, R+1):\n p = sa.pos[r]\n print(i, j, mlen, p, sa.substring(p, p+mlen))\n \ndef do_bms(pat, iname, args, clock):\n sa = _get_suffix_array(iname, args, clock)\n minlen = args.minlen\n print(clock.toc(),\n \"computing backward matching statistics (lcp_threshold={})...\".format(sa.lcp_threshold))\n bms = sa.backward_matching_statistics(pat, errors = args.errors)\n oldstartpos = len(pat)\n for (i,startpos,mlen,L,R) in bms:\n if mlen >= minlen:\n if startpos == oldstartpos:\n jump = \".\"\n else:\n jump = \"+U\" if R==L else \"+\" \n print(i, startpos, mlen, R-L+1, jump)\n oldstartpos = startpos\n \n\n","sub_path":"mamaslemonpy/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"417766239","text":"#!/usr/bin/env python3\n\n\"\"\"\n================================================================================\nTest code for iface module\n================================================================================\n\"\"\"\n\nfrom angelic.iface import Interface\n\nfrom unittest import TestCase\nfrom unittest import main\nfrom unittest.mock import patch\n\n\nclass TestInterface(TestCase):\n\n # --------------------------------------------------------------------------\n @patch('netifaces.ifaddresses')\n def test_init_handles_iface_param(self, mock_addrs):\n \"\"\"Should use the interface param to call netifaces.ifaddresses once\"\"\"\n test_string = 'wlan0'\n iface = Interface(test_string)\n mock_addrs.assert_called_once_with(test_string)\n\n @patch('netifaces.ifaddresses')\n @patch('angelic.iface.Interface.get_default_iface')\n def test_init_handles_default_case(self, mock_iface, mock_addrs):\n \"\"\"Should call get_default_iface\"\"\"\n iface = Interface()\n mock_iface.assert_called_once_with()\n\n # --------------------------------------------------------------------------\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test/test_iface.py","file_name":"test_iface.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"247858517","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom logging import Formatter\nfrom conf import load_config\n\nconf = load_config()\n\nlogger = logging.getLogger('distributionfile')\nlogger.setLevel(logging.DEBUG)\n\n\nformatter = Formatter('%(asctime)s %(levelname)s: %(message)s ')\n\nfile_handler = RotatingFileHandler(conf.LOGFILE, maxBytes=10 * 1024 * 1024,\n backupCount=10)\nfile_handler.setLevel(logging.DEBUG)\nfile_handler.setFormatter(formatter)\n\n\nconsole_handler = 
logging.StreamHandler()\nconsole_handler.setLevel(logging.DEBUG)\nconsole_handler.setFormatter(formatter)\n\nlogger.addHandler(file_handler)\nlogger.addHandler(console_handler)\n","sub_path":"app/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"30965835","text":"\"\"\"\n@author: Vincent Bonnet\n@description : Client implementation to communicates with the server\n\"\"\"\n\nimport functools\nfrom multiprocessing.managers import SyncManager\n\nclass ServerQueueManager(SyncManager):\n pass\n\nServerQueueManager.register('get_job_queue')\nServerQueueManager.register('get_result_queue')\n\n\nclass ClientDispatcher:\n def __init__(self, client):\n self.client = client\n commands = self.client.run('get_commands')\n if not commands:\n return\n\n for command in commands:\n if hasattr(self, command):\n raise ValueError(f'in register_cmd() {command} already registered')\n\n func = functools.partial(self.run, command)\n setattr(self, command, func)\n\n def run(self, command_name, **kwargs):\n return self.client.run(command_name, **kwargs)\n\nclass Client:\n '''\n Client to connect and dispatch commands to a Server\n '''\n def __init__(self, name = \"noname\"):\n self._manager = None\n self._job_queue = None\n self._result_queue = None\n self._name = name # name of the client for server log\n\n def __del__(self):\n self.disconnect_from_server()\n\n def is_connected(self):\n return self._manager is not None\n\n def get_dispatcher(self):\n return ClientDispatcher(self)\n\n def connect_to_server(self, ip=\"127.0.0.1\", port=8013, authkey='12345'):\n try:\n self._manager = ServerQueueManager(address=(ip, port), authkey=bytes(authkey,encoding='utf8'))\n self._manager.connect()\n self._job_queue = self._manager.get_job_queue()\n self._result_queue = self._manager.get_result_queue()\n print('Client connected to %s:%s' % (ip, port))\n return True\n except Exception as e:\n self._manager = None\n self._job_queue = None\n self._result_queue = None\n print('Exception raised by client : ' + str(e))\n return False\n\n def run(self, command_name, **kwargs):\n if self.is_connected():\n self._job_queue.put((command_name, self._name, kwargs))\n result = self._result_queue.get(block=True)\n return result\n return None\n\n def disconnect_from_server(self):\n if self.is_connected():\n self._job_queue.put(('close_server', self._name))\n\n","sub_path":"implicit_solver/host_app/rpc/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"484066612","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom torch.utils.tensorboard import SummaryWriter\nfrom s4l_loss import s4l_loss\n\nclass s4l_framework(object):\n def __init__(self, network, hps, train_loader, test_loader):\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.network = network\n self.hps = hps\n self.optimizer = optim.Adam(self.network.parameters(), lr=self.hps.initial_lr)\n self.scheduler = optim.lr_scheduler.StepLR(self.optimizer,\n step_size=self.hps.lr_decay_steps,\n gamma=self.hps.lr_decay_rate)\n self.s4l_labeled_loss = s4l_loss(self.hps.num_classes)\n self.labeled_loss = nn.CrossEntropyLoss()\n self.global_steps = 0\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.writer = SummaryWriter(log_dir=os.path.join(os.getcwd(), 'log', 
self.hps.exp_name))\n\n def train(self, epoch):\n self.network.train()\n\n train_loss = 0\n correct = 0\n total = 0\n\n if not self.hps.s4l_mode:\n for batch_idx, (inputs, targets) in enumerate(self.train_loader):\n self.global_steps += 1\n inputs = inputs.to(self.device)\n targets = targets.to(self.device)\n\n self.optimizer.zero_grad()\n outputs = self.network(inputs)\n loss = self.labeled_loss(outputs, targets)\n\n loss.backward()\n self.optimizer.step()\n self.scheduler.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n train_loss = train_loss / total\n acc = 100 * correct / total\n\n print('training acc: ', acc)\n self.writer.add_scalar('train_loss', train_loss, epoch)\n self.writer.add_scalar('train_acc', acc, epoch)\n self.writer.add_scalar('learning_rate', self.scheduler.get_lr()[0], epoch)\n\n return acc, self.global_steps\n \n else:\n for batch_idx, (inputs, class_labels, rotation_labels) in enumerate(self.train_loader):\n self.global_steps += 1\n inputs = inputs.to(self.device)\n class_labels = class_labels.to(self.device)\n rotation_labels = rotation_labels.to(self.device)\n \n self.optimizer.zero_grad()\n outputs = self.network(inputs)\n class_outputs = outputs[0]\n rotation_outputs = outputs[1]\n\n supervised_loss = self.s4l_labeled_loss(class_outputs, class_labels)\n ss_loss = self.labeled_loss(rotation_outputs, rotation_labels)\n\n loss = supervised_loss + ss_loss\n loss.backward()\n self.optimizer.step()\n self.scheduler.step()\n\n return \n\n \n def test(self, epoch):\n self.network.eval()\n\n test_loss = 0\n correct = 0\n total = 0\n\n if not self.hps.s4l_mode:\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(self.test_loader):\n inputs = inputs.to(self.device)\n targets = targets.to(self.device)\n outputs = self.network(inputs)\n loss = self.labeled_loss(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n \n test_loss = test_loss / total\n acc = 100 * correct / total\n\n print('validation acc: ', acc)\n self.writer.add_scalar('test_loss', test_loss, epoch)\n self.writer.add_scalar('test_acc', acc, epoch)\n\n return acc\n \n else:\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(self.test_loader):\n inputs = inputs.to(self.device)\n targets = targets.to(self.device)\n outputs = self.network(inputs)\n class_outputs = outputs[0]\n loss = self.labeled_loss(class_outputs, targets)\n\n test_loss += loss.item()\n _, predicted = class_outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n \n test_loss = test_loss / total\n acc = 100 * correct / total\n\n print('validation acc: ', acc)\n self.writer.add_scalar('test_loss', test_loss, epoch)\n self.writer.add_scalar('test_acc', acc, epoch)\n\n return acc","sub_path":"ssl_torch/s4l_framework.py","file_name":"s4l_framework.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"151923865","text":"import pytest\n# from django.conf import settings\nfrom django.urls import reverse, resolve\n\nfrom mhep.assessments.models import Assessment, Library\n\npytestmark = pytest.mark.django_db\n\n\ndef test_assessments_home():\n assert (\n reverse(\"assessments:home\") == \"/\"\n )\n assert resolve(\"/\").view_name == \"assessments:home\"\n\n\ndef 
test_list_create_assessments(assessment: Assessment):\n assert (\n reverse(\"assessments:list-create-assessments\") == \"/api/v1/assessments/\"\n )\n assert resolve(\"/api/v1/assessments/\").view_name == \"assessments:list-create-assessments\"\n\n\ndef test_assessment_detail_update_destroy(assessment: Assessment):\n assert (\n reverse(\"assessments:retrieve-update-destroy-assessment\", kwargs={\"pk\": assessment.id})\n == f\"/api/v1/assessments/{assessment.id}/\"\n )\n assert (\n resolve(f\"/api/v1/assessments/{assessment.id}/\").view_name\n == \"assessments:retrieve-update-destroy-assessment\"\n )\n\n\ndef test_list_create_libraries():\n assert (\n reverse(\"assessments:list-create-libraries\") == \"/api/v1/libraries/\"\n )\n assert resolve(\"/api/v1/libraries/\").view_name == \"assessments:list-create-libraries\"\n\n\ndef test_update_destroy_library(library: Library):\n assert (\n reverse(\"assessments:update-destroy-library\", kwargs={\"pk\": library.id})\n == f\"/api/v1/libraries/{library.id}/\"\n )\n assert (\n resolve(f\"/api/v1/libraries/{library.id}/\").view_name\n == \"assessments:update-destroy-library\"\n )\n\n\ndef test_create_library_item(library: Library):\n assert (\n reverse(\"assessments:create-update-delete-library-item\", kwargs={\"pk\": library.id})\n == f\"/api/v1/libraries/{library.id}/items/\"\n )\n assert (\n resolve(f\"/api/v1/libraries/{library.id}/items/\").view_name\n == \"assessments:create-update-delete-library-item\"\n )\n\n\ndef test_update_destroy_library_item(library: Library):\n tag = \"SW_01\"\n\n assert (\n reverse(\"assessments:create-update-delete-library-item\", kwargs={\"pk\": library.id, \"tag\": tag})\n == f\"/api/v1/libraries/{library.id}/items/{tag}/\"\n )\n assert (\n resolve(f\"/api/v1/libraries/{library.id}/items/{tag}/\").view_name\n == \"assessments:create-update-delete-library-item\"\n )\n\n\ndef test_list_organisations():\n assert (\n reverse(\"assessments:list-organisations\") == \"/api/v1/organisations/\"\n )\n assert (\n resolve(\"/api/v1/organisations/\").view_name == \"assessments:list-organisations\"\n )\n\n\ndef test_list_create_organisation_assessments():\n assert (\n reverse(\"assessments:list-create-organisation-assessments\", kwargs={\"pk\": 1})\n == \"/api/v1/organisations/1/assessments/\"\n )\n assert (\n resolve(\"/api/v1/organisations/1/assessments/\").view_name\n == \"assessments:list-create-organisation-assessments\"\n )\n","sub_path":"mhep/mhep/assessments/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"152052183","text":"import MapReduce\nimport sys\n\n\"\"\"\nWord Indexing in the Simple Python MapReduce Framework\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\ndef mapper(record): \n # key: order_id\n #record[1]\n \n # value: tuple \n #record[0] \n #\"line_item\" indicates that the record is a line item.\n #\"order\" indicates that the record is an order. 
\n key = record[1]\n value = record \n mr.emit_intermediate(key,value)\n\ndef reducer(key, list_of_values):\n # key: document identifier\n # value: list of occurrence\n\n order = [wlist for wlist in list_of_values if wlist[0] == \"order\"]\n line = [wlist for wlist in list_of_values if wlist[0] == \"line_item\"]\n\n for v in order: \n for l in line:\n tup = list(v)\n tup.extend(l)\n mr.emit(tup)\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","sub_path":"assignment3/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"339507021","text":"#!/usr/bin/env python3\nimport os\nimport re\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport subprocess\nimport sys,json,uuid\nimport numpy as np\nfrom collections import defaultdict\n\nmain = [\"total\", \"modules\", \"resources\" ]\nnet = [ \"pnet\", \"dtau\", \"dmet\" , \"other\" ]\ntype = dict()\ntype[\"pnet\"] = [\"BoostedJetONNXJetTagsProducer\" , \"ParticleNetSonicJetTagsProducer\", \"DeepFlavourONNXJetTagsProducer\", \"DeepDoubleXONNXJetTagsProducer\" ]\ntype[\"dtau\"] = [\"DeepTauIdSonicProducer\", \"DeepTauId\"]\ntype[\"dmet\"] = [\"DeepMETSonicProducer\", \"DeepMETProducer\"]\ntype[\"other\"] = [\"other\"]\n\ndef parse(fjson):\n\n data = json.load(open(fjson))\n nev = data[\"total\"][\"events\"]\n\n rtime = dict()\n ctime = dict()\n for n in net:\n rtime[n] = 0\n ctime[n] = 0\n\n for m in data[\"modules\"]:\n for n in net:\n if m[\"type\"] in type[n]:\n rtime[n] += m[\"time_real\"]\n ctime[n] += m[\"time_thread\"]\n\n trtime = data[\"total\"][\"time_real\"]\n tctime = data[\"total\"][\"time_thread\"]\n\n # print(\"%s %f %f (%.2f%%) %f (%.2f%%) %f (%.2f%%) %f (%.2f%%)\" % (cwd, trtime/nev, \n # rtime['pnet']/nev, rtime['pnet']/trtime*100,\n # rtime['dtau']/nev, rtime['dtau']/trtime*100,\n # rtime['dmet']/nev, rtime['dmet']/trtime*100,\n # rtime['other']/nev, rtime['other']/trtime*100) )\n return trtime/nev, rtime['pnet']/nev, rtime['dtau']/nev, rtime['dmet']/nev, rtime['other']/nev\n\n\n\ndir = subprocess.check_output('ls q* -1d', shell=True).split()\nsc = np.zeros(len(dir))\nfor c,d in zip(sc,dir):\n c = int(d[1:])\n\nrtime = defaultdict(list)\npnet = defaultdict(list)\ndtau = defaultdict(list)\ndmet = defaultdict(list)\nother = defaultdict(list)\n\nfor d in dir:\n sc = int(d[1:])\n print (d, sc)\n #outs = [ f for f in listdir(d) if f.endswith( \".json\".encode()) ]\n outs = [ f for f in listdir(d) if f.startswith(\"result_sonic_\".encode()) ]\n \n for out in outs:\n res = parse( join(d, out) )\n \n rtime[sc] += [ res[0] ]\n pnet[sc] += [ res[1] ]\n dtau[sc] += [ res[2] ]\n dmet[sc] += [ res[3] ]\n other[sc] += [ res[4] ]\n\noutput = {\n \"rtime\": rtime,\n \"pnet\": pnet,\n \"dtau\": dtau,\n \"dmet\": dmet,\n \"other\": other\n}\n\n# write out\nCWD = os.getcwd().split('/')[-1]\nresult = json.dumps(output,sort_keys=False,indent=4)\noname = \"timers_{}.json\".format(CWD)\nwith open(oname,'w') as ofile:\n ofile.write(result)\n\n","sub_path":"scaling_test_template/collect_timers.py","file_name":"collect_timers.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"308762415","text":"from __future__ import annotations\n\nimport functools\nimport inspect\nfrom itertools import groupby\nfrom typing 
import Any, Callable, Dict, List, Sequence, Tuple, Type, TypeVar, Union\n\nfrom baize.asgi import FormData\nfrom pydantic import BaseConfig, BaseModel, ValidationError, create_model\n\nfrom indexpy.exceptions import RequestValidationError\nfrom indexpy.requests import request\nfrom indexpy.utils import safe_issubclass\n\nfrom .fields import FieldInfo, RequestInfo, Undefined\n\nCallableObject = TypeVar(\"CallableObject\", bound=Callable)\n\n\ndef create_model_config(title: str = None, description: str = None) -> Type[BaseConfig]:\n class ExclusiveModelConfig(BaseConfig):\n schema_extra = {\n k: v\n for k, v in {\"title\": title, \"description\": description}.items()\n if v is not None\n }\n\n return ExclusiveModelConfig\n\n\ndef parse_signature(function: CallableObject) -> CallableObject:\n sig = inspect.signature(function)\n\n __parameters__: Dict[str, Any] = {\n key: {} for key in [\"path\", \"query\", \"header\", \"cookie\", \"body\"]\n }\n __exclusive_models__: Dict[BaseModel, str] = {}\n\n for name, param in sig.parameters.items():\n if not isinstance(param.default, FieldInfo):\n continue\n\n if param.POSITIONAL_ONLY:\n raise TypeError(\n f\"Parameter {name} cannot be defined as positional only parameters.\"\n )\n\n default = param.default\n annotation = param.annotation\n\n if getattr(default, \"exclusive\", False):\n if safe_issubclass(annotation, BaseModel):\n model = annotation\n else:\n model = create_model(\n \"temporary_exclusive_model\",\n __config__=create_model_config(default.title, default.description),\n __root__=(annotation, ...),\n )\n __parameters__[default._in] = model\n __exclusive_models__[model] = name\n else:\n if safe_issubclass(__parameters__[default._in], BaseModel):\n raise RuntimeError(\n f\"{default._in.capitalize()}(exclusive=True) \"\n f\"and {default._in.capitalize()} cannot be used at the same time\"\n )\n if annotation == param.empty:\n annotation = Any\n __parameters__[default._in][name] = (annotation, default)\n\n for key, params in filter(\n lambda kv: kv[1],\n ((key, __parameters__.pop(key)) for key in tuple(__parameters__.keys())),\n ):\n if safe_issubclass(params, BaseModel):\n model = params\n else:\n model = create_model(\"temporary_model\", **params)\n __parameters__[key] = model\n\n if \"body\" in __parameters__:\n setattr(function, \"__request_body__\", __parameters__.pop(\"body\"))\n\n if __parameters__:\n setattr(function, \"__parameters__\", __parameters__)\n\n if __exclusive_models__:\n setattr(function, \"__exclusive_models__\", __exclusive_models__)\n\n request_attrs = {\n name: param.default\n for name, param in sig.parameters.items()\n if isinstance(param.default, RequestInfo)\n }\n if request_attrs:\n setattr(function, \"__request_attrs__\", request_attrs)\n\n __signature__ = inspect.Signature(\n parameters=[\n param\n for param in sig.parameters.values()\n if not isinstance(param.default, (FieldInfo, RequestInfo))\n ],\n return_annotation=sig.return_annotation,\n )\n setattr(function, \"__signature__\", __signature__)\n\n return function\n\n\ndef _merge_multi_value(\n items: Sequence[Tuple[str, Any]]\n) -> Dict[str, Union[str, List[str]]]:\n \"\"\"\n If there are values with the same key value, they are merged into a List.\n \"\"\"\n return {\n k: v_list if len(v_list) > 1 else v_list[0]\n for k, v_list in (\n (k, list(v for _, v in kv_iter))\n for k, kv_iter in (\n lambda iterable, key: groupby(sorted(iterable, key=key), key=key)\n )(items, lambda kv: kv[0])\n )\n }\n\n\nasync def verify_params(handler: CallableObject) -> 
CallableObject:\n parameters: Dict[str, BaseModel] = getattr(handler, \"__parameters__\", None)\n request_body: BaseModel = getattr(handler, \"__request_body__\", None)\n request_attrs: Dict[str, RequestInfo] = getattr(handler, \"__request_attrs__\", None)\n if not (parameters or request_body or request_attrs):\n return handler\n\n exclusive_models: Dict[Type[BaseModel], str] = getattr(\n handler, \"__exclusive_models__\", {}\n )\n\n data: List[Any] = []\n kwargs: Dict[str, Any] = {}\n\n try:\n # try to get parameters model and parse\n if parameters:\n if \"path\" in parameters:\n data.append(parameters[\"path\"].parse_obj(request.path_params))\n\n if \"query\" in parameters:\n data.append(\n parameters[\"query\"].parse_obj(\n _merge_multi_value(request.query_params.multi_items())\n )\n )\n\n if \"header\" in parameters:\n data.append(parameters[\"header\"].parse_obj(request.headers))\n\n if \"cookie\" in parameters:\n data.append(parameters[\"cookie\"].parse_obj(request.cookies))\n\n # try to get body model and parse\n if request_body:\n _body_data = await request.data()\n if isinstance(_body_data, FormData):\n _body_data = _merge_multi_value(_body_data.multi_items())\n data.append(request_body.parse_obj(_body_data))\n\n # try to get request instance attributes\n if request_attrs:\n for name, info in request_attrs.items():\n try:\n value: Any = functools.reduce(\n lambda attr, name: getattr(attr, name),\n (info.alias or name).split(\".\"),\n request,\n )\n except AttributeError:\n if info.default is not Undefined:\n value = info.default\n elif info.default_factory is not None:\n value = info.default_factory()\n else:\n raise\n kwargs[name] = (await value) if inspect.isawaitable(value) else value\n\n except ValidationError as e:\n raise RequestValidationError(e)\n\n for _data in data:\n if _data.__class__.__name__ == \"temporary_model\":\n kwargs.update(_data.dict())\n elif _data.__class__.__name__ == \"temporary_exclusive_model\":\n kwargs[exclusive_models[_data.__class__]] = _data.__root__\n else:\n kwargs[exclusive_models[_data.__class__]] = _data\n return functools.partial(handler, **kwargs) # type: ignore\n\n\ndef create_new_callback(callback: CallableObject) -> CallableObject:\n @functools.wraps(callback)\n async def callback_with_auto_bound_params(*args, **kwargs):\n p = await verify_params(callback)\n return await p(*args, **kwargs)\n\n return callback_with_auto_bound_params # type: ignore\n\n\nhas_wrapped_by_auto_params = lambda function: (\n hasattr(function, \"__parameters__\")\n or hasattr(function, \"__request_body__\")\n or hasattr(function, \"__request_attrs__\")\n)\n\n\ndef auto_params(handler: CallableObject) -> CallableObject:\n if inspect.isclass(handler) and hasattr(handler, \"__methods__\"):\n for method in map(lambda x: x.lower(), handler.__methods__): # type: ignore\n function = getattr(handler, method)\n if has_wrapped_by_auto_params(function):\n continue\n callback = parse_signature(function)\n setattr(handler, method, create_new_callback(callback))\n return handler\n elif inspect.iscoroutinefunction(handler):\n callback = parse_signature(handler)\n return create_new_callback(callback)\n else:\n return handler\n","sub_path":"indexpy/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":8055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"501466090","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 10 00:25:29 2017\n\n@author: James Jiang\n\"\"\"\n\nelements = 
list(range(256))\nlengths_not_ascii = [130, ',', 126, ',', 1, ',', 11, ',', 140, ',', 2, ',', 255, ',', 207, ',', 18, ',', 254, ',', 246, ',', 164, ',', 29, ',', 104, ',', 0, ',', 224]\nlengths = []\n\nfor i in range(len(lengths_not_ascii)):\n digits = [j for j in str(lengths_not_ascii[i])]\n for k in range(len(digits)):\n lengths.append(ord(digits[k]))\n\nlengths.extend([17, 31, 73, 47, 23])\n\nskip = 0\nelements_pointer = 0\n\nfor count in range(64):\n for i in range(len(lengths)):\n current_length = lengths[i]\n if elements_pointer + current_length >= len(elements):\n sublist1 = elements[elements_pointer:]\n sublist2 = elements[0:current_length - len(sublist1)]\n sublist = sublist1 + sublist2\n else:\n sublist = elements[elements_pointer:elements_pointer + current_length]\n sublist.reverse()\n if elements_pointer + current_length >= len(elements):\n elements[elements_pointer:] = sublist[0:len(sublist1)]\n elements[0:current_length - len(sublist1)] = sublist[len(sublist1):]\n else:\n elements[elements_pointer:elements_pointer + current_length] = sublist\n if elements_pointer + (current_length + skip) >= len(elements):\n elements_pointer = elements_pointer + current_length + skip - len(elements) \n while True:\n if elements_pointer in range(len(elements)):\n break\n else:\n elements_pointer -= len(elements)\n else:\n elements_pointer += (current_length + skip)\n skip += 1\n\ndense_hash_list = []\n\nfor i in range(0, len(elements), 16):\n hash = elements[i] ^ elements[i + 1] ^ elements[i + 2] ^ elements[i + 3] ^ elements[i + 4] ^ elements[i + 5] ^ elements[i + 6] ^ elements[i + 7] ^ elements[i + 8] ^ elements[i + 9] ^ elements[i + 10] ^ elements[i + 11] ^ elements[i + 12] ^ elements[i + 13] ^ elements[i + 14] ^ elements[i + 15]\n dense_hash_list.append(hash)\n\nknot_hash = ''\n\nfor i in range(0, len(dense_hash_list)):\n string = hex(dense_hash_list[i])[2:]\n if len(string) == 1:\n string = '0' + string\n knot_hash += string\n \nprint(knot_hash)\n \n","sub_path":"python/2017day10part2.py","file_name":"2017day10part2.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"574384788","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\nimport os\r\ndstPath = 'E:\\\\CodeProject'\r\ntxt = 'getString'\r\nfileExt = '.h'\r\ndef SearchText(dir, txt, fileExt):\r\n list = os.listdir(dir)\r\n for line in list:\r\n filepath = os.path.join(dir, line)\r\n if os.path.isdir(filepath):\r\n SearchText(filepath, txt, fileExt)\r\n\r\n if filepath.endswith(fileExt):\r\n try:\r\n with open(filepath, 'r', encoding='gbk') as f:\r\n lines = f.readlines()\r\n #print(lines)\r\n if str(lines).find(txt) != -1:\r\n print(filepath)\r\n print('------')\r\n except:\r\n print(\"xxxxxxxxxxxxxxxxxxxxx: %s\" % filepath)\r\n\r\n\r\nSearchText(dstPath, txt, fileExt)","sub_path":"SearchText.py","file_name":"SearchText.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"357117732","text":"import asyncio\nimport io\nimport logging\nfrom pathlib import Path\n\nimport aiohttp\nfrom aiogram import Bot, Dispatcher, types\n\nfrom .config import settings\n\nlogging.basicConfig(level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nwith Path(__file__).parent.joinpath(\"templates/message.html\").open() as f:\n email_template = f.read()\n\n\nasync def send_email(message: types.Message):\n if settings.TG_HASTAG_ALL in 
message.html_text:\n email_to = settings.EMAIL_TO_ALL\n else:\n email_to = settings.EMAIL_TO\n\n # удаляем хештег и переносы строк в конце сообщения\n email_text = (\n message.html_text.replace(f\"#{settings.TG_HASTAG_ALL}\", \"\")\n .replace(f\"#{settings.TG_HASTAG}\", \"\")\n .rstrip(\"\\n\")\n )\n\n async with aiohttp.ClientSession() as session:\n data = aiohttp.FormData()\n data.add_field(\"from\", settings.EMAIL_FROM)\n data.add_field(\"to\", email_to)\n data.add_field(\"subject\", settings.EMAIL_SUBJECT)\n data.add_field(\n \"html\",\n email_template.replace(\"%message%\", email_text).replace(\"\\n\", \"
\"),\n )\n if message.document is not None:\n data.add_field(\n \"attachment\",\n await message.document.download(destination_file=io.BytesIO()),\n filename=message.document.file_name,\n )\n params = {\n \"auth\": aiohttp.BasicAuth(\"api\", settings.MAILGUN_API_KEY),\n \"data\": data,\n }\n for _ in range(5):\n resp = await session.post(\n f\"https://api.mailgun.net/v3/{settings.EMAIL_DOMAIN}/messages\", **params\n )\n if resp.status == 200:\n break\n logger.warning(\"Mailgun response status: %s\", resp.status)\n await asyncio.sleep(3)\n\n\ndef create_bot() -> Dispatcher:\n bot = Bot(token=settings.TG_BOT_TOKEN, parse_mode=types.ParseMode.HTML)\n dp = Dispatcher(bot)\n dp.register_channel_post_handler(\n send_email,\n hashtags=[settings.TG_HASTAG, settings.TG_HASTAG_ALL],\n chat_id=settings.TG_CHAT_ID,\n content_types=[types.ContentType.TEXT, types.ContentType.DOCUMENT],\n )\n return dp\n","sub_path":"app/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"73948732","text":"import os\nimport shutil\nimport zipfile\nfrom os.path import join, getsize\n\ndef zip_file(src_dir):\n # 压缩指定文件夹\n zip_name = src_dir + '.zip'\n # src_dir: 压缩的文件夹路径;zip_name: 压缩后zip文件的路径及名称\n z = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(src_dir):\n fpath = dirpath.replace(src_dir,'')\n fpath = fpath and fpath + os.sep or ''\n for filename in filenames:\n z.write(os.path.join(dirpath, filename), fpath + filename)\n print ('==压缩成功==')\n z.close()\n\ndef unzip_file(zip_src, dst_dir):\n #zip_src: zip文件的全路径,dst_dir: 要解压到的目的文件夹\n r = zipfile.is_zipfile(zip_src)\n if r:\n fz = zipfile.ZipFile(zip_src, 'r')\n for file in fz.namelist():\n fz.extract(file,dst_dir)\n else:\n print(\"This is not zip\")\n\n\"\"\"\n python其他操作文件方法:\n 1.剪切(移动)文件到指定目录\n shutil.move(filename, dst_dir)\n 2.删除文件夹\n shutil.rmtree(src_dir)\n 3.删除指定文件\n os.remove(file_src)\n 4.新建文件夹\n os.mkdir(dst_dir)\n 5.遍历文件夹\n for filename in os.listdir(src_dir)\n 6.复制文件\n shutil.copyfile(src_file,dst_file)\n 7.获取文件夹大小\n def get_dir_size(dir_path):\n size = 0L\n for root, dirs, files in os.walk(dir_path):\n size += sum([getsize(join(root, name)) for name in files])\n return size\n 可以根据文件大小做出不同的判断:\n file_size = get_dir_size(DATA_PATH)\n max_size = file_size /1024/ 1024 #获取以MB为单位的值\n if max_size < 100:\n pass\n \n\"\"\"","sub_path":"exercise_11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"360612730","text":"\"\"\" Order factory to choose order status according to\nthe service provider\n\"\"\"\nimport logging\n\nfrom django.conf import settings\n\nfrom oms.custom_requests import GoogleDistanceApiRequest\nfrom order import models as order_model\nfrom service_provider.opinio.functions import OrderStatus\n\nlogger = logging.getLogger('common')\nlogger = logging.LoggerAdapter(logger, settings.LOGGING_EXTRA)\n\ndef order_status(workflow):\n \"\"\" get the order status on the basis of\n service provider\n Args:\n workflow : workflow instance\n return:\n response_content : parsed response data \n\n \"\"\"\n if workflow.state in settings.SYSTEM_STATUS: return {}\n order_event = order_model.get_order_event(workflow.order, workflow.state)\n if order_event:\n additional_data = order_event[0].additional_data\n if 'order_code' or 'state' in additional_data:\n 
additional_data.pop('order_code', None), additional_data.pop('state', None)\n return additional_data\n return {}\n \n\ndef order_serviceability(order_payload):\n \"\"\" get the order status on the basis of\n service provider\n Args:\n merchant_id : merchant id,\n client_id : client id\n return:\n response_content : parsed response data \n\n \"\"\"\n try:\n from oms.custom_requests import MerchantRequest\n client_id = order_payload['client_id']\n merchant_id = order_payload['order_details']['pickup']['merchant_id']\n r = MerchantRequest(merchant_id, client_id)\n response = r.send()\n try:\n merchant_id = response.json()[0]['merchant_id']\n if merchant_id:\n from service_provider.opinio.functions import OrderServiceabilityCheck\n order_serviceability = OrderServiceabilityCheck(merchant_id=merchant_id)\n response = order_serviceability.send()\n import ast\n response_content = ast.literal_eval(response.content)\n if response_content['GP_availability'] == 1 or response_content['GP_availability'] == 0:\n return 1\n else:\n return 0\n else:\n return\n except:\n return\n except:\n return\n\n\ndef get_order_delivery_distance_time(origin_lat, origin_long, des_lat, des_long,departure_time=None):\n \"\"\" get order delivery time accoring to their lat long\n Args:\n origin_lat : origin lattitude\n origin_long : origin longitude\n des_lat : destination lattitude\n des_long : destination longitude\n departure_time : time when fleet departs\n \"\"\"\n req = GoogleDistanceApiRequest(origin_lat, origin_long, des_lat, des_long, departure_time)\n response = req.send()\n logger.info(\"google api response status is %s\", response)\n import ast\n response_content = ast.literal_eval(response.content)\n logger.info(\"google api response is %s\", response_content)\n distance = response_content['rows'][0].get('elements')[0].get('distance').get('value')\n if departure_time:\n duration = response_content['rows'][0].get('elements')[0].get('duration_in_traffic').get('value')\n else:\n duration = response_content['rows'][0].get('elements')[0].get('duration').get('value') \n return distance, duration\n\n\n","sub_path":"order/order_factory.py","file_name":"order_factory.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"349363497","text":"# William La Cava\n# mock experiment for comparing recommenders.\nimport pdb\nimport os\nimport pandas as pd\nimport numpy as np\nimport argparse\nfrom ai.recommender.average_recommender import AverageRecommender\nfrom ai.recommender.random_recommender import RandomRecommender\n# from ai.recommender.meta_recommender import MetaRecommender\nfrom ai.recommender.knn_meta_recommender import KNNMetaRecommender\n# from ai.recommender.mlp_meta_recommender import MLPMetaRecommender\nfrom ai.recommender.svd_recommender import SVDRecommender\nfrom ai.recommender.surprise_recommenders import *\n\nfrom joblib import Parallel, delayed\nfrom collections import OrderedDict\nfrom mock_experiment.mf_utils import local_get_metafeatures, update_dataset_mf\nimport warnings \nwarnings.simplefilter(\"ignore\")\n# define a comparison function that tests a recommender on the datasets, \n#using an intial knowledge base.\ndef run_experiment(rec,data_idx,n_recs,trial,knowledge_base,ml_p,n_init, iters):\n \"\"\"generates recommendations for datasets, using the first n_init as knowledge base.\"\"\"\n # set seed\n np.random.seed(trial)\n results = []\n kwargs = {'metric':'bal_accuracy','ml_p':ml_p}\n # if rec == 'svd': \n 
# kwargs.update({'datasets':knowledge_base.dataset.unique()})\n rec_choice = {'random': RandomRecommender,\n 'average': AverageRecommender,\n 'knnmeta': KNNMetaRecommender,\n 'svd': SVDRecommender,\n 'cocluster': CoClusteringRecommender,\n 'knnmeans': KNNWithMeansRecommender,\n 'knnml': KNNMLRecommender,\n 'knndataset': KNNDatasetRecommender,\n 'slopeone': SlopeOneRecommender\n }\n # rec_choice = {'random': RandomRecommender,\n # 'average': AverageRecommender,\n # # 'meta': MetaRecommender,\n # # 'mlp': MLPMetaRecommender,\n # 'knn': KNNMetaRecommender,\n # 'svd': SVDRecommender\n # }\n\n recommender = rec_choice[rec](**kwargs)\n #pdb.set_trace()\n #################################################### load first n_init results into recommender\n subset_ml = np.random.choice(ml_p['algorithm'].unique(), size= n_init)\n train_subset = []\n for s in subset_ml:\n train_subset.append(np.random.choice(\n knowledge_base.loc[knowledge_base.algorithm==s].index))\n print('setting training data for recommender:')\n tmp = knowledge_base.iloc[train_subset]\n for _,row in tmp.iterrows():\n print(row.algorithm,':',row.dataset,':',row.bal_accuracy)\n\n init_data = []\n init_data_mf = []\n # for i in train_subset:\n # init_data.append(knowledge_base.loc[knowledge_base['dataset']==i])\n # init_df = pd.concat(init_data)\n init_df = knowledge_base.iloc[train_subset]\n # get the metafeatures for the initial datasets\n for i,_ in init_df.groupby('dataset'):\n init_data_mf.append(local_get_metafeatures(i))\n\n dataset_mf = pd.concat(init_data_mf).set_index('dataset')\n print('initial training on',len(init_df),'results')\n recommender.update(init_df, dataset_mf)\n #################################################### \n\n\n ########################################################################## main experiment loop\n datasets = data_idx\n # loop thru rest of datasets\n # for it,dataset in enumerate(rec_subset):\n for it, dataset in data_idx:\n holdout_accuracy_lookup = knowledge_base.loc[\n knowledge_base['dataset'] == dataset].set_index(\n ['algorithm', 'parameter_hash']).loc[:, 'bal_accuracy'].to_dict()\n holdout_rank_lookup = knowledge_base.loc[\n knowledge_base['dataset'] == dataset].set_index(\n ['algorithm', 'parameter_hash']).loc[:, 'ranking'].to_dict()\n\n print('generating ',n_recs,'recommendations for',dataset)\n\n # for each dataset, generate a recommendation\n mls, ps, scores = recommender.recommend(dataset_id=dataset,\n n_recs=n_recs,\n dataset_mf=local_get_metafeatures(dataset)\n )\n print('got',len(mls),'recs')\n updates = []\n for i in np.arange(len(mls)):\n ml = mls[i]\n p = ps[i]\n phash = hash(frozenset(p.items()))\n\n print('recommending',ml,'with',p,'for',dataset)\n if (ml,phash) not in holdout_accuracy_lookup:\n raise ValueError((ml,phash),'not found')\n \n # n = n+1\n # retreive the performance of the recommended learner\n actual_score = holdout_accuracy_lookup[(ml, phash)]\n actual_ranking = holdout_rank_lookup[(ml,phash)]\n # find all top ranking algorithms\n dataset_results = knowledge_base.loc[knowledge_base['dataset'] == dataset]\n best_score = dataset_results['bal_accuracy'].max()\n best_algs = dataset_results.loc[dataset_results['ranking']==1]['algorithm'].unique()\n best_algorithm = '|'.join(list(best_algs)) \n # Update the recommender with the score from its latest guess\n updates.append(pd.DataFrame(data={'dataset': [dataset],\n 'algorithm': [ml],\n 'parameters': [p],\n 'bal_accuracy': [actual_score]})\n )\n \n # store the trial, iteration, dataset, recommender, ml rec, param 
rec,bal_accuracy\t\n results.append({'trial':trial,\n 'iteration':it,\n 'n_recs':n_recs,\n 'n_init':n_init,\n 'iters':iters,\n 'recommender':rec,\n 'dataset':dataset,\n 'ml-rec':ml,\n 'p-rec':p,\n 'score-rec':scores[0],\n 'bal_accuracy':actual_score,\n 'max_bal_accuracy':best_score,\n 'best_algorithm':best_algorithm,\n 'ranking':actual_ranking,\n 'delta_bal_accuracy':(best_score-actual_score)/best_score})\n\n print('updating recommender...')\n if len(updates)>0:\n update_record = pd.concat(updates)\n dataset_mf = update_dataset_mf(dataset_mf, update_record)\n recommender.update(update_record,dataset_mf)\n else:\n print('WARNINING: got no updates for',dataset,'. There are ',\n len([r for r in results if r['dataset'] == dataset]),\n 'results for this dataset already and',len(ml_p),\n 'unique ml+p combos')\n \n ########################################################################## main experiment loop\n return results\n\n\nif __name__ == '__main__':\n \"\"\"run experiment\"\"\"\n\n parser = argparse.ArgumentParser(description='Run a PennAI a recommender experiment.', \n add_help=False)\n parser.add_argument('-h','--help',action='help',\n help=\"Show this help message and exit.\")\n # parser.add_argument('-rec',action='store',dest='rec',default='random', \n # help='Recommender to run.') \n parser.add_argument('-rec',action='store',dest='rec',default='random',\n choices = ['random','average','knnmeta','svd','cocluster','knnmeans',\n 'knnml','knndataset','slopeone'],\n help='Recommender algorithm options.')\n parser.add_argument('-n_recs',action='store',dest='n_recs',type=int,default=1,help='Number of '\n ' recommendations to make at a time. If zero, will send continous '\n 'recommendations until AI is turned off.')\n parser.add_argument('-v','-verbose',action='store_true',dest='verbose',default=False,\n help='Print out more messages.')\n parser.add_argument('-n_init',action='store',dest='n_init',type=int,default=10,\n help='Number of initial datasets to seed knowledge database')\n parser.add_argument('-iters',action='store',dest='iters',type=int,default=100,\n help='Number of initial datasets to seed knowledge database')\n parser.add_argument('-t',action='store',dest='trial',type=int,default=0,\n help='Trial number')\n parser.add_argument('-data',action='store',dest='KNOWL',type=str,\n default='mock_experiment/simulation_experiment.tsv',\n help='knowledge base')\n parser.add_argument('-resdir',action='store',dest='RESDIR',type=str,\n default='results',\n help='results directory')\n\n args = parser.parse_args()\n \n # load knowledge base(s)\n if ',' in args.KNOWL:\n data_files = args.KNOWL.split(',')\n else:\n data_files = [args.KNOWL]\n\n for data_file in data_files:\n print(70*'=','\\n','loading',data_file,'\\n'+70*'=','\\n')\n knowledge_base = pd.read_csv(data_file,\n compression='gzip', sep='\\t').fillna('')#,\n ml_p = knowledge_base.loc[:,['algorithm','parameters']].drop_duplicates()\n ml_p['parameters'] = ml_p['parameters'].apply(\n lambda x: eval(x))\n knowledge_base['parameters'] = knowledge_base['parameters'].apply(\n lambda x: eval(x))\n knowledge_base['parameter_hash'] = knowledge_base['parameters'].apply(\n lambda x: hash(frozenset(x.items())))\n\n print('len ml_p:',len(ml_p)) \n data_idx = np.unique(knowledge_base['dataset']) # datasets \n \n\n # output file\n out_file = ('mock_experiment/' + args.RESDIR + '/experiment_' \n + data_file.split('/')[-1].split('.')[0]\n + '_rec-{}'.format(args.rec) \n + '_ninit-{}'.format(args.n_init)\n + '_nrecs-{}'.format(args.n_recs)\n + 
'_iters-{}'.format(args.iters) \n + '_trial-{}'.format(args.trial) \n + '.csv') \n\n # run experiment\n # np.random.shuffle(data_idx) # shuffle datasets\n print('rec:',args.rec,'n_recs:',args.n_recs,'n_init:',args.n_init,\n 'iters:',args.iters)\n results = run_experiment(args.rec,\n data_idx,\n args.n_recs,\n args.trial,\n knowledge_base,\n ml_p,\n args.n_init,\n args.iters)\n\n df_results = pd.DataFrame.from_records(results,columns=results[0].keys()) \n # write results\n if os.path.exists(out_file):\n with open(out_file, 'a') as out:\n df_results.to_csv(out,index=False,header=False)\n else:\n df_results.to_csv(out_file,index=False)\n\n print('done. results written to ', out_file)\n","sub_path":"code/mock_experiment/sim_run_experiment.py","file_name":"sim_run_experiment.py","file_ext":"py","file_size_in_byte":11239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"279388566","text":"import random\n\n#draw is the hangman drawing\ndraw = []\nf = open(\"pendu.txt\", \"r\")\ni = 0\nfor x in f:\n draw.append(x)\n i = i + 1\nf.close()\n#Dict is the word dictionary (here a list)\ndict = []\nd = open(\"dict.txt\", \"r\")\ni = 0\nfor x in d:\n dict.append(x)\n i = i + 1\nd.close()\n\n#Choosing the word\nword = random.randrange(0, len(dict))\nwordlen = len(dict[word]) - 1\nword = dict[word]\nhideword = [\"_ \"] * wordlen\nprint(hideword)\n\nstep = 0\nfail = 0\nwin = False\nlettre = \"00\"\n#Game loop\nwhile fail < 8 and win == False:\n #choose letter\n while lettre.isalpha() == False or len(lettre) > 1 :\n lettre = input(\"Choose a letter : \")\n if len(lettre) > 1 :\n print(\"One letter at the time please\")\n if lettre.isalpha() == False :\n print(\"Sorry, I take only letter\")\n lettre = lettre.upper()\n #Check if letter is in the word\n if lettre in word :\n for x in range(len(word)):\n if word[x] == lettre:\n hideword[x] = lettre\n else :\n while \";\" not in draw[step] and step + 1 < len(draw):\n print(draw[step])\n step = step + 1\n step = step +1\n fail = fail +1\n print(hideword)\n #check if you win\n if \"_ \" in hideword:\n win = False\n else :\n win = True\n lettre = \"00\"\n\nif win == True:\n print(\"Congratulations, you win\")\nelse :\n print(\"Too bad, you lose\")\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"10271070","text":"def Hirei(x):\n y = 3 * x\n return y\n #ここで比例の関数(function)は終わり。\n\nYnoAtai = Hirei(3)\nprint(\"y=\" + str(YnoAtai))\n\n#for x in range(0, 100):\n# y = Hirei(a, x)\n# print(y)\n\n\n\n","sub_path":"math/hirei.py","file_name":"hirei.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"517896904","text":"#!/usr/bin/env python\n\n\"\"\" A unittest script for the WgsAssembledSeqSet module. \"\"\"\n\nimport unittest\nimport json\nimport tempfile\n\nfrom cutlass import WgsAssembledSeqSet\n\nfrom CutlassTestConfig import CutlassTestConfig\nfrom CutlassTestUtil import CutlassTestUtil\n\n# pylint: disable=W0703, C1801\n\nclass WgsAssembledSeqSetTest(unittest.TestCase):\n \"\"\" A unit test class for the WgsAssembledSeqSet class. \"\"\"\n\n session = None\n util = None\n\n @classmethod\n def setUpClass(cls):\n \"\"\" Setup for the unittest. 
\"\"\"\n # Establish the session for each test method\n cls.session = CutlassTestConfig.get_session()\n cls.util = CutlassTestUtil()\n\n def testImport(self):\n \"\"\" Test the importation of the WgsAssembledSeqSet module. \"\"\"\n success = False\n try:\n from cutlass import WgsAssembledSeqSet\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(WgsAssembledSeqSet is None)\n\n def testSessionCreate(self):\n \"\"\" Test the creation of a WgsAssembledSeqSet via the session. \"\"\"\n success = False\n seq_set = None\n\n try:\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(seq_set is None)\n\n def testPrivateFiles(self):\n \"\"\" Test the private_files property. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.util.boolTypeTest(self, seq_set, \"private_files\")\n\n self.util.boolPropertyTest(self, seq_set, \"private_files\")\n\n def testToJson(self):\n \"\"\" Test the generation of JSON from a WgsAssembledSeqSet instance. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n success = False\n\n comment = \"Test comment\"\n private_files = False\n\n seq_set.comment = comment\n seq_set.private_files = private_files\n\n seqset_json = None\n\n try:\n seqset_json = seq_set.to_json()\n success = True\n except Exception:\n pass\n\n self.assertTrue(success, \"Able to use 'to_json'.\")\n self.assertTrue(seqset_json is not None, \"to_json() returned data.\")\n\n parse_success = False\n\n try:\n seqset_data = json.loads(seqset_json)\n parse_success = True\n except Exception:\n pass\n\n self.assertTrue(parse_success, \"to_json() did not throw an exception.\")\n self.assertTrue(seqset_data is not None, \"to_json() returned parsable JSON.\")\n\n self.assertTrue('meta' in seqset_data, \"JSON has 'meta' key in it.\")\n\n self.assertEqual(seqset_data['meta']['comment'],\n comment, \"'comment' in JSON had expected value.\")\n\n self.assertEqual(seqset_data['meta']['private_files'],\n private_files,\n \"'private_files' in JSON had expected value.\"\n )\n\n def testId(self):\n \"\"\" Test the id property. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.assertTrue(seq_set.id is None,\n \"New template WgsAssembledSeqSet has no ID.\")\n\n with self.assertRaises(AttributeError):\n seq_set.id = \"test\"\n\n def testVersion(self):\n \"\"\" Test the version property. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.assertTrue(seq_set.version is None,\n \"New template WgsAssembledSeqSet has no version.\")\n\n with self.assertRaises(ValueError):\n seq_set.version = \"test\"\n\n def testAssembler(self):\n \"\"\" Test the assembler property. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.util.stringTypeTest(self, seq_set, \"assembler\")\n\n self.util.stringPropertyTest(self, seq_set, \"assembler\")\n\n def testAssemblyName(self):\n \"\"\" Test the assembly_name property. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.util.stringTypeTest(self, seq_set, \"assembly_name\")\n\n self.util.stringPropertyTest(self, seq_set, \"assembly_name\")\n\n def testChecksumsLegal(self):\n \"\"\" Test the checksums property. 
\"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n success = False\n checksums = {\"md5\": \"asdf32qrfrae\"}\n\n try:\n seq_set.checksums = checksums\n success = True\n except Exception:\n pass\n\n self.assertTrue(success, \"Able to use the 'checksums' setter.\")\n\n self.assertEqual(seq_set.checksums['md5'],\n checksums['md5'],\n \"Property getter for 'checksums' works.\")\n\n def testComment(self):\n \"\"\" Test the comment property. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.util.stringTypeTest(self, seq_set, \"comment\")\n\n self.util.stringPropertyTest(self, seq_set, \"comment\")\n\n def testIllegalFormat(self):\n \"\"\" Test the format property with an illegal value. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n with self.assertRaises(Exception):\n seq_set.format = 1\n\n def testLegalFormat(self):\n \"\"\" Test the format property with a legal value. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n success = False\n format_ = \"fasta\"\n\n try:\n seq_set.format = format_\n success = True\n except Exception:\n pass\n\n self.assertTrue(success, \"Able to use the 'format' setter\")\n\n self.assertEqual(seq_set.format, format_,\n \"Property getter for 'format' works.\")\n\n def testFormatDoc(self):\n \"\"\" Test the format_doc property. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.util.stringTypeTest(self, seq_set, \"format_doc\")\n\n self.util.stringPropertyTest(self, seq_set, \"format_doc\")\n\n def testIllegalSequenceType(self):\n \"\"\" Test the sequence_type property with an illegal value. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n # Test int argument\n with self.assertRaises(Exception):\n seq_set.sequence_type = 1\n\n # Test list argument\n with self.assertRaises(Exception):\n seq_set.sequence_type = ['a', 'b', 'c']\n\n # Test dict argument\n with self.assertRaises(Exception):\n seq_set.sequence_type = {'a': 1, 'b': 2}\n\n def testLegalSequenceType(self):\n \"\"\" Test the sequence_type property with a legal value. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n success = False\n sequence_type = \"nucleotide\"\n\n try:\n seq_set.sequence_type = sequence_type\n success = True\n except Exception:\n pass\n\n self.assertTrue(success, \"Able to use the 'sequence_type' setter.\")\n\n self.assertEqual(seq_set.sequence_type, sequence_type,\n \"Property getter for 'sequence_type' works.\")\n\n def testSize(self):\n \"\"\" Test the size property. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.util.intTypeTest(self, seq_set, \"size\")\n\n self.util.intPropertyTest(self, seq_set, \"size\")\n\n def testTags(self):\n \"\"\" Test the tags property. 
\"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n tags = seq_set.tags\n self.assertTrue(type(tags) == list, \"tags() method returns a list.\")\n self.assertEqual(len(tags), 0, \"Template object 'tags' list is empty.\")\n\n new_tags = [\"tagA\", \"tagB\"]\n\n seq_set.tags = new_tags\n self.assertEqual(seq_set.tags, new_tags,\n \"Can set tags on a WgsAssembledSeqSet.\")\n\n json_str = seq_set.to_json()\n doc = json.loads(json_str)\n self.assertTrue('tags' in doc['meta'],\n \"JSON representation has 'tags' field in 'meta'.\")\n\n self.assertEqual(doc['meta']['tags'], new_tags,\n \"JSON representation had correct tags after setter.\")\n\n def testAddTag(self):\n \"\"\" Test the add_tag() method. \"\"\"\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n seq_set.add_tag(\"test\")\n self.assertEqual(seq_set.tags, [\"test\"],\n \"Can add a tag to a WgsAssembledSeqSet.\")\n\n json_str = seq_set.to_json()\n doc = json.loads(json_str)\n\n self.assertEqual(doc['meta']['tags'], [\"test\"],\n \"JSON representation had correct tags after add_tag().\")\n\n # Try adding the same tag yet again, shouldn't get a duplicate\n with self.assertRaises(ValueError):\n seq_set.add_tag(\"test\")\n\n json_str = seq_set.to_json()\n doc2 = json.loads(json_str)\n\n self.assertEqual(doc2['meta']['tags'], [\"test\"],\n \"JSON document did not end up with duplicate tags.\")\n\n def testRequiredFields(self):\n \"\"\" Test the required_fields() static method. \"\"\"\n required = WgsAssembledSeqSet.required_fields()\n\n self.assertEqual(type(required), tuple,\n \"required_fields() returns a tuple.\")\n\n self.assertTrue(len(required) > 0,\n \"required_field() did not return empty value.\")\n\n def testLoadSaveDeleteWgsAssembledSeqSet(self):\n \"\"\" Extensive test for the load, edit, save and delete functions. \"\"\"\n temp_file = tempfile.NamedTemporaryFile(delete=False).name\n\n # attempt to save the WgsAssembledSeqSet at all points before and\n # after adding the required fields\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n assembler = \"Test assembler\"\n assembly_name = \"Test assembly name\"\n checksums = {\"md5\": \"68b329da9893e34099c7d8ad5cb9c940\"}\n comment = \"Test comment\"\n format_ = \"fasta\"\n format_doc = \"http://example.com\"\n sequence_type = \"nucleotide\"\n size = 20000\n study = \"prediabetes\"\n links = {\"computed_from\":[]}\n\n self.assertFalse(seq_set.save(),\n \"WgsAssembledSeqSet not saved successfully, \" + \\\n \"no required fields.\")\n\n seq_set.comment = comment\n\n # Setting just the comment should not be sufficient. 
Let's verify.\n self.assertFalse(seq_set.save(),\n \"WgsAssembledSeqSet not saved successfully.\")\n\n seq_set.links = links\n\n self.assertFalse(seq_set.save(),\n \"WgsAssembledSeqSet not saved successfully.\")\n\n seq_set.assembler = assembler\n seq_set.assembly_name = assembly_name\n seq_set.checksums = checksums\n seq_set.format = format_\n seq_set.format_doc = format_doc\n seq_set.sequence_type = sequence_type\n seq_set.size = size\n seq_set.study = study\n seq_set.local_file = temp_file\n\n # make sure seq_set does not delete if it does not exist\n with self.assertRaises(Exception):\n seq_set.delete()\n\n self.assertTrue(seq_set.save() is True,\n \"WgsAssembledSeqSet was not saved successfully.\")\n\n # load the WgsAssembledSeqSet that was just saved from the OSDF instance\n seq_set_loaded = self.session.create_object(\"wgs_assembled_seq_set\")\n seq_set_loaded = seq_set_loaded.load(seq_set.id)\n\n # check all fields were saved and loaded successfully\n self.assertEqual(seq_set.comment,\n seq_set_loaded.comment,\n \"WgsAssembledSeqSet comment not saved & loaded successfully.\")\n\n # WgsAssembledSeqSet is deleted successfully\n self.assertTrue(seq_set.delete(),\n \"WgsAssembledSeqSet was not deleted successfully.\")\n\n # the WgsAssembledSeqSet of the initial ID should not load successfully\n load_test = self.session.create_object(\"wgs_assembled_seq_set\")\n with self.assertRaises(Exception):\n load_test = load_test.load(seq_set.id)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_wgs_assembled_seq_set.py","file_name":"test_wgs_assembled_seq_set.py","file_ext":"py","file_size_in_byte":12520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"383045904","text":"#!/usr/bin/env python\nimport rospy\nimport gazebo_msgs.msg\nimport random\nimport tf\nimport numpy as np\nimport time\nimport csv\nfrom gazebo_msgs.msg import ModelStates \nfrom gazebo_msgs.msg import ModelState \nfrom geometry_msgs.msg import Twist\nfrom gazebo_msgs.srv import SetModelState\nfrom turtlebot3_waypoint_navigation.srv import PheroReset, PheroResetResponse\nfrom math import *\nfrom time import sleep\n\nfrom std_msgs.msg import Float32MultiArray\n\nclass WaypointNavigation:\n\n MAX_FORWARD_SPEED = 0.5\n MAX_ROTATION_SPEED = 0.5\n cmdmsg = Twist()\n index = 0\n\n # Tunable parameters\n wGain = 10\n vConst = 0.5\n distThr = 0.2\n pheroThr = 1\n\n \n def __init__(self):\n\n self.num_robots = 1\n self.num_experiments = 20\n \n # Initialise pheromone values\n self.phero = [0.0] * 9\n self.phero_sum = 0.0\n \n # Initialise speed\n self.move_cmd = Twist()\n\n # Initialise positions\n self.goal = [4,0]\n self.obstacle = [[2,0], [-2,0], [0,2], [0,-2]]\n\n # Initialise ros related topics\n self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size = 1)\n self.sub = rospy.Subscriber('/gazebo/model_states', ModelStates, self.Callback, (self.pub, self.cmdmsg, self.goal))\n #self.sub_test = rospy.Subscriber('/gazebo/model_states', ModelStates, self.testCallback)\n self.sub_phero = rospy.Subscriber('/phero_value', Float32MultiArray, self.ReadPhero)\n self.rate = rospy.Rate(300)\n\n self.target_x = 4\n self.target_y = 0\n\n self.prev_x = 0.0\n self.prev_y = 0.0\n\n # Initialise parameters\n \n self.step_size = 0.1\n #self.b_range = np.arange(0, 1+self.step_size, self.step_size)\n self.v_range = np.arange(0.2, 1+self.step_size, self.step_size)\n self.w_range = np.arange(0.2, 1+self.step_size, self.step_size)\n\n self.BIAS = 0.25\n self.V_COEF = 
1.0#self.v_range[0]\n self.W_COEF = 0.2#self.w_range[0]\n\n #self.b_size = self.b_range.size \n self.v_size = self.v_range.size\n self.w_size = self.w_range.size\n \n self.b_counter = 0\n self.v_counter = 0\n self.w_counter = 0\n\n # Initialise simulation\n self.counter_step = 0\n self.counter_collision = 0\n self.counter_success = 0\n self.arrival_time = []\n self.target_index = 0\n self.radius = 4\n\n # Flags\n self.is_collided = False\n self.is_goal = False\n self.is_timeout = False\n\n # File name\n self.time_str = time.strftime(\"%Y%m%d-%H%M%S\")\n self.file_name = \"manual_{}_{}\".format(self.num_robots, self.time_str)\n print(self.file_name)\n\n # Initialise simulation\n self.reset_timer = time.time()\n self.reset()\n self.reset_flag = False\n \n def ReadPhero(self, message):\n phero_data = message.data \n self.phero = phero_data\n self.phero_sum = np.sum(np.asarray(phero_data))\n\n def Callback(self, message, cargs):\n\n '''\n Main Function\n - Receive position data\n - Generate action\n '''\n # ========================================================================= #\n\t # Initialisation #\n\t # ========================================================================= #\n\n pub, msg, goal = cargs\n goal = self.goal\n \n for i in range(len(message.name)):\n if message.name[i] == 'turtlebot3_waffle_pi':\n tb3 = i\n if message.name[i] == 'unit_sphere_0_0':\n tg = i\n pose = message.pose[tb3]\n twist = message.twist[tb3]\n \n pos = pose.position\n ori = pose.orientation\n angles = tf.transformations.euler_from_quaternion((ori.x, ori.y, ori.z, ori.w))\n\n theta = angles[2]\n\n # P controller\n v = 0\n w = 0\n \n # Index for # of goals\n index = self.index\n distance = sqrt((pos.x-self.target_x)**2+(pos.y-self.target_y)**2)\n\n # Reset condition reset (to prevent unwanted reset due to delay of position message subscription)\n step_timer = time.time()\n reset_time = step_timer - self.reset_timer\n\n # ========================================================================= #\n\t # Action & State assignment #\n\t # ========================================================================= #\n\n if (self.phero_sum > self.pheroThr):\n msg = self.PheroOA(self.phero)\n v = msg.linear.x\n w = msg.angular.z\n # Adjust velocities\n elif (distance > self.distThr):\n v = self.vConst\n yaw = atan2(self.target_y-pos.y, self.target_x-pos.x)\n u = yaw - theta \n bound = atan2(sin(u), cos(u))\n w = min(1.0, max(-1.0, self.wGain*bound))\n msg.linear.x = v\n msg.angular.z = w\n self.reset_flag = False\n elif (distance <= self.distThr and reset_time > 1):\n msg = Twist()\n self.is_goal = True\n self.reset()\n distance_to_obs = [1.0]*len(self.obstacle)\n for i in range(len(distance_to_obs)):\n distance_to_obs[i] = sqrt((pos.x-self.obstacle[i][0])**2+(pos.y-self.obstacle[i][1])**2)\n if (distance_to_obs[0] < 0.3 or distance_to_obs[1] < 0.3 or distance_to_obs[2] < 0.3 or distance_to_obs[3] < 0.3) and reset_time > 1:\n msg = Twist()\n self.is_collided = True\n self.reset()\n\n if reset_time > 40.0:\n print(\"Times up!\")\n self.is_timeout = True\n self.reset()\n\n\n # Publish velocity \n self.pub.publish(msg)\n\n\n self.prev_x = pos.x\n self.prev_y = pos.y\n\n \n # Reporting\n #print(\"Distance to goal {}\".format(distance))\n #print('Callback: x=%2.2f, y=%2.2f, dist=%4.2f, cmd.v=%2.2f, cmd.w=%2.2f' %(pos.x,pos.y,distance,v,w))\n \n # Angular velocity coefficient (When avg phero is high, it is more sensitive)\n def velCoef(self, value1, value2):\n '''\n - val_avg (0, 1)\n - val_dif (-1, 1)\n - dif_coef (1, 
2.714)\n - coefficient (-2.714, 2.714)\n '''\n val_avg = (value1 + value2)/2\n val_dif = value1 - value2\n dif_coef = exp(val_avg)\n \n return dif_coef*val_dif\n \n def PheroOA(self, phero):\n '''\n Pheromone-based obstacle avoidance algorithm\n - Input: 9 cells of pheromone\n - Output: Twist() to avoid obstacle\n '''\n # Constants:\n # Constants:\n BIAS = self.BIAS \n V_COEF = self.V_COEF \n W_COEF = self.W_COEF \n #BIAS = 0.25\n #V_COEF = 0.2\n #W_COEF = 0.3\n \n # Initialise values\n avg_phero = np.average(np.asarray(phero)) # values are assigned from the top left (135 deg) to the bottom right (-45 deg) ((0,1,2),(3,4,5),(6,7,8))\n unit_vecs = np.asarray([[1,0], [sqrt(2)/2, sqrt(2)/2], [0,1], [-sqrt(2)/2, sqrt(2)/2]])\n vec_coefs = [0.0] * 4\n twist = Twist()\n \n # Calculate vector weights\n vec_coefs[0] = self.velCoef(phero[5], phero[3])\n vec_coefs[1] = self.velCoef(phero[2], phero[6])\n vec_coefs[2] = self.velCoef(phero[1], phero[7])\n vec_coefs[3] = self.velCoef(phero[0], phero[8])\n vec_coefs = np.asarray(vec_coefs).reshape(4,1)\n vel_vecs = np.multiply(unit_vecs, vec_coefs)\n vel_vec = np.sum(vel_vecs, axis=0)\n\n ang_vel = W_COEF*atan2(vel_vec[1], vel_vec[0])\n\n # Velocity assignment\n twist.linear.x = BIAS + V_COEF*avg_phero\n twist.angular.z = ang_vel\n\n return twist\n \n def reset(self):\n \n '''\n Resettng the Experiment\n 1. Update the counter based on the flag from step\n 2. Assign next positions and reset\n 3. Log the result in every selected time-step\n '''\n\n\n # ========================================================================= #\n\t # COUNTER UPDATE #\n\t # ========================================================================= #\n\n # Increment Collision Counter\n if self.is_collided == True:\n print(\"Collision!\")\n self.counter_collision += 1\n self.counter_step += 1\n\n # Increment Arrival Counter and store the arrival time\n if self.is_goal == True:\n print(\"Arrived goal!\")\n self.counter_success += 1\n self.counter_step += 1\n arrived_timer = time.time()\n art = arrived_timer-self.reset_timer\n self.arrival_time.append(art)\n print(\"Episode time: %0.2f\"%art)\n\n if self.is_timeout == True:\n self.counter_collision += 1\n self.counter_step += 1\n print(\"Timeout!\")\n \n # Reset the flags\n self.is_collided = False\n self.is_goal = False\n self.is_timeout = False\n\n # ========================================================================= #\n\t # RESET #\n\t # ========================================================================= #\n\n angle_target = self.target_index*2*pi/self.num_experiments \n\n self.target_x = self.radius*cos(angle_target)\n self.target_y = self.radius*sin(angle_target)\n \n if self.target_index < self.num_experiments:\n self.target_index += 1\n else:\n self.target_index = 0\n \n self.is_collided = False\n\n # Reset Turtlebot position\n state_msg = ModelState()\n state_msg.model_name = 'turtlebot3_waffle_pi'\n state_msg.pose.position.x = 0.0\n state_msg.pose.position.y = 0.0 \n state_msg.pose.position.z = 0.0\n state_msg.pose.orientation.x = 0\n state_msg.pose.orientation.y = 0\n state_msg.pose.orientation.z = 0\n state_msg.pose.orientation.w = 0\n\n # Reset Target Position\n state_target_msg = ModelState() \n state_target_msg.model_name = 'unit_sphere_0_0' #'unit_sphere_0_0' #'unit_box_1' #'cube_20k_0'\n state_target_msg.pose.position.x = self.target_x\n state_target_msg.pose.position.y = self.target_y\n state_target_msg.pose.position.z = 0.0\n state_target_msg.pose.orientation.x = 0\n 
state_target_msg.pose.orientation.y = 0\n state_target_msg.pose.orientation.z = 0\n state_target_msg.pose.orientation.w = 0\n\n rospy.wait_for_service('gazebo/reset_simulation')\n\n rospy.wait_for_service('/gazebo/set_model_state')\n try: \n set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\n resp = set_state(state_msg)\n resp_targ = set_state(state_target_msg)\n except rospy.ServiceException as e:\n print(\"Service Call Failed: %s\"%e)\n\n self.move_cmd.linear.x = 0.0\n self.move_cmd.angular.z = 0.0\n self.pub.publish(self.move_cmd)\n self.pub.publish(self.move_cmd)\n\n rospy.wait_for_service('phero_reset')\n try:\n phero_reset = rospy.ServiceProxy('phero_reset', PheroReset)\n resp = phero_reset(True)\n print(\"Reset Pheromone grid successfully: {}\".format(resp))\n except rospy.ServiceException as e:\n print(\"Service Failed %s\"%e)\n\n \n # ========================================================================= #\n\t # LOGGING #\n\t # ========================================================================= #\n \n if self.counter_step == 0:\n with open('/home/swn/catkin_ws/src/turtlebot3_waypoint_navigation/src/log/csv/{}.csv'.format(self.file_name), mode='w') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(['Episode', 'Bias', 'Vcoef', 'Wcoef', 'Success Rate', 'Average Arrival time', 'Standard Deviation'])\n\n if self.counter_step != 0:\n if (self.counter_collision != 0 and self.counter_success != 0):\n succ_percentage = 100*self.counter_success/(self.counter_success+self.counter_collision)\n else:\n succ_percentage = 0\n print(\"Counter: {}\".format(self.counter_step))\n \n if (self.counter_step % 10 == 0 and self.counter_step != 0):\n print(\"BIAS: {}, V_COEF: {}, W_COEF: {}\".format(self.BIAS, self.V_COEF, self.W_COEF))\n print(\"Success Rate: {}%\".format(succ_percentage))\n\n if (self.counter_step % 20 == 0 and self.counter_step != 0):\n avg_comp = np.average(np.asarray(self.arrival_time))\n std_comp = np.std(np.asarray(self.arrival_time))\n print(\"{} trials ended. 
Success rate: {}, average completion time: {}, Standard deviation: {}\".format(self.counter_step, succ_percentage, avg_comp, std_comp))\n \n with open('/home/swn/catkin_ws/src/turtlebot3_waypoint_navigation/src/log/csv/{}.csv'.format(self.file_name), mode='a') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(['%i'%self.counter_step, '%0.2f'%self.BIAS, '%0.2f'%self.V_COEF, '%0.2f'%self.W_COEF, '%0.2f'%succ_percentage, '%0.2f'%avg_comp, '%0.2f'%std_comp])\n \n self.paramUpdate()\n self.arrival_time = []\n self.counter_collision = 0\n self.counter_success = 0\n self.target_index = 0\n \n\n self.reset_timer = time.time()\n self.reset_flag = True\n \n\n def paramUpdate(self):\n '''\n Parameter update after the number of experiments for a parameter set finished\n '''\n print(\"Parameters are updated!\")\n if (self.w_counter < self.w_size-1):\n self.w_counter += 1\n self.W_COEF = self.w_range[self.w_counter]\n elif (self.v_counter < self.v_size-1):\n self.w_counter = 0\n self.v_counter += 1\n self.W_COEF = self.w_range[self.w_counter]\n self.V_COEF = self.v_range[self.v_counter]\n else:\n print(\"Finish Iteration of parameters\")\n sys.exit()\n \n\nif __name__ == '__main__':\n rospy.init_node('pose_reading')\n wayN = WaypointNavigation()\n rospy.spin()\n\n\n","sub_path":"src/hand-tuned controller/manual_nav_oa_4.py","file_name":"manual_nav_oa_4.py","file_ext":"py","file_size_in_byte":14769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"28845820","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom .models import Transaction\nfrom .jobs.sanctionjob import SanctionJob\nfrom .jobs.pepjob import PEPJob\nfrom .jobs.assessmentjob import AssessmentJob\n\n\"\"\" \nSignals gets triggered when there are \nany saves to the database.\n\"\"\"\n\n\n@receiver(post_save, sender=Transaction)\ndef process_transaction(sender, instance, created, **kwarks):\n if created:\n # Lets kick off the first process\n print(\"POST SAVE CREATED\")\n sanctionJob = SanctionJob(transaction=instance)\n sanctionJob.run()\n else:\n # Check where we are in the process\n print (\"POST SAVE UPDATED {} {}\".format(instance.id, instance.status))\n if instance.status == Transaction.Status.PEPCHECK:\n print('Starting PEP job')\n pepjob = PEPJob(transaction=instance)\n pepjob.run()\n elif instance.status == Transaction.Status.ASSESSMENT:\n print('Starting Assessment job')\n assessmentjob = AssessmentJob(transaction=instance)\n assessmentjob.run()","sub_path":"base/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"490275481","text":"\"\"\"\nThis file is needed only for future reference to\nload data from wiki data.\nDOwnload link : http://people.apache.org/~mikemccand/enwiki-20120502-lines-1k.txt.lzma\n\"\"\"\nimport glob\nimport itertools\nimport os\nimport random\n\nfrom couchbase.bucket import Bucket\n\n\nclass Docgen(object):\n def __init__(self, location, cb_url, database, chunk_size=1024):\n self.chunk_size = chunk_size\n self.location = location\n self.cb_url = cb_url\n self.database = database\n self.counts = 0\n '''\n The keygen part to create keys to optimize space\n '''\n self.key1 = self.keygen(2)\n self.key2 = self.keygen(4)\n self.key3 = self.keygen(5)\n\n @staticmethod\n def keygen(self, keynum):\n '''\n the ranges 
includes all ascii characters from 47 - 123\n\n '''\n s = list(map(chr, range(47, 123)))\n random.shuffle(s)\n iters = itertools.permutations(s, keynum)\n return iters\n\n def read_in_chunks(self, file_object):\n \"\"\"Lazy function (generator) to read a file piece by piece.\n Default chunk size: 1k.\"\"\"\n while True:\n data = file_object.read(self.chunk_size)\n if not data:\n break\n yield data\n\n def insert_cb(self, cb, f):\n for piece in self.read_in_chunks(f):\n test = {}\n piece = piece.replace('\\n', ' ')\n test['text'] = piece\n key = \"\"\n if self.counts < 1000:\n key = self.key1.next()\n elif self.counts < 100000:\n key = self.key2.next()\n else:\n key = self.key3.next()\n key = ''.join(key)\n cb.upsert(key, test)\n\n if self.counts == 1000000:\n return\n self.counts += 1\n\n def start_load(self):\n os.chdir(self.location)\n c = Bucket(\"couchbase://{}/{}?operation_timeout=10\".format(self.cb_url, self.database))\n for file in glob.glob(\"*.txt\"):\n try:\n f = open(file)\n self.insert_cb(c, f)\n f.close()\n except Exception as e:\n print (self.counts, file)\n raise e\n\n\nA = Docgen('/data/wikidata', '172.23.123.38', 'bucket-1')\nA.start_load()\n","sub_path":"perfrunner/tests/FTS/dataload.py","file_name":"dataload.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"478784092","text":"import tensorflow as tf\r\nimport collections\r\nimport os\r\n#from PIL import Image\r\nimport random\r\nimport numpy as np\r\nfrom datetime import datetime\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nslim = tf.contrib.slim\r\n\r\nglobal first\r\nfirst = True\r\n\r\n'''\r\n如果激活函数使用sigmoid和tanh,最好使用xavir\r\ntf.contrib.layers.xavier_initializer_conv2d\r\n如果使用relu,则最好使用he initial\r\ntf.contrib.layers.variance_scaling_initializer\r\n'''\r\n\r\nclassnum=12\r\nbatchsize=128\r\ntestnum = tf.placeholder(tf.int32)\r\ntrainnum = tf.placeholder(tf.int32)\r\nvalidnum = tf.placeholder(tf.int32)\r\nlearnrate = tf.placeholder(tf.float32)\r\n\r\npath=r'C:\\workspace\\Alexnet\\model\\train_model.ckpt'\r\ntrainpath=[r'C:\\workspace\\Alexnet\\tfrecord\\train_L.tfrecords',\r\n r'C:\\workspace\\Alexnet\\tfrecord\\train_001_L.tfrecords']\r\ntestpath=[r'C:\\workspace\\Alexnet\\tfrecord\\test_L.tfrecords',\r\n r'C:\\workspace\\Alexnet\\tfrecord\\test_001_L.tfrecords']\r\nvalidpath = [r'C:\\workspace\\Alexnet\\tfrecord\\validation.tfrecords']\r\n\r\ndef getinputs(path,preprocess):\r\n filename_queue=tf.train.string_input_producer(path)\r\n reader=tf.TFRecordReader()\r\n _,serialized_example=reader.read(filename_queue)\r\n features=tf.parse_single_example(serialized_example,\r\n features={\r\n 'label':tf.FixedLenFeature([], tf.int64),\r\n 'img_raw' : tf.FixedLenFeature([], tf.string),\r\n })\r\n image=tf.decode_raw(features['img_raw'],tf.uint8)\r\n label=tf.cast(features['label'],tf.int32)\r\n image=tf.reshape(image,[64,64,1])\r\n if preprocess:\r\n #image=tf.image.random_flip_left_right(image)\r\n #image=tf.image.random_flip_up_down(image)\r\n image = tf.cast(image,tf.float32)\r\n \r\n #image = tf.image.random_hue(image, max_delta=0.05)\r\n image = tf.image.random_contrast(image, lower=0.8, upper=1.2)\r\n image = tf.image.random_brightness(image, max_delta=0.2)\r\n #image = tf.image.random_saturation(image, lower=0.0, upper=2.0)\r\n image = tf.minimum(image, 255.0)\r\n image = tf.maximum(image, 0.0)\r\n return image,label\r\n\r\ndef get_batch(image,label,batch_size,crop_size):\r\n #print(image.shape)\r\n 
#print(label.shape)\r\n #images,labels=tf.train.batch([image,label],batch_size=batch_size)\r\n images,labels=tf.train.shuffle_batch([image,label],\r\n batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=300)\r\n #images=[tf.image.random_flip_left_right(image,seed=None) for image in images]\r\n return images,tf.reshape(labels,[batch_size])\r\n\r\ndef get_test_batch(image,label,batch_size):\r\n images,labels=tf.train.shuffle_batch([image,label],\r\n batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=500)\r\n #images,labels=tf.train.batch([image,label],batch_size=batch_size)\r\n return images,tf.reshape(labels,[batch_size])\r\n\r\ndef get_valid_batch(image,label,batch_size):\r\n images,labels=tf.train.shuffle_batch([image,label],\r\n batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=500)\r\n #images,labels=tf.train.batch([image,label],batch_size=batch_size)\r\n return images,tf.reshape(labels,[batch_size])\r\n\r\nclass trainwork(object):\r\n def __init__(self):\r\n with tf.variable_scope('scop'):\r\n self.weights={\r\n 'conv1':tf.get_variable('w1', [3,3,1,24],initializer=tf.contrib.layers.variance_scaling_initializer()),\r\n 'conv2':tf.get_variable('w2', [3,3,24,48],initializer=tf.contrib.layers.variance_scaling_initializer()),\r\n 'conv3':tf.get_variable('w3', [3,3,48,56],initializer=tf.contrib.layers.variance_scaling_initializer()),\r\n 'conv4':tf.get_variable('w4', [3,3,56,56],initializer=tf.contrib.layers.variance_scaling_initializer()),\r\n 'conv5':tf.get_variable('w5', [3,3,56,24],initializer=tf.contrib.layers.variance_scaling_initializer()),\r\n 'fc1':tf.get_variable('w6', [8*8*24,1024],initializer=tf.contrib.layers.variance_scaling_initializer()),\r\n 'fc2':tf.get_variable('w7', [1024,512],initializer=tf.contrib.layers.variance_scaling_initializer()),\r\n 'out':tf.get_variable('w8', [512,classnum],initializer=tf.contrib.layers.variance_scaling_initializer())\r\n }\r\n self.biases={\r\n 'conv1': tf.get_variable('b1', [24],initializer=tf.constant_initializer(0.1)),\r\n 'conv2': tf.get_variable('b2', [48],initializer=tf.constant_initializer(0.1)),\r\n 'conv3': tf.get_variable('b3', [56],initializer=tf.constant_initializer(0.1)),\r\n 'conv4': tf.get_variable('b4', [56],initializer=tf.constant_initializer(0.1)),\r\n 'conv5': tf.get_variable('b5', [24],initializer=tf.constant_initializer(0.1)),\r\n 'fc1': tf.get_variable('b6', [1024],initializer=tf.constant_initializer(0.1)),\r\n 'fc2': tf.get_variable('b7', [512],initializer=tf.constant_initializer(0.1)),\r\n 'out': tf.get_variable('b8', [classnum],initializer=tf.constant_initializer(0.1))\r\n }\r\n\r\n def inference(self,images,preprocessed):\r\n if preprocessed:\r\n images=images/255.0\r\n else:\r\n images=tf.cast(images,tf.float32)/255.0\r\n \r\n conv1=tf.nn.conv2d(images,self.weights['conv1'],strides=[1,1,1,1],padding='SAME')+self.biases['conv1']\r\n relu1=tf.nn.relu(conv1)\r\n norm1=tf.nn.local_response_normalization(relu1,2,1.0,1,1)\r\n pool1=tf.nn.max_pool(norm1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n \r\n conv2 = tf.nn.conv2d(pool1,self.weights['conv2'],strides=[1,1,1,1],padding='SAME')+self.biases['conv2']\r\n relu2=tf.nn.relu(conv2)\r\n norm2=tf.nn.local_response_normalization(relu2,2,1.0,1,1)\r\n pool2=tf.nn.max_pool(norm2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n \r\n conv3=tf.nn.conv2d(pool2,self.weights['conv3'],strides=[1,1,1,1],padding='SAME')+self.biases['conv3']\r\n relu3=tf.nn.relu(conv3)\r\n \r\n 
conv4=tf.nn.conv2d(relu3,self.weights['conv4'],strides=[1,1,1,1],padding='SAME')+self.biases['conv4']\r\n relu4=tf.nn.relu(conv4)\r\n \r\n conv5=tf.nn.conv2d(relu4,self.weights['conv5'],strides=[1,1,1,1],padding='SAME')+self.biases['conv5']\r\n relu5=tf.nn.relu(conv5)\r\n pool5=tf.nn.max_pool(relu5,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n \r\n flat=tf.reshape(pool5,[-1,8*8*24])\r\n fc1=tf.matmul(flat, self.weights['fc1'])+self.biases['fc1']\r\n fc1_relu=tf.nn.relu(fc1)\r\n fc1_drop=tf.nn.dropout(fc1_relu,0.8)#dropout也可以放在其它层\r\n \r\n fc2=tf.matmul(fc1_drop, self.weights['fc2'])+self.biases['fc2']\r\n fc2_relu=tf.nn.relu(fc2)\r\n fc2_drop=tf.nn.dropout(fc2_relu,0.8)\r\n out=tf.matmul(fc2_drop,self.weights['out'])+self.biases['out']\r\n return out\r\n\r\n def test_inference(self,images):\r\n images=tf.cast(images,tf.float32)/255.0\r\n conv1=tf.nn.conv2d(images,self.weights['conv1'],strides=[1,1,1,1],padding='SAME')+self.biases['conv1']\r\n relu1=tf.nn.relu(conv1)\r\n norm1=tf.nn.local_response_normalization(relu1,2,1.0,1,1)\r\n pool1=tf.nn.max_pool(norm1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n \r\n conv2 = tf.nn.conv2d(pool1,self.weights['conv2'],strides=[1,1,1,1],padding='SAME')+self.biases['conv2']\r\n relu2=tf.nn.relu(conv2)\r\n norm2=tf.nn.local_response_normalization(relu2,2,1.0,1,1)\r\n pool2=tf.nn.max_pool(norm2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n \r\n conv3=tf.nn.conv2d(pool2,self.weights['conv3'],strides=[1,1,1,1],padding='SAME')+self.biases['conv3']\r\n relu3=tf.nn.relu(conv3)\r\n \r\n conv4=tf.nn.conv2d(relu3,self.weights['conv4'],strides=[1,1,1,1],padding='SAME')+self.biases['conv4']\r\n relu4=tf.nn.relu(conv4)\r\n \r\n conv5=tf.nn.conv2d(relu4,self.weights['conv5'],strides=[1,1,1,1],padding='SAME')+self.biases['conv5']\r\n relu5=tf.nn.relu(conv5)\r\n pool5=tf.nn.max_pool(relu5,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n \r\n flat=tf.reshape(pool5,[-1,8*8*24])\r\n fc1=tf.matmul(flat, self.weights['fc1'])+self.biases['fc1']\r\n fc1_relu=tf.nn.relu(fc1)\r\n #fc1_drop=tf.nn.dropout(fc1_relu,0.8)\r\n \r\n fc2=tf.matmul(fc1_relu, self.weights['fc2'])+self.biases['fc2']\r\n fc2_relu=tf.nn.relu(fc2)\r\n #fc2_drop=tf.nn.dropout(fc2_relu,0.8)\r\n out=tf.matmul(fc2_relu,self.weights['out'])+self.biases['out']\r\n return out\r\n \r\n def valid_inference(self,images):\r\n images=tf.cast(images,tf.float32)/255.0\r\n conv1=tf.nn.conv2d(images,self.weights['conv1'],strides=[1,1,1,1],padding='SAME')+self.biases['conv1']\r\n relu1=tf.nn.relu(conv1)\r\n norm1=tf.nn.local_response_normalization(relu1,2,1.0,1,1)\r\n pool1=tf.nn.max_pool(norm1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n \r\n conv2 = tf.nn.conv2d(pool1,self.weights['conv2'],strides=[1,1,1,1],padding='SAME')+self.biases['conv2']\r\n relu2=tf.nn.relu(conv2)\r\n norm2=tf.nn.local_response_normalization(relu2,2,1.0,1,1)\r\n pool2=tf.nn.max_pool(norm2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n \r\n conv3=tf.nn.conv2d(pool2,self.weights['conv3'],strides=[1,1,1,1],padding='SAME')+self.biases['conv3']\r\n relu3=tf.nn.relu(conv3)\r\n \r\n conv4=tf.nn.conv2d(relu3,self.weights['conv4'],strides=[1,1,1,1],padding='SAME')+self.biases['conv4']\r\n relu4=tf.nn.relu(conv4)\r\n \r\n conv5=tf.nn.conv2d(relu4,self.weights['conv5'],strides=[1,1,1,1],padding='SAME')+self.biases['conv5']\r\n relu5=tf.nn.relu(conv5)\r\n pool5=tf.nn.max_pool(relu5,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n \r\n flat=tf.reshape(pool5,[-1,8*8*24])\r\n fc1=tf.matmul(flat, 
self.weights['fc1'])+self.biases['fc1']\r\n fc1_relu=tf.nn.relu(fc1)\r\n #fc1_drop=tf.nn.dropout(fc1_relu,0.8)\r\n \r\n fc2=tf.matmul(fc1_relu, self.weights['fc2'])+self.biases['fc2']\r\n fc2_relu=tf.nn.relu(fc2)\r\n #fc2_drop=tf.nn.dropout(fc2_relu,0.8)\r\n out=tf.matmul(fc2_relu,self.weights['out'])+self.biases['out']\r\n return out\r\n \r\n def softmax_loss(self,predicts,labels):\r\n \r\n predicts=tf.nn.softmax(predicts)\r\n labels=tf.one_hot(labels,classnum)\r\n #loss=-tf.reduce_sum(labels*tf.log(predicts))\r\n loss=-tf.reduce_sum(labels*tf.log(tf.clip_by_value(predicts,1e-10,1.0)))\r\n \r\n #loss=tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(predicts, labels))\r\n return loss\r\n\r\n def optimer(self,loss,lr=0.001):\r\n \r\n train_step=tf.train.AdamOptimizer(lr).minimize(loss)\r\n #train_step=tf.train.GradientDescentOptimizer(lr).minimize(loss)\r\n return train_step\r\n\r\npre=True\r\n#衰减学习率 每一百步在原来数值上乘0.96\r\n#可以将global_step这个值和Learn rate训练完成后存在本地文件,下次训练加载初始化\r\n#global_step=tf.placeholder('int32')\r\n#learnrate = tf.train.exponential_decay(0.001, global_step, 100, 0.96, staircase=True)\r\n\r\nimage,label=getinputs(trainpath,pre)\r\nbatch_image,batch_label=get_batch(image,label,batchsize,0)\r\nwork=trainwork()\r\ninf=work.inference(batch_image,pre)\r\nloss=work.softmax_loss(inf,batch_label)\r\nopti=work.optimer(loss,learnrate)\r\n\r\ntest_image,test_label=getinputs(testpath,False)\r\ntest_image_batch,test_label_batch=get_test_batch(test_image,test_label,testnum)\r\ntest_inf=work.test_inference(test_image_batch)\r\ntest_labels=tf.one_hot(test_label_batch,classnum)\r\n#train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)\r\ncorrect_prediction=tf.equal(tf.argmax(test_inf,1),tf.argmax(test_labels,1))\r\naccuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\r\ntest_pre = tf.reshape(test_inf, [testnum, classnum])\r\ntest_pre = tf.argmax(test_pre, 1)\r\ntest_true = tf.argmax(test_labels, 1)\r\n\r\n\r\nvalid_image,valid_label=getinputs(validpath,False)\r\nvalid_image_batch,valid_label_batch=get_valid_batch(valid_image,valid_label,validnum)\r\nvalid_inf=work.test_inference(valid_image_batch)\r\nvalid_labels=tf.one_hot(valid_label_batch,classnum)\r\n#train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)\r\nvalid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1))\r\nvalid_accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\r\nvalid_pre = tf.reshape(valid_inf, [validnum, classnum])\r\nvalid_pre = tf.argmax(valid_pre, 1)\r\nvalid_true = tf.argmax(valid_labels, 1)\r\n#init=tf.initialize_all_variables()\r\n\r\n\r\nfrom sklearn.metrics import classification_report\r\n\r\ntarget_names = ['class sg', 'class bm', 'class wd', 'class wt', 'class wj', 'class wo', 'class ym', 'class shq', 'class shj',\r\n 'class no', 'class yh', 'class fb']\r\n\r\ninit=tf.global_variables_initializer()\r\nconfig=tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\n\r\ndef train(train_num=64,test_num=32,lr=1e-4,loop_count=10000,report_step=100,save_step=1000,restore=False):\r\n with tf.Session(config=config) as sess:\r\n sess.run(init)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(coord=coord)\r\n if restore:\r\n tf.train.Saver().restore(sess,path)\r\n feed_dict={\r\n testnum: test_num,\r\n trainnum: train_num,\r\n learnrate:lr\r\n }\r\n for i in range(loop_count):\r\n loss_np, _, label_np, image_np, inf_np = sess.run(\r\n [loss, opti, batch_label, batch_image, 
inf],feed_dict=feed_dict)\r\n if i > 0 and i % report_step == 0:\r\n accuracy_np = sess.run([accuracy],feed_dict=feed_dict)\r\n print(i, accuracy_np, loss_np)\r\n if i > 0 and i % save_step == 0:\r\n tf.train.Saver().save(sess, path)\r\n tf.train.Saver().save(sess, path)\r\n coord.request_stop()\r\n coord.join(threads)\r\n \r\ndef test_and_valid(test_loop=1,valid_loop=1,test_num=64,valid_num=64):\r\n feed_dict={\r\n testnum: test_num,\r\n validnum: valid_num\r\n }\r\n with tf.Session(config=config) as sess:\r\n sess.run(init)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(coord=coord)\r\n tf.train.Saver().restore(sess,path)\r\n #test\r\n test_acc_avg = 0.0\r\n test_true_total=np.array([])\r\n test_pre_total=np.array([])\r\n for i in range(0, test_loop):\r\n \r\n accuracy_np = sess.run([accuracy],feed_dict=feed_dict)\r\n test_pre_1, test_true_1 = sess.run([test_pre, test_true],feed_dict=feed_dict)\r\n test_pre_1 = np.array(test_pre_1)\r\n test_true_1 = np.array(test_true_1)\r\n \r\n test_acc_avg = test_acc_avg + accuracy_np[0]\r\n test_true_total = np.concatenate((test_true_total,test_true_1),axis=0)\r\n test_pre_total = np.concatenate((test_pre_total,test_pre_1), axis=0)\r\n print('------test_accuracy-----')\r\n print(test_acc_avg / test_loop)\r\n print('------test_accuracy-----')\r\n\r\n print('------test_classification_report-----')\r\n print(classification_report(test_true_total, test_pre_total, target_names=target_names))\r\n print('------test_classification_report-----')\r\n print('------test_confusion_matrix-----')\r\n cm = confusion_matrix(y_true=test_true_total, y_pred=test_pre_total)\r\n print(cm)\r\n print('------test_confusion_matrix-----')\r\n\r\n #valid\r\n if valid_loop > 0:\r\n valid_acc_avg = 0.0\r\n valid_true_total=np.array([])\r\n valid_pre_total=np.array([])\r\n for i in range(0, valid_loop):\r\n accuracy_np = sess.run([valid_accuracy],feed_dict=feed_dict)\r\n valid_pre_1, valid_true_1 = sess.run([valid_pre, valid_true],feed_dict=feed_dict)\r\n valid_pre_1 = np.array(valid_pre_1)\r\n valid_true_1 = np.array(valid_true_1)\r\n \r\n valid_acc_avg = valid_acc_avg + accuracy_np[0]\r\n valid_true_total = np.concatenate((valid_true_total,valid_true_1),axis=0)\r\n valid_pre_total = np.concatenate((valid_pre_total,valid_pre_1), axis=0)\r\n print('------valid_accuracy-----')\r\n print(valid_acc_avg / valid_loop)\r\n print('------valid_accuracy-----')\r\n \r\n print('------valid_classification_report-----')\r\n print(classification_report(valid_true_total, valid_pre_total, target_names=target_names))\r\n print('------valid_classification_report-----')\r\n print('------valid_confusion_matrix-----')\r\n cm = confusion_matrix(y_true=valid_true_total, y_pred=valid_pre_total)\r\n print(cm)\r\n print('------valid_confusion_matrix-----')\r\n \r\n coord.request_stop()\r\n coord.join(threads)\r\n\r\ndef predict_time(loop=100):\r\n feed_dict={\r\n testnum:1\r\n }\r\n with tf.Session(config=config) as sess:\r\n sess.run(init)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(coord=coord)\r\n tf.train.Saver().restore(sess,path)\r\n total=0.0\r\n for i in range(loop):\r\n a = datetime.now()\r\n accuracy_np = sess.run([accuracy],feed_dict=feed_dict)\r\n b = datetime.now()\r\n c = (b - a).microseconds\r\n total+=c\r\n print('predict_time(ms): ',total/(loop*1000))\r\n coord.request_stop()\r\n coord.join(threads)\r\n \r\n 
\r\ntrain(train_num=128,loop_count=1002)\r\ntest_and_valid(10,10,200,200)\r\npredict_time(1000)\r\n\r\n\r\n","sub_path":"Model_Code/train_alexnet.py","file_name":"train_alexnet.py","file_ext":"py","file_size_in_byte":17959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"500827423","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport sys\nimport time\n\n\ndef workspace_check(x_check, y_check):\n \"\"\"\n In this function the given value is checked to see if it lies in the workspace\n\n Parameters\n ----------\n x_check: X coordinate of given input\n y_check: Y coordinate of given input\n\n Returns\n -------\n True : If the input lies inside workspace\n False : If the input lies outside workspace\n \"\"\"\n\n if (x_check >= 0) and (y_check >= 0) and (x_check <= 10) and (y_check <= 10):\n return True\n else:\n return False\n\n\ndef obstacle_space(x_check, y_check):\n \"\"\"\n In this function the given value is checked to see if it lies in the obstacle space\n\n Parameters\n ----------\n x_check: X coordinate of given input\n y_check: Y coordinate of given input\n\n Returns\n -------\n True : If the input lies outside obstacle space\n False : If the input lies inside obstacle space\n \"\"\"\n\n flag1, flag2, flag3, flag4, flag5 = (True, True, True, True, True)\n\n # For Circle 1 with center at (2, 2) and radius 1 unit\n if (x_check - 2) ** 2 + (y_check - 2) ** 2 - (1 + clearance) ** 2 < 0:\n flag1 = False\n\n # For Circle 2 with center at (2, 8) and radius 1 unit\n if (x_check - 2) ** 2 + (y_check - 8) ** 2 - (1 + clearance) ** 2 < 0:\n flag2 = False\n\n # For Square with center at (1, 5) and dimension 1.5x1.5\n if 0.25 - clearance < x_check < 1.75 + clearance and 4.25 - clearance < y_check < 5.75 + clearance:\n\n if 0.25 - clearance < x_check < 0.25 and 4.25 - clearance < y_check < 4.25 and \\\n (x_check - 0.25) ** 2 + (y_check - 4.25) ** 2 - clearance ** 2 >= 0:\n flag3 = True\n\n elif 0.25 - clearance < x_check < 0.25 and 5.75 < y_check < 5.75 + clearance and \\\n (x_check - 0.25) ** 2 + (y_check - 5.75) ** 2 - clearance ** 2 >= 0:\n flag3 = True\n\n elif 1.75 < x_check < 1.75 + clearance and 5.75 < y_check < 5.75 + clearance and \\\n (x_check - 1.75) ** 2 + (y_check - 5.75) ** 2 - clearance ** 2 >= 0:\n flag3 = True\n\n elif 1.75 < x_check < 1.75 + clearance and 4.25 - clearance < y_check < 4.25 and \\\n (x_check - 1.75) ** 2 + (y_check - 4.25) ** 2 - clearance ** 2 >= 0:\n flag3 = True\n\n else:\n flag3 = False\n\n # For Rectangle 1 with center at (5, 5) and dimension 2.5x1.5\n if 3.75 - clearance < x_check < 6.25 + clearance and 4.25 - clearance < y_check < 5.75 + clearance:\n\n if 3.75 - clearance < x_check < 3.75 and 4.25 - clearance < y_check < 4.25 and \\\n (x_check - 3.75) ** 2 + (y_check - 4.25) ** 2 - clearance ** 2 >= 0:\n flag4 = True\n\n elif 3.75 - clearance < x_check < 3.75 and 5.75 < y_check < 5.75 + clearance and \\\n (x_check - 3.75) ** 2 + (y_check - 5.75) ** 2 - clearance ** 2 >= 0:\n flag4 = True\n\n elif 6.25 < x_check < 6.25 + clearance and 5.75 < y_check < 5.75 + clearance and \\\n (x_check - 6.25) ** 2 + (y_check - 5.75) ** 2 - clearance ** 2 >= 0:\n flag4 = True\n\n elif 6.25 < x_check < 6.25 + clearance and 4.25 - clearance < y_check < 4.25 and \\\n (x_check - 6.25) ** 2 + (y_check - 4.25) ** 2 - clearance ** 2 >= 0:\n flag4 = True\n\n else:\n flag4 = False\n\n # For Rectangle 2 with center at (8, 3) and 
dimension 1.5x2.0\n if 7.25 - clearance < x_check < 8.75 + clearance and 2.0 - clearance < y_check < 4.0 + clearance:\n\n if 7.25 - clearance < x_check < 7.25 and 2.0 - clearance < y_check < 2.0 and \\\n (x_check - 7.25) ** 2 + (y_check - 2.0) ** 2 - clearance ** 2 >= 0:\n flag5 = True\n\n elif 7.25 - clearance < x_check < 7.25 and 4.0 < y_check < 4.0 + clearance and \\\n (x_check - 7.25) ** 2 + (y_check - 4.0) ** 2 - clearance ** 2 >= 0:\n flag5 = True\n\n elif 8.75 < x_check < 8.75 + clearance and 4.0 < y_check < 4.0 + clearance and \\\n (x_check - 8.75) ** 2 + (y_check - 4.0) ** 2 - clearance ** 2 >= 0:\n flag5 = True\n\n elif 8.75 < x_check < 8.75 + clearance and 2.0 - clearance < y_check < 2.0 and \\\n (x_check - 8.75) ** 2 + (y_check - 2.0) ** 2 - clearance ** 2 >= 0:\n flag5 = True\n\n else:\n flag5 = False\n\n flag = flag1 and flag2 and flag3 and flag4 and flag5\n\n return flag\n\n\ndef find_index(node_point):\n x_pt, y_pt = node_point\n\n x_pt_mid = threshold_distance * (0.5 + int(x_pt / threshold_distance))\n y_pt_mid = threshold_distance * (0.5 + int(y_pt / threshold_distance))\n\n # ----------------------- For x_pt -----------------------\n\n if x_pt <= x_pt_mid:\n x_temp_ = threshold_distance * int(x_pt / threshold_distance)\n\n else:\n x_temp_ = threshold_distance * int(1 + x_pt / threshold_distance)\n\n # ----------------------- For y_pt ----------------\n\n if y_pt <= y_pt_mid:\n y_temp_ = threshold_distance * int(y_pt / threshold_distance)\n\n else:\n y_temp_ = threshold_distance * int(1 + y_pt / threshold_distance)\n\n node_index = np.intersect1d(np.where(np.array(X) == x_temp_), np.where(np.array(Y) == y_temp_))[0]\n\n return node_index\n\n\ndef a_star(parent_index):\n x_parent, y_parent = node_pt[parent_index]\n parent_angle = node_angle[parent_index]\n parent_cost = node_cost[parent_index]\n iter_bin.append([]) # appending an empty list for each iteration\n\n # ------------------------------------------ Performing action sets ------------------------------------------------\n\n actions = [[5, 5], [10, 10], [5, 0], [0, 5], [5, 10], [10, 5]] # mentioned in class\n\n for action in actions:\n t = 0\n x_temp = x_parent\n y_temp = y_parent\n theta_temp = parent_angle\n temp_distance = 0\n\n omega_left, omega_right = action\n temp_x = [x_temp]\n temp_y = [y_temp]\n\n while t < 1:\n t = t + dt\n x_temp0 = x_temp\n y_temp0 = y_temp\n theta_temp0 = theta_temp % (2 * math.pi)\n\n dx_temp = 0.5 * r_wheel * (omega_left + omega_right) * math.cos(theta_temp) * dt\n dy_temp = 0.5 * r_wheel * (omega_left + omega_right) * math.sin(theta_temp) * dt\n dtheta_temp = (r_wheel / L_wheel) * (omega_right - omega_left) * dt\n\n x_temp = x_temp0 + dx_temp\n y_temp = y_temp0 + dy_temp\n theta_temp = (theta_temp0 + dtheta_temp) % (2 * math.pi)\n\n temp_x.append(x_temp)\n temp_y.append(y_temp)\n temp_distance += math.sqrt(dx_temp ** 2 + dy_temp ** 2)\n\n is_valid = workspace_check(temp_x[-1], temp_y[-1]) and obstacle_space(temp_x[-1], temp_y[-1])\n\n if is_valid:\n\n neighbor_pt = np.array([temp_x[-1], temp_y[-1]])\n neighbor_index = find_index(neighbor_pt)\n\n if neighbor_index not in visited_nodes_index:\n\n if node_cost[neighbor_index] > parent_cost + temp_distance:\n node_cost[neighbor_index] = parent_cost + temp_distance\n goal_cost[neighbor_index] = math.sqrt(np.sum((neighbor_pt - goal_pt) ** 2))\n node_pt[neighbor_index] = neighbor_pt\n node_angle[neighbor_index] = theta_temp\n track.update({neighbor_index: parent_index})\n path.update({neighbor_index: {'x': temp_x, 'y': temp_y, 'UL': 
omega_left, 'UR': omega_right}})\n\n if neighbor_index not in unvisited_nodes_index:\n unvisited_nodes_index.append(neighbor_index)\n\n iter_bin[-1].append({}) # updating an empty list for each action as an empty dictionary\n iter_bin[-1][-1].update({'x': temp_x, 'y': temp_y})\n\n node_flag[parent_index] = 1\n unvisited_nodes_index.remove(parent_index)\n visited_nodes_index.append(parent_index)\n return 0\n\n\n# -------------------------------------------------Parameters-----------------------------------------------------------\nr_wheel = 0.038 # in m, radius of wheels\nL_wheel = 0.354 # in m, distance between two wheels\n# r_robot = 0.177 # in m, radius of robot\ndt = 0.1 # in s, differential time needed for integration\nthreshold_distance = 0.1 # in m, threshold distance between each node\ngoal_threshold_radius = 0.2 # in m, threshold radius for goal node\n# clearance_obstacle = 0.123 # in m, clearance for obstacles\nclearance = 0.25 # clearance_obstacle + r_robot # in m, effective clearance\n\n# -----------------------------------------Discretizing the workspace---------------------------------------------------\nX_ = np.linspace(0, 10, int(10 / threshold_distance) + 1)\nY_ = np.linspace(0, 10, int(10 / threshold_distance) + 1)\nX = []\nY = []\nfor ix in range(len(X_)):\n for iy in range(len(Y_)):\n X.append(X_[ix])\n Y.append(Y_[iy])\n\n# --------------------------------------------------Variables-----------------------------------------------------------\nnode_pt = np.zeros((len(X), 2)) # X & Y coordinates for each node\nnode_angle = np.zeros(len(X)) # orientation (in radians) of robot at each node\nnode_flag = np.zeros(len(X)) # will take either 0 (if unvisited) or 1 (if visited) for each node\nnode_cost = np.zeros(len(X)) + np.inf # cost to go from start pt for each node\ngoal_cost = np.zeros(len(X)) + np.inf # cost to go from each node to goal pt\nvisited_nodes_index = [] # keeps a list of visited nodes\ntrack = {} # tracks parent node from child node\npath = {} # stores coordinates and angular velocities of both the wheels for every action with child index as key\niter_bin = [] # stores coordinates for every action with iterator as key\n\nX_start = 1.0\nY_start = 1.0\nangle_start = 0.0 # in radian\nstart_pt = np.array([X_start, Y_start])\nstart_index = find_index(start_pt)\n\nX_goal = 9.0\nY_goal = 9.0\ngoal_pt = np.array([X_goal, Y_goal])\ngoal_index = find_index(goal_pt)\n\n# ----------------------------------------------------------------------------------------------------------------------\nnode_pt[start_index] = start_pt\nnode_cost[start_index] = 0.0 # assigning cost of start node to zero\ngoal_cost[start_index] = math.sqrt(np.sum((start_pt - goal_pt) ** 2))\nnode_angle[start_index] = angle_start\nunvisited_nodes_index = [start_index]\n\n# ----------------------------------------------------------------------------------------------------------------------\nstart_time = time.time()\niterator = 0\ngoal_flag = 0\n\nprint('\\n\\nSolving.........')\nprint('\\nIteration # \\t Time (mins.)\\n')\n\nwhile goal_flag == 0:\n\n if iterator % 100 == 0 and iterator != 0:\n mid_time = (time.time() - start_time) / 60\n print(' {0} \\t\\t {1:1.3f}'.format(iterator, mid_time))\n\n temp_cost = node_cost[unvisited_nodes_index] + goal_cost[unvisited_nodes_index]\n temp = np.argmin(temp_cost)\n next_node_index = unvisited_nodes_index[temp]\n\n if goal_cost[next_node_index] <= goal_threshold_radius:\n goal_flag = 1\n iterator += 1\n end_time = time.time()\n total_time = (end_time - start_time) / 
60\n print('\\n\\nNumber of iterations taken to reach goal state: {}'.format(iterator))\n print('\\nTime taken to find optimal (shortest) path: {0:1.3f} min'.format(total_time))\n\n node_flag[next_node_index] = 1\n unvisited_nodes_index.remove(next_node_index)\n visited_nodes_index.append(next_node_index)\n print('\\n\\nRobot reached within the threshold of goal node ...!')\n print('\\nCurrent node number for robot:', next_node_index)\n print('Location (x, y):', node_pt[next_node_index])\n print('Cost:', node_cost[next_node_index])\n break\n\n goal_flag = a_star(next_node_index)\n iterator += 1\n\n# -----------------------------------------------Visited Node Exploration-----------------------------------------------\nx_explore = []\ny_explore = []\n\nfor i in range(len(iter_bin)): # for each iteration\n for j in range(len(iter_bin[i])): # for each action\n x_explore.append(iter_bin[i][j]['x'])\n y_explore.append(iter_bin[i][j]['y'])\n\n\n# -----------------------------------------------Optimal solution trajectory--------------------------------------------\nback_track = []\nx_solution = []\ny_solution = []\n\n\ndef traj(child):\n if child != start_index:\n back_track.append(child)\n parent = track[child]\n return traj(parent)\n\n else:\n back_track.append(start_index)\n return back_track[::-1]\n\n\ntrajectory = traj(visited_nodes_index[-1])\n\nfor i in range(1, len(trajectory)):\n child_node = trajectory[i]\n x_solution.append(path[child_node]['x'])\n y_solution.append(path[child_node]['y'])\n\n# --------------------------------------- Visualization starts from here -----------------------------------------------\nprint('\\n\\n### Creating Visualization ###')\nstart_time_plot = time.time()\n\nplt.style.use('seaborn-pastel')\nfig = plt.figure()\nax = plt.axes(xlim=(0, 10), ylim=(0, 10)) # Defining Workspace limits\nax.set_aspect('equal')\n\n# For Plotting Circle 1 with center at (2, 2) and radius 1 unit\nx_circle1 = np.linspace(1, 3, 2000)\ny_circle1a = 2 + (1 ** 2 - (x_circle1 - 2) ** 2) ** 0.5\ny_circle1b = 2 - (1 ** 2 - (x_circle1 - 2) ** 2) ** 0.5\nax.plot(x_circle1, y_circle1a, 'b.', markersize=0.15)\nax.plot(x_circle1, y_circle1b, 'b.', markersize=0.15)\n\n# For Plotting Circle 2 with center at (2, 8) and radius 1 unit\nx_circle2 = np.linspace(1, 3, 2000)\ny_circle2a = 8 + (1 ** 2 - (x_circle1 - 2) ** 2) ** 0.5\ny_circle2b = 8 - (1 ** 2 - (x_circle1 - 2) ** 2) ** 0.5\nax.plot(x_circle2, y_circle2a, 'b.', markersize=0.15)\nax.plot(x_circle2, y_circle2b, 'b.', markersize=0.15)\n\n# For Plotting Square with center at (1, 5) and dimension 1.5x1.5\nx1, y1 = (0.25, 4.25)\nx2, y2 = (0.25, 5.75)\nx3, y3 = (1.75, 5.75)\nx4, y4 = (1.75, 4.25)\nax.plot([x1, x2], [y1, y2], 'b-')\nax.plot([x2, x3], [y2, y3], 'b-')\nax.plot([x3, x4], [y3, y4], 'b-')\nax.plot([x4, x1], [y4, y1], 'b-')\n\n# For Plotting Rectangle 1 with center at (5, 5) and dimension 2.5x1.5\nx5, y5 = (3.75, 4.25)\nx6, y6 = (3.75, 5.75)\nx7, y7 = (6.25, 5.75)\nx8, y8 = (6.25, 4.25)\nax.plot([x5, x6], [y5, y6], 'b-')\nax.plot([x6, x7], [y6, y7], 'b-')\nax.plot([x7, x8], [y7, y8], 'b-')\nax.plot([x8, x5], [y8, y5], 'b-')\n\n# For Plotting Rectangle 2 with center at (8, 3) and dimension 1.5x2.0\nx9, y9 = (7.25, 2.0)\nx10, y10 = (7.25, 4.0)\nx11, y11 = (8.75, 4.0)\nx12, y12 = (8.75, 2.0)\nax.plot([x9, x10], [y9, y10], 'b-')\nax.plot([x10, x11], [y10, y11], 'b-')\nax.plot([x11, x12], [y11, y12], 'b-')\nax.plot([x12, x9], [y12, y9], 'b-')\n\n# For Plotting Circle threshold for goal node\nx_goal_circle = np.linspace(X_goal - 
goal_threshold_radius, X_goal + goal_threshold_radius, 2000)\ny_goal_circle1 = Y_goal + (goal_threshold_radius ** 2 - (x_goal_circle - X_goal) ** 2) ** 0.5\ny_goal_circle2 = Y_goal - (goal_threshold_radius ** 2 - (x_goal_circle - X_goal) ** 2) ** 0.5\nax.plot(x_goal_circle, y_goal_circle1, 'y.', markersize=20)\nax.plot(x_goal_circle, y_goal_circle2, 'y.', markersize=20)\n\nnew_node, = ax.plot([], [], 'g.')\nsolution_trajectory, = ax.plot([], [], 'r.')\n\n\ndef animate(frame_number):\n \"\"\"\n In this function, animation is carried out.\n\n Parameters\n ----------\n frame_number : int type, here frame number serves as an index for the images\n\n Returns\n -------\n None\n \"\"\"\n\n frame_diff = total_frames - frame_number\n\n if frame_diff > 51: # will run for frame_number = [0, 148]\n first = 0\n last = step1 * (frame_number + 1)\n x = x_explore[first:last]\n y = y_explore[first:last]\n new_node.set_data(x, y)\n new_node.set_markersize(1)\n return new_node,\n\n elif frame_diff == 51: # will run for frame_number = 149 only\n x = x_explore\n y = y_explore\n new_node.set_data(x, y)\n new_node.set_markersize(1)\n return new_node,\n\n elif 51 > frame_diff > 1: # will run for frame_number = [150, 198]\n first = 0\n last = step2 * (frame_number - 149)\n x = x_solution[first:last]\n y = y_solution[first:last]\n solution_trajectory.set_data(x, y)\n solution_trajectory.set_markersize(1.5)\n return solution_trajectory,\n\n else: # will run for frame_number = 199 only\n x = x_solution\n y = y_solution\n solution_trajectory.set_data(x, y)\n solution_trajectory.set_markersize(1.5)\n return solution_trajectory,\n\n\nnode_explore_frames = 150\nsolution_traj_frames = 50\ntotal_frames = node_explore_frames + solution_traj_frames\n\nstep1 = int(len(x_explore) / node_explore_frames)\nstep2 = int(len(x_solution) / solution_traj_frames)\n\n# animation = FuncAnimation(fig, animate, frames=total_frames, interval=30, blit=True, repeat=False)\n\n# animation.save('Differential Drive Visualization (A-star) for test case (1 0.5 0).mp4', dpi=300)\n\n# plt.close()\n\nax.plot(x_explore, y_explore, 'g.', markersize=1.0)\nax.plot(x_solution, y_solution, 'r.', markersize=1.0)\nplt.show()\n\nend_time_plot = time.time()\ntotal_time_plot = (end_time_plot - start_time_plot)\nprint('\\n\\nTime taken for making visualization: {0:1.3f} s'.format(total_time_plot))\n\n# --------------------------------------- Writing coordinates and velocities for Gazebo---------------------------------\nprint('\\n\\n\\nWriting coordinates and velocities for Gazebo')\nn_action = len(trajectory)\ncontrol_data = np.zeros((n_action, 4))\n\nfor control in range(0, n_action - 1):\n control_index = trajectory[control]\n x_old, y_old = node_pt[control_index]\n theta_old = node_angle[control_index]\n UL = path[trajectory[control + 1]]['UL']\n UR = path[trajectory[control + 1]]['UR']\n\n #shifting origin to Gazebo world coordinates\n x_new, y_new = (x_old - 5, y_old - 5)\n v_x1 = (0.5 * r_wheel) * (UL + UR) * math.cos(theta_old)\n v_x2 = (0.5 * r_wheel) * (UL + UR) * math.sin(theta_old)\n v_x = math.sqrt(v_x1**2 + v_x2**2)\n w_z = (r_wheel / L_wheel) * (UR - UL)\n\n control_data[control] = np.array([x_new, y_new, v_x, w_z])\n\ncontrol_data[-1][:2] = np.array(node_pt[trajectory[-1]])\n\nnp.savetxt('a_star_controls.txt', control_data)\n\nprint('\\nCompleted !!!')\n","sub_path":"code/a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":18391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} 
{"seq_id":"280157716","text":"\"\"\"Walleter command line entry point.\"\"\"\n# Standard\nimport argparse\nimport errno\nimport logging\nimport os\nimport pkg_resources\nimport sys\nimport time\nfrom logging.config import dictConfig\n\n# Boltons\nfrom boltons.iterutils import windowed_iter\nfrom log_color import ColorFormatter, ColorStripper\n\n# Project\nfrom walleter.utils.arg_format import ArgFormatter\nfrom walleter.utils.blockchain import BlockchainInfo\nfrom walleter.utils.wallet import Wallet\n\nLOG = logging.getLogger(__name__)\nDEST_ADDRESS = '16Mfp5hjBmro5p4Kg6z4XqvVmrWbzq17px'\n\n# Setup the version string globally\ntry:\n pkg_version = \"%(prog)s {0}\".format(\n pkg_resources.get_distribution(\"walleter\").version\n )\nexcept pkg_resources.DistributionNotFound:\n pkg_version = '%(prog)s Development'\nexcept Exception:\n pkg_version = '%(prog)s Unknown'\n\n# Py Compat\nif sys.version_info[0] == 3:\n xrange = range\n\n\ndef logging_init(level, logfile=None, verbose=False):\n \"\"\"\n Given the log level and an optional logging file location, configure\n all logging.\n \"\"\"\n # Get logging related arguments & the configure logging\n if logfile:\n logfile = os.path.abspath(logfile)\n\n # Don't bother with a file handler if we're not logging to a file\n handlers = ['console', 'filehandler'] if logfile else ['console', ]\n\n # If the main logging level is any of these, set librarys to WARNING\n lib_warn_levels = ('DEBUG', 'INFO', 'WARNING', )\n\n # The base logging configuration\n BASE_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'ConsoleFormatter': {\n '()': ColorFormatter,\n 'format': '%(levelname)s: %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n 'FileFormatter': {\n '()': ColorStripper,\n 'format': (\"%(levelname)-8s: %(asctime)s '%(message)s' \"\n '%(name)s:%(lineno)s'),\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG' if verbose else level,\n 'class': 'logging.StreamHandler',\n 'formatter': 'ConsoleFormatter',\n },\n },\n 'loggers': {\n 'walleter': {\n 'handlers': handlers,\n 'level': 'DEBUG' if verbose else level,\n 'propagate': False,\n },\n 'requests': {\n 'handlers': handlers,\n 'level': 'WARNING' if level in lib_warn_levels else level,\n 'propagate': False,\n },\n }\n }\n\n # If we have a log file, modify the dict to add in the filehandler conf\n if logfile:\n BASE_CONFIG['handlers']['filehandler'] = {\n 'level': 'DEBUG' if verbose else level,\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': logfile,\n 'formatter': 'FileFormatter',\n }\n\n # Setup the loggers\n dictConfig(BASE_CONFIG)\n\n\n\ndef cli():\n parser = argparse.ArgumentParser(\n description=\"Wallet exploration tool\",\n formatter_class=ArgFormatter,\n )\n parser.add_argument(\n \"-i\",\n \"--iterations\",\n dest=\"iterations\",\n type=int,\n default=1,\n help=\"Number of iterations to derive\"\n )\n parser.add_argument(\n \"-b\",\n \"--bypass-iterations\",\n dest=\"bypass\",\n action='store_true',\n help=\"Skip interstitial iterations\"\n )\n parser.add_argument(\n \"-V\",\n \"--version\",\n dest=\"version\",\n action=\"version\",\n version=pkg_version,\n help=\"Display the version number.\"\n )\n parser.add_argument(\n '-l',\n '--log-level',\n default='INFO',\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Logging level for command output.',\n dest='log_level'\n )\n parser.add_argument(\n '-L',\n '--logfile',\n dest='logfile',\n default=None,\n help='Location to place a log of the 
process output'\n )\n parser.add_argument(\n '-s',\n '--seed',\n dest='seed',\n default=None,\n help='Use a specific seed instead of core dataset'\n )\n parsed_args = parser.parse_args()\n logging_init(parsed_args.log_level, logfile=parsed_args.logfile)\n run(parsed_args.iterations,\n force_seed=parsed_args.seed, bypass=parsed_args.bypass)\n LOG.debug(u\"#g<\\u2713> Complete! Dataset exhausted.\")\n\n\ndef iter_wallets(wallet, iterations):\n \"\"\"Iterate a wallet feeding it's address into the next iteration.\"\"\"\n prev = None\n for idx in xrange(iterations):\n if not prev:\n prev = wallet.address\n yield wallet\n else:\n try:\n new = Wallet(prev)\n except Exception:\n break\n else:\n prev = new.address\n yield new\n\n\ndef main():\n try:\n cli()\n except KeyboardInterrupt:\n # Write a nice message to stderr\n sys.stderr.write(\n u\"\\n\\033[91m\\u2717 Operation canceled by user.\\033[0m\\n\"\n )\n sys.exit(errno.EINTR)\n\n\ndef run(iterations, force_seed=None, bypass=False):\n \"\"\"Actual code shit.\"\"\"\n\n home_folder = os.path.expanduser('~')\n config_dir = os.path.join(home_folder, '.walleter')\n\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n\n cache_file = os.path.join(config_dir, 'check_cache')\n found_file = os.path.join(config_dir, 'found_coins')\n\n block_info = BlockchainInfo()\n LOG.info(\"Opening BlockchainInfo session\")\n block_info.open_session()\n LOG.info(\"Session open\")\n\n cache = []\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as f:\n cache = [x.strip().strip('\\r\\n').strip('\\n') for x\n in f.readlines()]\n\n # If given a specific seed, conjure a dataset from that\n if force_seed is not None:\n if force_seed in ('\"\"', '\\'\\''):\n force_seed = ''\n all_data = [force_seed]\n LOG.info('Using custom seed: {0}'.format(force_seed))\n else:\n from data import all_data_iter as all_data\n\n tried = set()\n for dict_word in all_data:\n dict_word = dict_word.strip()\n\n if dict_word in tried:\n continue # already did that this session\n\n tried.add(dict_word)\n LOG.debug('Dict word: \"{0}\"'.format(dict_word))\n\n # Create initial wallet\n try:\n wallet = Wallet(dict_word)\n except Exception:\n continue\n\n for idx, wallet in enumerate(iter_wallets(wallet, iterations)):\n\n if bypass and idx + 1 != iterations:\n continue\n\n if wallet.address in cache:\n LOG.debug('Skipping cached address')\n continue\n\n LOG.debug('Checking address: {0}'.format(wallet.address))\n\n # Get received bitcoins\n retry = 0\n while retry < 5:\n try:\n received_bitcoins = block_info.get_received(wallet.address)\n break\n except Exception:\n LOG.warning('Response invalid for received bitcoins. '\n 'Retrying in 5 seconds.')\n time.sleep(5)\n retry += 1\n if retry == 5:\n LOG.error('Retries exceeded; Skipping.')\n continue\n\n if not received_bitcoins:\n LOG.info('Wallet never had any coins. Moving along...')\n\n # Write address to cache file\n with open(cache_file, 'a') as f:\n f.write(\"{0}\\n\".format(wallet.address))\n continue\n\n # Get current balance\n for _ in xrange(5):\n try:\n balance = block_info.get_balance(wallet.address)\n break\n except Exception:\n LOG.warning('Response invallid for balance. 
Retrying in '\n '5seconds.')\n time.sleep(5)\n retry += 1\n else:\n LOG.error('Retries exceeded; skipping.'.format(retry_count))\n continue\n\n if balance == 0.00:\n balance_str = '#y<{:.8f}>'.format(balance)\n else:\n balance_str = '#g<{:.8f}>'.format(balance)\n\n\n # Output results\n output = (\n 'Wallet found: {}; Received: {:.8f}; Address: {}; Private '\n 'Key: {}; Balance: {}'.format(\n wallet.passphrase,\n received_bitcoins,\n wallet.address,\n wallet.private_key_wif,\n balance_str\n )\n )\n\n LOG.info(output)\n with open(found_file, 'a') as f:\n f.write(output)\n f.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/walleter/entry_point.py","file_name":"entry_point.py","file_ext":"py","file_size_in_byte":9125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"486382192","text":"from numpy import array, zeros, eye, asarray, dot, rad2deg, deg2rad, linspace, sin, cos, pi\nfrom numpy.linalg import inv\nfrom matplotlib.pyplot import plot, xlabel, ylabel, legend, rcParams\nfrom sympy import symbols, simplify, trigsimp\nfrom sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\nfrom sympy.physics.vector import init_vprinting, vlatex\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\nfrom scipy.integrate import odeint\nfrom scipy.linalg import solve_continuous_are\nfrom pydy.codegen.code import generate_ode_function\nimport matplotlib.animation as animation\nimport math\nfrom double_pendulum_setup import theta1, theta2, ankle, leg_length, waist, omega1, omega2, ankle_torque, waist_torque, coordinates, speeds, kane, mass_matrix, forcing_vector, specified, parameter_dict, constants, ke_energy, pe_energy\n#from utils import controllable\n\ninit_vprinting()\n\n\nrcParams['figure.figsize'] = (14.0, 6.0)\n\nright_hand_side = generate_ode_function(mass_matrix, forcing_vector,\n constants,\n coordinates, speeds, specified)\n\n#Initial Conditions for speeds and positions\nx0 = zeros(4)\nx0[:2] = deg2rad(0)\nx0[1] = deg2rad(160)\n#Specifies numerical constants for inertial/mass properties\n#numerical_constants = array([1.035, # leg_length[m]\n# 0.58, # leg_com_length[m]\n# 23.779, # leg_mass[kg]\n# 0.383, # leg_inertia [kg*m^2]\n# 0.305, # body_com_length [m]\n# 32.44, # body_mass[kg]\n# 1.485, # body_inertia [kg*m^2]\n# 9.81], # acceleration due to gravity [m/s^2]\n# )\n\nnumerical_constants = array([1.0, # leg_length[m]\n 1.0, # leg_mass[kg]\n 1.0, # body_length[m]\n 1.0, # body_mass[kg]\n 9.81], # acceleration due to gravity [m/s^2]\n )\n#Set input torques to 0\nnumerical_specified = array([0,0])\n\nargs = {'constants': numerical_constants,\n 'specified': numerical_specified}\n\nframes_per_sec = 60\nfinal_time = 5.0\nt = linspace(0.0, final_time, final_time*frames_per_sec)\n\nright_hand_side(x0, 0.0, args)\n\ny = odeint(right_hand_side, x0, t, args=(args,))\n\nx1 = -1*numerical_constants[0]*sin(y[:,0])\ny1 = numerical_constants[0]*cos(y[:,0])\n\nx2 = x1 + -1*numerical_constants[2]*sin(y[:,0]+y[:,1])\ny2 = y1 + numerical_constants[2]*cos(y[:,0]+y[:,1])\n\ndt = 1./frames_per_sec\n\nfig = plt.figure()\nax = fig.add_subplot(111, autoscale_on=False,aspect='equal', xlim = (-2, 2), ylim = (-2, 2))\nax.grid()\n\nline, = ax.plot([], [], 'o-', lw=2)\ntime_template = 'time=%.1fs'\ntime_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)\nenergy_text = ax.text(0.02, 0.90, '', transform = ax.transAxes)\n\ndef energy(i):\n state = 
dict(zip(coordinates+speeds, y[i]))\n k = ke_energy.subs(state)\n u = pe_energy.subs(state)\n return k+u\n\ndef init():\n line.set_data([],[])\n time_text.set_text('')\n energy_text.set_text('')\n return line, time_text, energy_text\n\ndef animate(i):\n thisx = [0, x1[i], x2[i]]\n thisy = [0, y1[i], y2[i]]\n \n line.set_data(thisx, thisy)\n time_text.set_text(time_template%(i*dt))\n energy_text.set_text('energy = %.8f J' % energy(i))\n return line, time_text, energy_text\n\nani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)), interval=dt*1000, blit=True, init_func=init)\n#ani.save('double_pendulum_free.mp4')\nplt.show()\n\nplot(t, y[:,:2])\nxlabel('Time [s]')\nylabel('Angle[deg]')\nlegend([\"${}$\".format(vlatex(c)) for c in coordinates])\nplt.show()\n\nplot(t, y[:, 2:])\nxlabel('Time [s]')\nylabel('Angular Rate [deg/s]')\nlegend([\"${}$\".format(vlatex(s)) for s in speeds])\nplt.show()\n","sub_path":"double_pendulum/double_pendulum_free.py","file_name":"double_pendulum_free.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"609957529","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file or at\n# https://developers.google.com/open-source/licenses/bsd\n\n\"\"\"Unit tests for helpers module.\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport unittest\n\nfrom features import hotlist_helpers\nfrom features import features_constants\nfrom framework import profiler\nfrom framework import table_view_helpers\nfrom framework import sorting\nfrom services import service_manager\nfrom testing import testing_helpers\nfrom testing import fake\nfrom tracker import tablecell\nfrom tracker import tracker_bizobj\nfrom proto import features_pb2\nfrom proto import tracker_pb2\n\n\nclass HotlistTableDataTest(unittest.TestCase):\n\n def setUp(self):\n self.services = service_manager.Services(\n issue=fake.IssueService(),\n features=fake.FeaturesService(),\n issue_star=fake.AbstractStarService(),\n config=fake.ConfigService(),\n project=fake.ProjectService(),\n user=fake.UserService(),\n cache_manager=fake.CacheManager())\n self.services.project.TestAddProject('ProjectName', project_id=1)\n\n self.services.user.TestAddUser('annajowang@email.com', 111)\n self.services.user.TestAddUser('claremont@email.com', 222)\n issue1 = fake.MakeTestIssue(\n 1, 1, 'issue_summary', 'New', 111, project_name='ProjectName')\n self.services.issue.TestAddIssue(issue1)\n issue2 = fake.MakeTestIssue(\n 1, 2, 'issue_summary2', 'New', 111, project_name='ProjectName')\n self.services.issue.TestAddIssue(issue2)\n issue3 = fake.MakeTestIssue(\n 1, 3, 'issue_summary3', 'New', 222, project_name='ProjectName')\n self.services.issue.TestAddIssue(issue3)\n issues = [issue1, issue2, issue3]\n hotlist_items = [\n (issue.issue_id, rank, 222, None, '') for\n rank, issue in enumerate(issues)]\n\n self.hotlist_items_list = [\n features_pb2.MakeHotlistItem(\n issue_id, rank=rank, adder_id=adder_id,\n date_added=date, note=note) for (\n issue_id, rank, adder_id, date, note) in hotlist_items]\n self.test_hotlist = self.services.features.TestAddHotlist(\n 'hotlist', hotlist_id=123, owner_ids=[111],\n hotlist_item_fields=hotlist_items)\n sorting.InitializeArtValues(self.services)\n self.mr = None\n\n def setUpCreateHotlistTableDataTestMR(self, 
**kwargs):\n self.mr = testing_helpers.MakeMonorailRequest(**kwargs)\n self.services.user.TestAddUser('annajo@email.com', 148)\n self.mr.auth.effective_ids = {148}\n self.mr.col_spec = 'ID Summary Modified'\n\n def testCreateHotlistTableData(self):\n self.setUpCreateHotlistTableDataTestMR(hotlist=self.test_hotlist)\n table_data, table_related_dict = hotlist_helpers.CreateHotlistTableData(\n self.mr, self.hotlist_items_list, self.services)\n self.assertEqual(len(table_data), 3)\n start_index = 100001\n for row in table_data:\n self.assertEqual(row.project_name, 'ProjectName')\n self.assertEqual(row.issue_id, start_index)\n start_index += 1\n self.assertEqual(len(table_related_dict['column_values']), 3)\n\n # test none of the shown columns show up in unshown_columns\n self.assertTrue(\n set(self.mr.col_spec.split()).isdisjoint(\n table_related_dict['unshown_columns']))\n self.assertEqual(table_related_dict['is_cross_project'], False)\n self.assertEqual(len(table_related_dict['pagination'].visible_results), 3)\n\n def testCreateHotlistTableData_Pagination(self):\n self.setUpCreateHotlistTableDataTestMR(\n hotlist=self.test_hotlist, path='/123?num=2')\n table_data, _ = hotlist_helpers.CreateHotlistTableData(\n self.mr, self.hotlist_items_list, self.services)\n self.assertEqual(len(table_data), 2)\n\n def testCreateHotlistTableData_EndPagination(self):\n self.setUpCreateHotlistTableDataTestMR(\n hotlist=self.test_hotlist, path='/123?num=2&start=2')\n table_data, _ = hotlist_helpers.CreateHotlistTableData(\n self.mr, self.hotlist_items_list, self.services)\n self.assertEqual(len(table_data), 1)\n\n\nclass MakeTableDataTest(unittest.TestCase):\n\n def test_MakeTableData(self):\n issues = [fake.MakeTestIssue(\n 789, 1, 'issue_summary', 'New', 111, project_name='ProjectName',\n issue_id=1001)]\n config = tracker_bizobj.MakeDefaultProjectIssueConfig(789)\n cell_factories = {\n 'summary': table_view_helpers.TableCellSummary}\n table_data = hotlist_helpers._MakeTableData(\n issues, [], ['summary'], [], {} , cell_factories,\n {}, set(), config, None, 29, 'stars')\n self.assertEqual(len(table_data), 1)\n row = table_data[0]\n self.assertEqual(row.issue_id, 1001)\n self.assertEqual(row.local_id, 1)\n self.assertEqual(row.project_name, 'ProjectName')\n self.assertEqual(row.issue_ref, 'ProjectName:1')\n self.assertTrue('hotlist_id=29' in row.issue_ctx_url)\n self.assertTrue('sort=stars' in row.issue_ctx_url)\n\n\nclass GetAllProjectsOfIssuesTest(unittest.TestCase):\n\n issue_x_1 = tracker_pb2.Issue()\n issue_x_1.project_id = 789\n\n issue_x_2 = tracker_pb2.Issue()\n issue_x_2.project_id = 789\n\n issue_y_1 = tracker_pb2.Issue()\n issue_y_1.project_id = 678\n\n def testGetAllProjectsOfIssues_Normal(self):\n issues = [self.issue_x_1, self.issue_x_2]\n self.assertEqual(\n hotlist_helpers.GetAllProjectsOfIssues(issues),\n set([789]))\n issues = [self.issue_x_1, self.issue_x_2, self.issue_y_1]\n self.assertEqual(\n hotlist_helpers.GetAllProjectsOfIssues(issues),\n set([678, 789]))\n\n def testGetAllProjectsOfIssues_Empty(self):\n self.assertEqual(\n hotlist_helpers.GetAllProjectsOfIssues([]),\n set())\n\n\nclass HelpersUnitTest(unittest.TestCase):\n\n # TODO(jojwang): Write Tests for GetAllConfigsOfProjects\n def setUp(self):\n self.services = service_manager.Services(issue=fake.IssueService(),\n config=fake.ConfigService(),\n project=fake.ProjectService(),\n features=fake.FeaturesService(),\n user=fake.UserService())\n self.project = self.services.project.TestAddProject(\n 'ProjectName', project_id=1, 
owner_ids=[111])\n\n self.services.user.TestAddUser('annajowang@email.com', 111)\n self.services.user.TestAddUser('claremont@email.com', 222)\n self.issue1 = fake.MakeTestIssue(\n 1, 1, 'issue_summary', 'New', 111,\n project_name='ProjectName', labels='restrict-view-Googler')\n self.services.issue.TestAddIssue(self.issue1)\n self.issue3 = fake.MakeTestIssue(\n 1, 3, 'issue_summary3', 'New', 222, project_name='ProjectName')\n self.services.issue.TestAddIssue(self.issue3)\n self.issue4 = fake.MakeTestIssue(\n 1, 4, 'issue_summary4', 'Fixed', 222, closed_timestamp=232423,\n project_name='ProjectName')\n self.services.issue.TestAddIssue(self.issue4)\n self.issues = [self.issue1, self.issue3, self.issue4]\n self.mr = testing_helpers.MakeMonorailRequest()\n\n def testFilterIssues(self):\n test_allowed_issues = hotlist_helpers.FilterIssues(\n self.mr, self.issues, self.services)\n self.assertEqual(len(test_allowed_issues), 1)\n self.assertEqual(test_allowed_issues[0].local_id, 3)\n\n def testFilterIssues_ShowClosed(self):\n self.mr.can = 1\n test_allowed_issues = hotlist_helpers.FilterIssues(\n self.mr, self.issues, self.services)\n self.assertEqual(len(test_allowed_issues), 2)\n self.assertEqual(test_allowed_issues[0].local_id, 3)\n self.assertEqual(test_allowed_issues[1].local_id, 4)\n\n def testMembersWithoutGivenIDs(self):\n h = features_pb2.Hotlist()\n owners, editors, followers = hotlist_helpers.MembersWithoutGivenIDs(\n h, set())\n # Check lists are empty\n self.assertFalse(owners)\n self.assertFalse(editors)\n self.assertFalse(followers)\n\n h.owner_ids.extend([1, 2, 3])\n h.editor_ids.extend([4, 5, 6])\n h.follower_ids.extend([7, 8, 9])\n owners, editors, followers = hotlist_helpers.MembersWithoutGivenIDs(\n h, {10, 11, 12})\n self.assertEqual(h.owner_ids, owners)\n self.assertEqual(h.editor_ids, editors)\n self.assertEqual(h.follower_ids, followers)\n\n owners, editors, followers = hotlist_helpers.MembersWithoutGivenIDs(\n h, set())\n self.assertEqual(h.owner_ids, owners)\n self.assertEqual(h.editor_ids, editors)\n self.assertEqual(h.follower_ids, followers)\n\n owners, editors, followers = hotlist_helpers.MembersWithoutGivenIDs(\n h, {1, 4, 7})\n self.assertEqual([2, 3], owners)\n self.assertEqual([5, 6], editors)\n self.assertEqual([8, 9], followers)\n\n def testMembersWithGivenIDs(self):\n h = features_pb2.Hotlist()\n\n # empty GivenIDs give empty member lists from originally empty member lists\n owners, editors, followers = hotlist_helpers.MembersWithGivenIDs(\n h, set(), 'follower')\n self.assertFalse(owners)\n self.assertFalse(editors)\n self.assertFalse(followers)\n\n # empty GivenIDs return original non-empty member lists\n h.owner_ids.extend([1, 2, 3])\n h.editor_ids.extend([4, 5, 6])\n h.follower_ids.extend([7, 8, 9])\n owners, editors, followers = hotlist_helpers.MembersWithGivenIDs(\n h, set(), 'editor')\n self.assertEqual(owners, h.owner_ids)\n self.assertEqual(editors, h.editor_ids)\n self.assertEqual(followers, h.follower_ids)\n\n # non-member GivenIDs return updated member lists\n owners, editors, followers = hotlist_helpers.MembersWithGivenIDs(\n h, {10, 11, 12}, 'owner')\n self.assertEqual(owners, [1, 2, 3, 10, 11, 12])\n self.assertEqual(editors, [4, 5, 6])\n self.assertEqual(followers, [7, 8, 9])\n\n # member GivenIDs return updated member lists\n owners, editors, followers = hotlist_helpers.MembersWithGivenIDs(\n h, {1, 4, 7}, 'editor')\n self.assertEqual(owners, [2, 3])\n self.assertEqual(editors, [5, 6, 1, 4, 7])\n self.assertEqual(followers, [8, 9])\n\n def 
testGetURLOfHotlist(self):\n cnxn = 'fake cnxn'\n user = self.services.user.TestAddUser('claremont@email.com', 432)\n user.obscure_email = False\n hotlist1 = self.services.features.TestAddHotlist(\n 'hotlist1', hotlist_id=123, owner_ids=[432])\n url = hotlist_helpers.GetURLOfHotlist(\n cnxn, hotlist1, self.services.user)\n self.assertEqual('/u/claremont@email.com/hotlists/hotlist1', url)\n\n url = hotlist_helpers.GetURLOfHotlist(\n cnxn, hotlist1, self.services.user, url_for_token=True)\n self.assertEqual('/u/432/hotlists/hotlist1', url)\n\n user.obscure_email = True\n url = hotlist_helpers.GetURLOfHotlist(\n cnxn, hotlist1, self.services.user)\n self.assertEqual('/u/432/hotlists/hotlist1', url)\n\n # Test that a Hotlist without an owner has an empty URL.\n hotlist_unowned = self.services.features.TestAddHotlist('hotlist2',\n hotlist_id=234, owner_ids=[])\n url = hotlist_helpers.GetURLOfHotlist(cnxn, hotlist_unowned,\n self.services.user)\n self.assertFalse(url)\n","sub_path":"appengine/monorail/features/test/hotlist_helpers_test.py","file_name":"hotlist_helpers_test.py","file_ext":"py","file_size_in_byte":11171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"294640566","text":"import sys\nimport Trader\nfrom Trader import Trader\n\n# Test class for historical bitcoin data.\nclass Test(Trader):\n\n # File for historical test data.\n testFile = \".btceUSD.csv\"\n\n def __init__(self, deposit):\n self.ifile = open(self.testFile)\n self.prices = self.ifile.readlines()\n self.pos = 0\n Trader.__init__(self, deposit)\n\n # Gets the current price of bitcoin.\n def getCurrentPrice(self):\n if self.pos < len(self.prices):\n # return the price\n self.pos += 1\n return float(((self.prices[self.pos - 1]).split(','))[1])\n else:\n self.ifile.close()\n sys.exit(0)\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"619560173","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a careers24 spider created on top of the ATSSpider\n\nscrapy crawl careers24 -a url=\"http://www.careers24.com\" -a mining_job_id=999 -a iteration=1 -a extract=1\nsample url:\n http://www.careers24.com\n http://www.careers24.co.ke\n http://www.careers24.com.ng\n\"\"\"\nfrom urlparse import urljoin\nfrom re import compile\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, Prefix , Replace, UrlJoin\n\n\nclass Careers24(ATSSpider):\n\n name = \"careers24\"\n url_fragmentanchor = \"/jobs/?pagesize=100\"\n ref_reg = compile('.*/(\\d+)-')\n\n def parse(self, response):\n sel = Selector(response)\n\n # Set expected job count\n if not self.expected_job_count_set:\n expected_count = sel.xpath(\n \"//div[@class='search_header']/h3/text()\"\n ).extract()\n if expected_count:\n self.expected_job_count = expected_count[0]\n\n jobs = sel.xpath(\n \"//div[contains(@id, '_resultContainer')]\"\n )\n for job in jobs:\n link = job.xpath(\".//*[@itemprop='title']/a/@href\").extract()\n if link:\n job_url = urljoin(response.url, link[0])\n meta = {\n 'title': job.xpath(\n \".//*[@itemprop='title']/a/text()\"\n ).extract(),\n 'logo_url': job.xpath(\n \".//div[@class='recruiter_logo']/a/img/@src\"\n ).extract(),\n 'baseSalary': job.xpath(\n 
\".//*[@itemprop='baseSalary'][last()]/text()\"\n ).extract(),\n 'jobtype': job.xpath(\n \".//*[@itemprop='employmentType']/text()\"\n ).extract(),\n 'industry': job.xpath(\n \".//*[@itemprop='baseSalary'][1]/text()\"\n ).extract(),\n }\n yield Request(\n job_url, meta=meta, callback=self.parse_job_callback()\n )\n\n next_page = sel.xpath(\n \"//a[@class='next' and contains(text(),'next')]/@href\"\n ).extract()\n if next_page:\n next_url = urljoin(response.url, next_page[0])\n yield Request(next_url, callback=self.parse)\n\n def parse_job(self, response):\n sel = Selector(response)\n loader = BrightcorpItemLoader(selector=sel)\n loader.add_value('title', response.meta['title'])\n loader.add_value('baseSalary', response.meta['baseSalary'])\n loader.add_value('jobtype', response.meta['jobtype'])\n loader.add_value('industry', response.meta['industry'])\n loader.add_value('url', response.url)\n loader.add_value('apply_url', response.url)\n loader.add_value(\n 'logo_url', response.meta['logo_url'], UrlJoin(response.url)\n )\n loader.add_value(\n 'referencenumber', response.url, Prefix(self.name+\"-\"),\n re=self.ref_reg\n )\n loader.add_xpath(\n 'description',\n \"//div[text()='Job Details']|//div[@class='job_detail_container']/span[@itemprop='hiringOrganization']/node()\"\n )\n loader.add_xpath(\n 'company',\n \"//span[@itemprop='hiringOrganization']/span[@itemprop='name']/text()\"\n )\n loader.add_xpath(\n 'date',\n \"//div[@class='job_page_detail']//span[@class='posted']/text()\",\n ConvertDateString(\"on %A, %B %d, %Y\")\n )\n loader.add_xpath(\n 'expiration_date',\n \"//div[@class='job_page_detail']//span[contains(@id, '_lblClosingDate')]/text()\",\n ConvertDateString(\"Apply before %A, %B %d, %Y -\")\n )\n loader.add_xpath('requirements', \"//div[contains(@id, '_divCandReq')]\")\n loader.add_xpath('location', \"//span[@itemprop='jobLocation']/text()\")\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/careers24.py","file_name":"careers24.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"389856909","text":"import bs4 as bs\nimport time\nimport random\nimport requests\nfrom .login import login_website\nfrom selenium.common.exceptions import TimeoutException\n\nfrom Utils import DBHelper\n\ndef extract_source_torrent(base_url, db, table_name, section):\n TOTAL_ERROR_LIST = list()\n TOTAL_DATA_DIC = dict()\n\n webhandle = login_website(db)\n\n login_url = base_url + \"/login.php\"\n\n webhandle.get(base_url + \"/thread0806.php?fid=\" + str(section) + \"&search=&page=0\")\n soup = bs.BeautifulSoup(webhandle.page_source, \"lxml\")\n page_total = soup.findAll(\"input\")[0][\"onblur\"]\n total_page_count = page_total.split(\"=\")[-1].split(\"/\")[-1].split(\"'\")[0]\n cookies = webhandle.get_cookies()\n s = requests.Session()\n for cookie in cookies:\n s.cookies.set(cookie['name'], cookie['value'])\n\n for index in range(1, int(total_page_count) + 1):\n time.sleep(random.randint(1, 10))\n print(\"*********************************Current page index is : {}*********************************\".format(\n index))\n complete_url = base_url + \"/thread0806.php?fid=\" + str(section) + \"&search=&page=\" + str(index)\n try:\n webhandle.get(complete_url)\n #r = s.get(complete_url)\n except Exception as ex:\n print(ex.msg)\n try:\n webhandle.get(complete_url)\n except:\n webhandle.close()\n webhandle = login_website(db)\n try:\n webhandle.get(complete_url)\n except Exception as e:\n 
print(e)\n print(\"Skip this loop!\")\n continue\n\n source = webhandle.page_source\n #source = r.content\n soup = bs.BeautifulSoup(source, \"lxml\")\n for sub_item in soup.findAll(\"h3\"):\n if len(sub_item.findAll('a')) == 0:\n TOTAL_ERROR_LIST.append(str(sub_item))\n continue\n link = sub_item.findAll('a')[0]\n title = link.text\n try:\n full_link = \"/\".join(login_url.split('/')[:-1]) + '/' + link[\"href\"]\n except:\n print(link)\n TOTAL_ERROR_LIST.append(str(sub_item))\n continue\n # print(\"-\" * 100)\n if full_link in TOTAL_DATA_DIC:\n print(\"{} has already exixts\".format(full_link))\n continue\n if \"htm_data\" not in full_link:\n print(\"Invalid link\")\n continue\n\n cursor = DBHelper.fetchData(db, \"SELECT * FROM {} WHERE Link='{}'\".format(table_name, full_link))\n if cursor.fetchone() != None:\n print(\"Record has already existed!\")\n continue\n else:\n print(\"Title : {}\".format(title))\n print(\"link : {}\".format(full_link))\n print(\"\\n\")\n \n InsertSQL = u\"INSERT INTO {} (Title, Link, Section, TorrentLink, DownloadingCount) VALUES (%s, %s, %s, %s, %s )\".format(table_name)\n InsertData = (title, full_link, section, \"\", 0)\n try:\n DBHelper.insert_table(db, InsertSQL, InsertData)\n except Exception as err:\n print(err)\n continue\n\n webhandle.close()\n\n\n\n","sub_path":"module/cl_bbs/extract_source.py","file_name":"extract_source.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"522633089","text":"# Copyright (C) 2014 New York University\n# This file is part of ReproZip which is released under the Revised BSD License\n# See file LICENSE for full license details.\n\n# This file is shared:\n# reprozip/reprozip/utils.py\n# reprounzip/reprounzip/utils.py\n\n\"\"\"Utility functions.\n\nThese functions are shared between reprozip and reprounzip but are not specific\nto this software (more utilities).\n\n\"\"\"\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport codecs\nimport contextlib\nfrom datetime import datetime\nimport email.utils\nimport itertools\nimport locale\nimport logging\nimport operator\nimport os\nimport requests\nfrom rpaths import Path, PosixPath\nimport stat\nimport subprocess\nimport sys\nimport time\n\n\nlogger = logging.getLogger(__name__.split('.', 1)[0])\n\n\nclass StreamWriter(object):\n def __init__(self, stream):\n writer = codecs.getwriter(locale.getpreferredencoding())\n self._writer = writer(stream, 'replace')\n self.buffer = stream\n\n def writelines(self, lines):\n self.write(str('').join(lines))\n\n def write(self, obj):\n if isinstance(obj, bytes):\n self.buffer.write(obj)\n else:\n self._writer.write(obj)\n\n def __getattr__(self, name,\n getattr=getattr):\n\n \"\"\" Inherit all other methods from the underlying stream.\n \"\"\"\n return getattr(self._writer, name)\n\n\nPY3 = sys.version_info[0] == 3\n\n\nif PY3:\n izip = zip\n irange = range\n iteritems = lambda d: d.items()\n itervalues = lambda d: d.values()\n listvalues = lambda d: list(d.values())\n\n stdout_bytes = sys.stdout.buffer if sys.stdout is not None else None\n stderr_bytes = sys.stderr.buffer if sys.stderr is not None else None\n stdin_bytes = sys.stdin.buffer if sys.stdin is not None else None\n stdout, stderr = sys.stdout, sys.stderr\nelse:\n izip = itertools.izip\n irange = xrange # noqa: F821\n iteritems = lambda d: d.iteritems()\n itervalues = lambda d: d.itervalues()\n listvalues = lambda d: d.values()\n\n _writer = 
codecs.getwriter(locale.getpreferredencoding())\n stdout_bytes, stderr_bytes = sys.stdout, sys.stderr\n stdin_bytes = sys.stdin\n stdout, stderr = StreamWriter(sys.stdout), StreamWriter(sys.stderr)\n\n\nif PY3:\n int_types = int,\n unicode_ = str\nelse:\n int_types = int, long # noqa: F821\n unicode_ = unicode # noqa: F821\n\n\ndef flatten(n, iterable):\n \"\"\"Flattens an iterable by repeatedly calling chain.from_iterable() on it.\n\n >>> a = [[1, 2, 3], [4, 5, 6]]\n >>> b = [[7, 8], [9, 10, 11, 12, 13, 14, 15, 16]]\n >>> l = [a, b]\n >>> list(flatten(0, a))\n [[1, 2, 3], [4, 5, 6]]\n >>> list(flatten(1, a))\n [1, 2, 3, 4, 5, 6]\n >>> list(flatten(1, l))\n [[1, 2, 3], [4, 5, 6], [7, 8], [9, 10, 11, 12, 13, 14, 15, 16]]\n >>> list(flatten(2, l))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n \"\"\"\n for _ in irange(n):\n iterable = itertools.chain.from_iterable(iterable)\n return iterable\n\n\nclass UniqueNames(object):\n \"\"\"Makes names unique amongst the ones it's already seen.\n \"\"\"\n def __init__(self):\n self.names = set()\n\n def insert(self, name):\n assert name not in self.names\n self.names.add(name)\n\n def __call__(self, name):\n nb = 1\n attempt = name\n while attempt in self.names:\n nb += 1\n attempt = '%s_%d' % (name, nb)\n self.names.add(attempt)\n return attempt\n\n\ndef escape(s):\n \"\"\"Escapes backslashes and double quotes in strings.\n\n This does NOT add quotes around the string.\n \"\"\"\n return s.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')\n\n\ndef optional_return_type(req_args, other_args):\n \"\"\"Sort of namedtuple but with name-only fields.\n\n When deconstructing a namedtuple, you have to get all the fields:\n\n >>> o = namedtuple('T', ['a', 'b', 'c'])(1, 2, 3)\n >>> a, b = o\n ValueError: too many values to unpack\n\n You thus cannot easily add new return values. 
This class allows it:\n\n >>> o2 = optional_return_type(['a', 'b'], ['c'])(1, 2, 3)\n >>> a, b = o2\n >>> c = o2.c\n \"\"\"\n if len(set(req_args) | set(other_args)) != len(req_args) + len(other_args):\n raise ValueError\n\n # Maps argument name to position in each list\n req_args_pos = dict((n, i) for i, n in enumerate(req_args))\n other_args_pos = dict((n, i) for i, n in enumerate(other_args))\n\n def cstr(cls, *args, **kwargs):\n if len(args) > len(req_args) + len(other_args):\n raise TypeError(\n \"Too many arguments (expected at least %d and no more than \"\n \"%d)\" % (len(req_args),\n len(req_args) + len(other_args)))\n\n args1, args2 = args[:len(req_args)], args[len(req_args):]\n req = dict((i, v) for i, v in enumerate(args1))\n other = dict(izip(other_args, args2))\n\n for k, v in iteritems(kwargs):\n if k in req_args_pos:\n pos = req_args_pos[k]\n if pos in req:\n raise TypeError(\"Multiple values for field %s\" % k)\n req[pos] = v\n elif k in other_args_pos:\n if k in other:\n raise TypeError(\"Multiple values for field %s\" % k)\n other[k] = v\n else:\n raise TypeError(\"Unknown field name %s\" % k)\n\n args = []\n for i, k in enumerate(req_args):\n if i not in req:\n raise TypeError(\"Missing value for field %s\" % k)\n args.append(req[i])\n\n inst = tuple.__new__(cls, args)\n inst.__dict__.update(other)\n return inst\n\n dct = {'__new__': cstr}\n for i, n in enumerate(req_args):\n dct[n] = property(operator.itemgetter(i))\n return type(str('OptionalReturnType'), (tuple,), dct)\n\n\ndef tz_offset():\n offset = time.timezone if time.localtime().tm_isdst == 0 else time.altzone\n return -offset\n\n\ndef isodatetime():\n offset = tz_offset()\n sign = '+'\n if offset < 0:\n sign = '-'\n offset = -offset\n if offset % 60 == 0:\n offset = '%02d:%02d' % (offset // 3600, (offset // 60) % 60)\n else:\n offset = '%02d:%02d:%02d' % (offset // 3600, (offset // 60) % 60,\n offset % 60)\n # Remove microsecond\n now = datetime.now()\n now = datetime(year=now.year, month=now.month, day=now.day,\n hour=now.hour, minute=now.minute, second=now.second)\n return '%s%s%s' % (now.isoformat(),\n sign,\n offset)\n\n\ndef hsize(nbytes):\n \"\"\"Readable size.\n \"\"\"\n if nbytes is None:\n return \"unknown\"\n\n KB = 1 << 10\n MB = 1 << 20\n GB = 1 << 30\n TB = 1 << 40\n PB = 1 << 50\n\n nbytes = float(nbytes)\n\n if nbytes < KB:\n return \"{0} bytes\".format(nbytes)\n elif nbytes < MB:\n return \"{0:.2f} KB\".format(nbytes / KB)\n elif nbytes < GB:\n return \"{0:.2f} MB\".format(nbytes / MB)\n elif nbytes < TB:\n return \"{0:.2f} GB\".format(nbytes / GB)\n elif nbytes < PB:\n return \"{0:.2f} TB\".format(nbytes / TB)\n else:\n return \"{0:.2f} PB\".format(nbytes / PB)\n\n\ndef normalize_path(path):\n \"\"\"Normalize a path obtained from the database.\n \"\"\"\n # For some reason, os.path.normpath() keeps multiple leading slashes\n # We don't want this since it has no meaning on Linux\n path = PosixPath(path)\n if path.path.startswith(path._sep + path._sep):\n path = PosixPath(path.path[1:])\n return path\n\n\ndef find_all_links_recursive(filename, files):\n path = Path('/')\n for c in filename.components[1:]:\n # At this point, path is a canonical path, and all links in it have\n # been resolved\n\n # We add the next path component\n path = path / c\n\n # That component is possibly a link\n if path.is_link():\n # Adds the link itself\n files.add(path)\n\n target = path.read_link(absolute=True)\n # Here, target might contain a number of symlinks\n if target not in files:\n # Recurse on this new path\n 
find_all_links_recursive(target, files)\n # Restores the invariant; realpath might resolve several links here\n path = path.resolve()\n return path\n\n\ndef find_all_links(filename, include_target=False):\n \"\"\"Dereferences symlinks from a path.\n\n If include_target is True, this also returns the real path of the final\n target.\n\n Example:\n /\n a -> b\n b\n g -> c\n c -> ../a/d\n d\n e -> /f\n f\n >>> find_all_links('/a/g/e', True)\n ['/a', '/b/c', '/b/g', '/b/d/e', '/f']\n \"\"\"\n files = set()\n filename = Path(filename)\n assert filename.absolute()\n path = find_all_links_recursive(filename, files)\n files = list(files)\n if include_target:\n files.append(path)\n return files\n\n\ndef join_root(root, path):\n \"\"\"Prepends `root` to the absolute path `path`.\n \"\"\"\n p_root, p_loc = path.split_root()\n assert p_root == b'/'\n return root / p_loc\n\n\n@contextlib.contextmanager\ndef make_dir_writable(directory):\n \"\"\"Context-manager that sets write permission on a directory.\n\n This assumes that the directory belongs to you. If the u+w permission\n wasn't set, it gets set in the context, and restored to what it was when\n leaving the context. u+x also gets set on all the directories leading to\n that path.\n \"\"\"\n uid = os.getuid()\n\n try:\n sb = directory.stat()\n except OSError:\n pass\n else:\n if sb.st_uid != uid or sb.st_mode & 0o700 == 0o700:\n yield\n return\n\n # These are the permissions to be restored, in reverse order\n restore_perms = []\n try:\n # Add u+x to all directories up to the target\n path = Path('/')\n for c in directory.components[1:-1]:\n path = path / c\n sb = path.stat()\n if sb.st_uid == uid and not sb.st_mode & 0o100:\n logger.debug(\"Temporarily setting u+x on %s\", path)\n restore_perms.append((path, sb.st_mode))\n path.chmod(sb.st_mode | 0o700)\n\n # Add u+wx to the target\n sb = directory.stat()\n if sb.st_uid == uid and sb.st_mode & 0o700 != 0o700:\n logger.debug(\"Temporarily setting u+wx on %s\", directory)\n restore_perms.append((directory, sb.st_mode))\n directory.chmod(sb.st_mode | 0o700)\n\n yield\n finally:\n for path, mod in reversed(restore_perms):\n path.chmod(mod)\n\n\ndef rmtree_fixed(path):\n \"\"\"Like :func:`shutil.rmtree` but doesn't choke on annoying permissions.\n\n If a directory with -w or -x is encountered, it gets fixed and deletion\n continues.\n \"\"\"\n if path.is_link():\n raise OSError(\"Cannot call rmtree on a symbolic link\")\n\n uid = os.getuid()\n st = path.lstat()\n\n if st.st_uid == uid and st.st_mode & 0o700 != 0o700:\n path.chmod(st.st_mode | 0o700)\n\n for entry in path.listdir():\n if stat.S_ISDIR(entry.lstat().st_mode):\n rmtree_fixed(entry)\n else:\n entry.remove()\n\n path.rmdir()\n\n\n# Compatibility with ReproZip <= 1.0.3\ncheck_output = subprocess.check_output\n\n\ndef copyfile(source, destination, CHUNK_SIZE=4096):\n \"\"\"Copies from one file object to another.\n \"\"\"\n while True:\n chunk = source.read(CHUNK_SIZE)\n if chunk:\n destination.write(chunk)\n if len(chunk) != CHUNK_SIZE:\n break\n\n\ndef download_file(url, dest, cachename=None, ssl_verify=None):\n \"\"\"Downloads a file using a local cache.\n\n If the file cannot be downloaded or if it wasn't modified, the cached\n version will be used instead.\n\n The cache lives in ``~/.cache/reprozip/``.\n \"\"\"\n if cachename is None:\n if dest is None:\n raise ValueError(\"One of 'dest' or 'cachename' must be specified\")\n cachename = dest.components[-1]\n\n headers = {}\n\n if 'XDG_CACHE_HOME' in os.environ:\n cache = 
Path(os.environ['XDG_CACHE_HOME'])\n else:\n cache = Path('~/.cache').expand_user()\n cache = cache / 'reprozip' / cachename\n if cache.exists():\n mtime = email.utils.formatdate(cache.mtime(), usegmt=True)\n headers['If-Modified-Since'] = mtime\n\n cache.parent.mkdir(parents=True)\n\n try:\n response = requests.get(url, headers=headers,\n timeout=2 if cache.exists() else 10,\n stream=True, verify=ssl_verify)\n response.raise_for_status()\n if response.status_code == 304:\n raise requests.HTTPError(\n '304 File is up to date, no data returned',\n response=response)\n except requests.RequestException as e:\n if cache.exists():\n if e.response and e.response.status_code == 304:\n logger.info(\"Download %s: cache is up to date\", cachename)\n else:\n logger.warning(\"Download %s: error downloading %s: %s\",\n cachename, url, e)\n if dest is not None:\n cache.copy(dest)\n return dest\n else:\n return cache\n else:\n raise\n\n logger.info(\"Download %s: downloading %s\", cachename, url)\n try:\n with cache.open('wb') as f:\n for chunk in response.iter_content(4096):\n f.write(chunk)\n response.close()\n except Exception as e: # pragma: no cover\n try:\n cache.remove()\n except OSError:\n pass\n raise e\n logger.info(\"Downloaded %s successfully\", cachename)\n\n if dest is not None:\n cache.copy(dest)\n return dest\n else:\n return cache\n","sub_path":"reprounzip/reprounzip/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"254700923","text":"#!usr/bin/env python\n\"\"\"\nFirst let's cut A into two parts at a random position i:\n\n left_A | right_A\nA[0], A[1], ..., A[i-1] | A[i], A[i+1], ..., A[m-1]\nSince A has m elements, so there are m+1 kinds of cutting( i = 0 ~ m ). And we know: len(left_A) = i, len(right_A) = m - i . Note: when i = 0 , left_A is empty, and when i = m , right_A is empty.\n\nWith the same way, cut B into two parts at a random position j:\n\n left_B | right_B\nB[0], B[1], ..., B[j-1] | B[j], B[j+1], ..., B[n-1]\nPut left_A and left_B into one set, and put right_A and right_B into another set. Let's name them left_part and right_part :\n\n left_part | right_part\nA[0], A[1], ..., A[i-1] | A[i], A[i+1], ..., A[m-1]\nB[0], B[1], ..., B[j-1] | B[j], B[j+1], ..., B[n-1]\nIf we can ensure:\n\n1) len(left_part) == len(right_part)\n2) max(left_part) <= min(right_part)\nthen we divide all elements in {A, B} into two parts with equal length, and one part is always greater than the other. Then median = (max(left_part) + min(right_part))/2.\n\nTo ensure these two conditions, we just need to ensure:\n\n(1) i + j == m - i + n - j (or: m - i + n - j + 1)\n if n >= m, we just need to set: i = 0 ~ m, j = (m + n + 1)/2 - i\n(2) B[j-1] <= A[i] and A[i-1] <= B[j]\nps.1 For simplicity, I presume A[i-1],B[j-1],A[i],B[j] are always valid even if i=0/i=m/j=0/j=n . I will talk about how to deal with these edge values at last.\n\nps.2 Why n >= m? Because I have to make sure j is non-nagative since 0 <= i <= m and j = (m + n + 1)/2 - i. 
If n < m , then j may be negative, that will lead to wrong result.\n\nSo, all we need to do is:\n\nSearching i in [0, m], to find an object `i` that:\n    B[j-1] <= A[i] and A[i-1] <= B[j], ( where j = (m + n + 1)/2 - i )\nAnd we can do a binary search following steps described below:\n\n<1> Set imin = 0, imax = m, then start searching in [imin, imax]\n\n<2> Set i = (imin + imax)/2, j = (m + n + 1)/2 - i\n\n<3> Now we have len(left_part)==len(right_part). And there are only 3 situations\n     that we may encounter:\n    B[j-1] <= A[i] and A[i-1] <= B[j]\n        Means we have found the object `i`, so stop searching.\n    B[j-1] > A[i]\n        Means A[i] is too small. We must `adjust` i to get `B[j-1] <= A[i]`.\n        Can we `increase` i?\n            Yes. Because when i is increased, j will be decreased.\n            So B[j-1] is decreased and A[i] is increased, and `B[j-1] <= A[i]` may\n            be satisfied.\n        Can we `decrease` i?\n            `No!` Because when i is decreased, j will be increased.\n            So B[j-1] is increased and A[i] is decreased, and B[j-1] <= A[i] will\n            never be satisfied.\n        So we must `increase` i. That is, we must adjust the searching range to\n        [i+1, imax]. So, set imin = i+1, and goto <2>.\n    A[i-1] > B[j]\n        Means A[i-1] is too big. And we must `decrease` i to get `A[i-1]<=B[j]`.\n        That is, we must adjust the searching range to [imin, i-1].\n        So, set imax = i-1, and goto <2>.\nWhen the object i is found, the median is:\n\nmax(A[i-1], B[j-1]) (when m + n is odd)\nor (max(A[i-1], B[j-1]) + min(A[i], B[j]))/2 (when m + n is even)\n\n\"\"\"\ndef median(A,B):\n    m,n=len(A),len(B)\n    if m>n:\n        A,B,m,n=B,A,n,m\n    if n==0:\n        raise ValueError\n    imin, imax, half_len=0,m,(m+n+1)/2\n    while imin<=imax:\n        i = int((imin+imax)/2)\n        j = int(half_len-i)\n        if i<m and B[j-1]>A[i]:\n            imin=i+1\n        elif i>0 and A[i-1]>B[j]:\n            imax=i-1\n        else:\n            #i is perfect\n            if i==0:\n                max_left=B[j-1]\n            elif j==0:\n                max_left=A[i-1]\n            else:\n                max_left=max(A[i-1],B[j-1])\n\n            if (m+n)%2==1:\n                return max_left\n            if i==m:\n                min_right=B[j]\n            elif j==n:\n                min_right=A[i]\n            else:\n                min_right=min(A[i],B[j])\n            print(max_left,min_right)\n            return (max_left+min_right)/2.0\nA=[1,3]\nB=[2]\nprint(median(A,B))\n\n    \n","sub_path":"leetcode/4.MedianofTwoSortedArrays/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"89607227","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api\nfrom threading import Thread\nimport DetectEmotionsOnVideo as Deov\n\napp = Flask(__name__)\napi = Api(app)\nfrom flask import jsonify\n\nthread = Thread(target=Deov.DetectEmotionsOnVideo.start_video)\nthread.start()\n\n\nclass RunVideo(Resource):\n    @staticmethod\n    def get():\n        url = request.args.get('url')\n        if url in Deov.pendingResults:\n            result = Deov.pendingResults[url]\n            if result.progress == 100:\n                del Deov.pendingResults[url]\n            res = \"{\\\"positionInQueue\\\":\" + str(result.positionInQueue) + \\\n                  \",\\\"progress\\\":\" + str(result.progress) + \\\n                  \",\\\"emotions\\\":\" + str(result.emotions) + \\\n                  \",\\\"error\\\":\" + str(result.error) + \"}\"\n            return res\n\n        result = Deov.Result(Deov.queue.qsize() + 1, 0, [])\n        Deov.pendingResults[url] = result\n        Deov.queue.put(url)\n        res = \"{\\\"positionInQueue\\\":\" + str(result.positionInQueue) + \\\n              \",\\\"progress\\\":\" + str(result.progress) + \\\n              \",\\\"emotions\\\":\" + str(result.emotions) + \\\n              \",\\\"error\\\":\" + str(result.error) + \"}\"\n        return res\n\n\napi.add_resource(RunVideo, '/runVideo')\n\nif __name__ == 
'__main__':\n app.run(port='8080')\n","sub_path":"W2W_Py_Backend/RequestListener.py","file_name":"RequestListener.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"141317381","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom pathlib import Path\n\nfrom pandas import read_csv\n\n\ndef make_epsg_4326_latlon_dataset():\n logger = logging.getLogger(__name__)\n\n parties = ['PT', 'PSDB']\n min_latitude, max_latitude = -180, 180\n min_longitude, max_longitude = -90, 90\n\n interim_dir = Path(data_dir, 'interim').resolve()\n processed_dir = Path(data_dir, 'processed').resolve()\n latlon_dir = Path(interim_dir, 'latlon').resolve()\n\n epsg_4326_latlon_dir = Path(processed_dir, 'epsg_4326').resolve()\n epsg_4326_latlon_dir.mkdir(exist_ok=True)\n for party in parties:\n logger.info('starting to normalize {} series_latlon data'.format(party))\n\n file_path = Path(latlon_dir, party, 'latlon.csv').resolve()\n dataset = read_csv(file_path)\n\n dataset.latitude = (dataset.latitude -\n min_latitude) / (max_latitude - min_latitude)\n dataset.longitude = (dataset.longitude -\n min_longitude) / (max_longitude - min_longitude)\n\n party_dir = Path(epsg_4326_latlon_dir, party).resolve()\n party_dir.mkdir(exist_ok=True)\n file_path = Path(party_dir, 'latlon.csv').resolve()\n dataset.to_csv(file_path, index=False)\n\n logger.info('done normalizing {} series_latlon data'.format(party))\n\n\ndef main():\n \"\"\" Runs data processing scripts to turn brazil voting data from (../interim) into\n presidential data (saved in ../interim/presidential).\n \"\"\"\n logger = logging.getLogger(__name__)\n\n logger.info(\n 'normalizing series_latlon dataset with EPSG:4326... Saving at ../data/processed/latlon')\n make_epsg_4326_latlon_dataset()\n logger.info(\n 'done normalizing series_latlon dataset with EPSG:4326... Saved at ../data/processed/latlon')\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n project_dir = Path(__file__).resolve().parents[2]\n data_dir = Path(project_dir, 'data').resolve()\n\n main()\n","sub_path":"src/data/8_3_make_epsg_4326_latlon_dataset.py","file_name":"8_3_make_epsg_4326_latlon_dataset.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"341703305","text":"# -*- coding: utf-8 -*-\n\nfrom hamcrest import assert_that, has_key, equal_to_ignoring_whitespace\n\nfrom hermes.org import OrgApi\n\n\nclass TestOrg(object):\n\n def test_check_org_hierarchies_response_items(self, logged_client):\n _, result = OrgApi.hierarchies()\n assert_that(result[0], has_key('id'),\n 'Expect that result has _id_ key.')\n assert_that(result[0], has_key('children'),\n 'Expect that result has _children_ key.')\n assert_that(result[0], has_key('name'),\n 'Expect that result has _name_ key.')\n\n def test_check_org_hierarchies(self, logged_client, config):\n _, result = OrgApi.hierarchies()\n assert_that(result[0]['name'],\n equal_to_ignoring_whitespace(config['org2']['name']),\n 'Expect that the org name is \"%s\", but was \"%s\".' % (\n config['org2']['name'], result[0]['name']\n ))\n assert_that(result[0]['children'][0]['name'],\n equal_to_ignoring_whitespace(config['org4']['name']),\n 'Expected that org child name is \"%s\", but was \"%s\".' 
% (\n config['org4']['name'],\n result[0]['children'][0]['name']\n ))\n assert_that(result[1]['name'],\n equal_to_ignoring_whitespace(config['org3']['name']),\n 'Expect that the org name is \"%s\", but was \"%s\".' % (\n config['org3']['name'], result[1]['name']\n ))\n assert_that(result[1]['children'][0]['name'],\n equal_to_ignoring_whitespace(config['org5']['name']),\n 'Expected that org child name is \"%s\", but was \"%s\".' % (\n config['org5']['name'],\n result[1]['children'][0]['name']\n ))\n","sub_path":"hermes_api_tests/tests/orgs/test_org_hierarchies.py","file_name":"test_org_hierarchies.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"366908843","text":"import numpy as np\nimport itertools\n\n\n# Функционал, реализующий комбинаторные задачи\n\n# На вход:\n# n_elems - количество элементов\n# capacities - list с возможными емкостями контейнеров\ndef get_containers_by_elems(n_elems, capacities):\n capacities.sort()\n res_array = []\n\n for m in range(1, n_elems):\n current_min_sum = capacities[0] * m\n current_max_sum = capacities[-1] * m\n if current_min_sum > n_elems or current_max_sum < n_elems:\n continue\n\n res_array.append([p for p in itertools.combinations_with_replacement(capacities, r=m)])\n\n containers = []\n for res_series in res_array:\n for res in res_series:\n if sum(res) == n_elems:\n containers.append(res)\n\n # На выход - list с перечислением контейнеров, где значение - емкость контейнера\n return containers\n\n\n# if __name__ == '__main__':\n# containers_list_outer = [2, 4, 6, 7]\n# n = 20\n#\n# print(get_containers_by_elems(n, containers_list_outer))\n","sub_path":"ecm_linkers/combinatorics.py","file_name":"combinatorics.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"560519538","text":"import numpy as num\nimport csv\nfrom sklearn.svm import SVC\nimport matplotlib.pyplot as plt\n\n\nwith open(\"Grand-slams-men-2013.csv\") as f:\n csv_list = list(csv.reader(f))\n\nwnr1_list = num.array([])\nwnr2_list = num.array([])\nresult_list = num.array([])\nfor row in csv_list:\n if row != csv_list[0]:\n wnr1_list = num.append(wnr1_list, int(row[12]))\n wnr2_list = num.append(wnr2_list, int(row[30]))\n result_list = num.append(result_list, int(row[3]))\n\n\nX = num.vstack((wnr1_list, wnr2_list)).T\nY = result_list.T\n\nX_Train = X[0:200, :]\nX_Test = X[200:, :]\nY_Train = Y[0:200]\nY_Test = Y[200:]\n\nY_pred_1 = num.array([])\nY_pred_2 = num.array([])\nY_pred_3 = num.array([])\n\nreg1 = SVC(kernel = 'linear')\nreg2 = SVC(kernel = 'poly')\nreg3 = SVC(kernel = 'rbf')\n\nreg1.fit(X_Train,Y_Train)\nreg2.fit(X_Train, Y_Train)\nreg3.fit(X_Train, Y_Train)\n\n\nY_pred_1 = reg1.predict(X_Test)\nY_pred_2 = reg2.predict(X_Test)\nY_pred_3 = reg3.predict(X_Test)\n\n\nX0 = num.array([])\nX1 = num.array([])\nX0A = num.array([])\nX0B = num.array([])\nX1A = num.array([])\nX1B = num.array([])\nind = num.array([], dtype=\"int\")\nind2 = num.array([], dtype=\"int\")\nind3 = num.array([], dtype=\"int\")\n\n##numpy.delete(arr, obj, axis=None)[source]\n##Return a new array with sub-arrays along an axis deleted. 
For a one dimensional array, this returns those entries not returned by arr[obj].\n\nfor i in range(len(Y_pred_1)) :\n if Y_pred_1[i] == 0:\n ind = num.append(ind, i)\n\nX0 = X_Test[ind]\nX1 = num.delete(X_Test, ind, 0)\n\nfor i in range(len(Y_pred_2)) :\n if Y_pred_2[i] == 0 :\n ind2 = num.append(ind2, i)\n\nX0A = X_Test[ind2]\nX1A = num.delete(X_Test, ind2, 0)\n\nfor i in range(len(Y_pred_3)):\n if Y_pred_3[i] == 0:\n ind3 = num.append(ind3, i)\n\nX0B = X_Test[ind3]\nX1B = num.delete(X_Test, ind3, 0)\n\n\nplt. figure()\nplt.scatter(X0[:, 0],X0[:, 1], color =\"pink\")\nplt.scatter(X1[:, 0],X1[:, 1], color =\"brown\")\n\nplt.figure()\nplt.scatter(X0A[:, 0],X0A[:, 1], color= \"yellow\")\nplt.scatter(X1A[:, 0],X1A[:, 1], color =\"black\")\n\nplt.figure()\nplt.scatter(X0B[:, 0],X0B[:, 1], color=\"green\")\nplt.scatter(X1B[:, 0],X1B[:, 1], color=\"orange\")\nplt.show()\n\n\n","sub_path":"LAB 7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"334439583","text":"#!/usr/bin/env python\nimport optparse\nimport matplotlib.pyplot as plt\nimport pickle as pl\n\n\nif __name__ == '__main__':\n\n parser = optparse.OptionParser(usage='usage: %prog run event ', version='%prog 1.0')\n parser.add_option('-c', '--cmap' , type='string' , default='gray_r' , help='palette for 2D image')\n parser.add_option('-s', '--step' , type='string' , default='raw' , help='step of the reconstruction: raw,1st,2nd,all,sc')\n\n (options, args) = parser.parse_args()\n\n font = {'family': 'arial',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 24,\n }\n\n suff = {'raw': 'oriIma',\n '1st': '1st_3D',\n '2nd': '2nd_3D',\n 'all': 'all_3D',\n 'sc' : 'sc_3D',\n }\n \n \n fig_handle = pl.load(open('pic_run0{run}_ev{ev}_{step}.pkl'.format(run=args[0],ev=args[1],step=suff[options.step]),'rb'))\n plt.set_cmap(options.cmap)\n if options.step=='raw':\n plt.title('Image after zero suppression', font, pad=40)\n plt.xlabel('x (pixels)', font, labelpad=20)\n plt.ylabel('y (pixels)', font, labelpad=20)\n plt.clim(vmin=-5,vmax=10)\n if int(args[0])==2317 and int(args[1])==8: ## example of split track\n plt.clim(vmin=0,vmax=25)\n csize = 160\n plt.xlim(4*240,4*(240+csize))\n plt.ylim(4*70,4*(70+csize))\n elif int(args[0])==2097 and int(args[1])==317: # ambe 60/40 (6 keV NR candidate)\n plt.clim(vmin=0,vmax=40)\n csize = 100\n plt.xlim(1200,1200+csize)\n plt.ylim(880,880+csize)\n elif int(args[0])==2097 and int(args[1])==59: # ambe 60/40 (6 keV NR candidate)\n plt.clim(vmin=0,vmax=40)\n csize = 100\n plt.xlim(660,660+csize)\n plt.ylim(1020,1020+csize)\n\n else:\n plt.title('Rebinned image', font, pad=40)\n plt.xlabel('x (macro-pixels)', font, labelpad=20)\n plt.ylabel('y (macro-pixels)', font, labelpad=20)\n if int(args[0])==1843: ## iron\n plt.clim(vmin=98,vmax=120)\n csize = 60\n plt.xlim(240,240+csize)\n plt.ylim(170,170+csize)\n elif int(args[0])==2317 and int(args[1])==8: ## example of split track\n plt.clim(vmin=100,vmax=110)\n csize = 160\n plt.xlim(240,240+csize)\n plt.ylim(70,70+csize)\n elif int(args[0])==2156: # cosmics\n plt.clim(vmin=98,vmax=110) \n \n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n\n #plt.show()\n\n for ext in ['pdf','png']:\n plt.savefig('pic_run0{run}_ev{ev}_{step}_paper.{ext}'.format(run=args[0],ev=args[1],step=suff[options.step],ext=ext))\n plt.gcf().clear()\n 
plt.close('all')\n","sub_path":"debug_code/plot_pickled_image.py","file_name":"plot_pickled_image.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"447303065","text":"def d(num):\n\tadd=0\n\tfor i in range(1,num):\n\t\tif not num%i:add+=i\n\treturn(add)\nsets = []\nfor a in range(1,10000):\n b = d(a)\n if (d(b) == a and a!=b):\n sets.append(a)\n sets.append(b)\n\nprint(sum(set(sets))) \n","sub_path":"PR_PY/pe21.py","file_name":"pe21.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"449502016","text":"from collections import defaultdict\nfrom ctree.tune import TuningDriver, Result\nimport itertools\nimport hpgmg\n\nimport numpy as np\n\n__author__ = 'nzhang-dev'\n\n\nclass SmoothTuningDriver(TuningDriver):\n def __init__(self, objective):\n super(SmoothTuningDriver, self).__init__()\n self.iterators = {}\n self.best_results = defaultdict(Result)\n self.last_configs = {}\n self.best_configs = defaultdict(tuple)\n self.exhausted = defaultdict(bool)\n self.objective = objective\n self.args = None\n self.subconfig = None\n\n def _get_configs(self):\n if not hpgmg.finite_volume.CONFIG.tune:\n while True:\n #print hpgmg.finite_volume.CONFIG.block_hierarchy\n yield hpgmg.finite_volume.CONFIG.block_hierarchy\n self.args, self.subconfig = yield () # will always try this\n while True:\n #print(self.args, self.subconfig, self.iterators.keys())\n shape = self.subconfig['level'].interior_space\n #print(shape)\n if shape not in self.iterators:\n #print(shape)\n logs = tuple(int(np.log2(i)) for i in shape)\n iteration_space = [\n (2**k for k in range(i+1)) for i in logs\n ]\n self.iterators[shape] = itertools.product(*iteration_space)\n #print(self.iterators[shape])\n try:\n result = next(self.iterators[shape])\n self.last_configs[shape] = result\n except StopIteration:\n result = self.best_configs[shape]\n self.exhausted[shape] = True\n response = yield result\n if response is not None:\n self.args, self.subconfig = response\n\n def is_exhausted(self):\n try:\n shape = self.subconfig['level'].interior_space\n return self.exhausted[shape]\n except TypeError:\n return False\n\n def report(self, *args, **kwargs):\n if self.subconfig is not None:\n shape = self.subconfig['level'].interior_space\n result = Result(*args, **kwargs)\n if self.objective.compare(result, self.best_results[shape]):\n self.best_results[shape] = result\n self.best_configs[shape] = self.last_configs[shape]\n print(self.best_configs[shape])","sub_path":"hpgmg/finite_volume/operators/tune/tuners.py","file_name":"tuners.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"133549242","text":"from baseAgent import HBDagent\nimport random\nimport minimax\n\nclass dubSeven(HBDagent):\n def __init__(self):\n self.isChecked = False\n self.player = None\n self.enemy = None\n\n def step(self, state):\n '''\n Input:\n state - It's a 2D array of the entire board at the current time\n\n This function should return :\n x - the x coordinate on the board\n y - the y coordinate on the board\n quadrant - the quadrant that the user wants to rotate \n This is divided into 4 parts \n 1 - board[:3, :3]\n 2 - board[:3, 3:]\n 3 - board[3:, :3]\n 4 - board[3:, 3:]\n direction - this is the direction in which the quadrant should rotate in\n 1 - anticlockwise\n -1 - 
clockwise\n '''\n\n if (not self.isChecked):\n for i in state:\n for j in i:\n if (j == 1):\n self.player = 2\n self.enemy = 1\n else:\n self.player = 1\n self.enemy = 2\n self.isChecked = True\n action = minimax.getBestAction(state.tolist(), 6, self.player, self.enemy)\n x, y = action.x_coordinate, action.y_coordinate\n if (action.square_index == 2):\n quadrant = 3\n elif (action.square_index == 3):\n quadrant = 2\n else:\n quadrant = action.square_index\n if (action.direction == 'L'):\n direction = 1\n else:\n direction = -1\n\n\n return [x, y, quadrant, direction]\n","sub_path":"dubSeven.py","file_name":"dubSeven.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"246198845","text":"import csv\n\ndef add_list_to_csv(list, filename):\n new_rows_list = []\n with open(filename, 'r') as in_csv:\n reader = csv.reader(in_csv)\n for row, new_col in zip(reader, list):\n row.append(new_col)\n new_rows_list.append(row)\n in_csv.close()\n\n # Do the writing\n with open(filename, 'w+', newline='') as out_csv:\n writer = csv.writer(out_csv)\n for row in new_rows_list:\n writer.writerow(row)\n out_csv.close()\n","sub_path":"add_column.py","file_name":"add_column.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"532290357","text":"from googleapiclient import discovery, errors\r\nimport google_auth_oauthlib.flow\r\nfrom bs4 import BeautifulSoup\r\nimport ast\r\nimport requests\r\nimport requests_html\r\nimport os\r\nimport re\r\nimport sys\r\nimport youtube_dl\r\nimport setting\r\n\r\n\r\n# Set DEVELOPER_KEY to the API key value from the APIs & auth > Registered apps\r\n# tab of\r\n# https://cloud.google.com/console\r\n# Please ensure that you have enabled the YouTube Data API for your project.\r\nYOUTUBE_API_SERVICE_NAME = \"youtube\"\r\nYOUTUBE_API_VERSION = \"v3\"\r\nYOUTUBE_URL = \"https://www.youtube.com\"\r\n\r\ndef auth():\r\n youtube = discovery.build(YOUTUBE_API_SERVICE_NAME,\r\n YOUTUBE_API_VERSION, developerKey=setting.DEVELOPER_KEY)\r\n return youtube\r\n\r\n\r\ndef getVideoData(youtube, options):\r\n\r\n # Call the search.list method to retrieve results matching the specified\r\n # query term.\r\n search_response = youtube.search().list(\r\n q=options.q,\r\n part=\"id,snippet\",\r\n maxResults=options.max_results,\r\n channelId=options.channel_id,\r\n order='date'\r\n ).execute()\r\n\r\n videos = []\r\n\r\n # Add each result to the appropriate list, and then display the lists of\r\n # matching videos, channels, and playlists.\r\n for search_result in search_response.get(\"items\", []):\r\n if search_result[\"id\"][\"kind\"] == \"youtube#video\":\r\n videos.append(\"%s (%s)\" % (search_result[\"snippet\"][\"title\"],\r\n search_result[\"id\"][\"videoId\"]))\r\n\r\n print(\"Videos:\\n\", \"\\n\".join(videos), \"\\n\")\r\n\r\n return videos\r\n\r\n\r\ndef getAudioFromVideo(video_id):\r\n\r\n url = YOUTUBE_URL + '/watch?v=' + video_id\r\n output_file = setting.TMP_FILE_DIR + \"audio\" + '.%(ext)s'\r\n\r\n ydl_opts = {\r\n 'outtmpl': output_file,\r\n #'format': 'bestaudio/best',\r\n 'postprocessors': [{\r\n 'key': 'FFmpegExtractAudio',\r\n 'preferredcodec': 'flac',\r\n }],\r\n }\r\n\r\n ydl = youtube_dl.YoutubeDL(ydl_opts)\r\n info_dict = ydl.extract_info(url, download=True)\r\n\r\n return output_file\r\n\r\n\r\ndef getCommentData(video_id):\r\n # Set up variables for requests.\r\n target_url = 
YOUTUBE_URL + \"/watch?v=\" + video_id\r\n dict_str = ''\r\n next_url = ''\r\n comment_data = []\r\n session = requests_html.HTMLSession()\r\n headers = {\r\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'}\r\n\r\n # Get the video page.\r\n resp = session.get(target_url)\r\n resp.html.render(sleep=3)\r\n\r\n # Retrieve the title and sanitize so it is a valid filename.\r\n title = resp.html.find('title')\r\n title = title[0].text.replace(' - YouTube', '')\r\n title = get_valid_filename(title)\r\n\r\n print(title)\r\n\r\n # Regex match for emoji.\r\n RE_EMOJI = re.compile('[\\U00010000-\\U0010ffff]', flags=re.UNICODE)\r\n\r\n # Find any live_chat_replay elements, get URL for next live chat message.\r\n for iframe in resp.html.find(\"iframe\"):\r\n if \"live_chat_replay\" in iframe.attrs[\"src\"]:\r\n next_url = \"\".join([YOUTUBE_URL, iframe.attrs[\"src\"]])\r\n\r\n if not next_url:\r\n print(\"Couldn't find live_chat_replay iframe. Maybe try running again?\")\r\n sys.exit(1)\r\n\r\n # TODO - We should fail fast if next_url is empty, otherwise you get error:\r\n # Invalid URL '': No schema supplied. Perhaps you meant http://?\r\n\r\n # TODO - This loop is fragile. It loops endlessly when some exceptions are hit.\r\n while(1):\r\n\r\n try:\r\n html = session.get(next_url, headers=headers)\r\n soup = BeautifulSoup(html.text, 'lxml')\r\n\r\n # Loop through all script tags.\r\n for script in soup.find_all('script'):\r\n script_text = str(script)\r\n if 'ytInitialData' in script_text:\r\n dict_str = ''.join(script_text.split(\" = \")[1:])\r\n\r\n # Capitalize booleans so JSON is valid Python dict.\r\n dict_str = dict_str.replace(\"false\", \"False\")\r\n dict_str = dict_str.replace(\"true\", \"True\")\r\n\r\n # Strip extra HTML from JSON.\r\n dict_str = re.sub(r'};.*\\n?.*<\\/script>', '}', dict_str)\r\n\r\n # Correct some characters.\r\n dict_str = dict_str.rstrip(\" \\n;\")\r\n\r\n # TODO: I don't seem to have any issues with emoji in the messages.\r\n dict_str = RE_EMOJI.sub(r'', dict_str)\r\n\r\n # Evaluate the cleaned up JSON into a python dict.\r\n dics = ast.literal_eval(dict_str)\r\n\r\n # TODO: On the last pass this returns KeyError since there are no more\r\n # continuations or actions. 
Should probably just break in that case.\r\n continue_url = dics[\"continuationContents\"][\"liveChatContinuation\"][\r\n \"continuations\"][0][\"liveChatReplayContinuationData\"][\"continuation\"]\r\n print('Found another live chat continuation:')\r\n print(continue_url)\r\n next_url = \"https://www.youtube.com/live_chat_replay?continuation=\" + continue_url\r\n\r\n # Extract the data for each live chat comment.\r\n for samp in dics[\"continuationContents\"][\"liveChatContinuation\"][\"actions\"]:\r\n\r\n # 全コメは重いのでスパチャだけ抽出する\r\n if \"replayChatItemAction\" in samp and 'actions' in samp[\"replayChatItemAction\"] and 'addChatItemAction' in samp[\"replayChatItemAction\"][\"actions\"][0]:\r\n if 'liveChatPaidMessageRenderer' in samp[\"replayChatItemAction\"][\"actions\"][0]['addChatItemAction']['item']:\r\n comment_data.append(str(samp))\r\n\r\n except requests.ConnectionError:\r\n print(\"Connection Error\")\r\n continue\r\n except requests.HTTPError:\r\n print(\"HTTPError\")\r\n break\r\n except requests.Timeout:\r\n print(\"Timeout\")\r\n continue\r\n except requests.exceptions.RequestException as e:\r\n print(e)\r\n break\r\n except KeyError as e:\r\n error = str(e)\r\n if 'liveChatReplayContinuationData' in error:\r\n print('Hit last live chat segment, finishing job.')\r\n else:\r\n print(\"KeyError\")\r\n print(e)\r\n break\r\n except SyntaxError as e:\r\n print(\"SyntaxError\")\r\n print(e)\r\n break\r\n # continue #TODO\r\n except KeyboardInterrupt:\r\n break\r\n except Exception:\r\n print(\"Unexpected error:\" + str(sys.exc_info()[0]))\r\n\r\n return comment_data\r\n","sub_path":"YoutubeManager.py","file_name":"YoutubeManager.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"333553882","text":"\n# \n# Copyright 2016 RIFT.IO Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport gi\nimport logging\n\nfrom . 
import core\n\nimport xml.etree.ElementTree as etree\nfrom gi.repository import RwTopologyYang as RwTl\n\ngi.require_version('RwYang', '1.0')\nfrom gi.repository import RwYang\n\ngi.require_version('RwKeyspec', '1.0')\nfrom gi.repository.RwKeyspec import quoted_key\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SdnSim(core.Topology):\n def __init__(self):\n super(SdnSim, self).__init__()\n self._model = RwYang.Model.create_libyang()\n self._model.load_schema_ypbc(RwTl.get_schema())\n\n def get_network_list(self, account):\n \"\"\"\n Returns the discovered network\n\n @param account - a SDN account\n\n \"\"\"\n\n nwtop = RwTl.YangData_IetfNetwork()\n #topology_source = \"/net/boson/home1/rchamart/work/topology/l2_top.xml\"\n if not account.sdnsim.has_field('topology_source') or account.sdnsim.topology_source is None:\n return nwtop\n topology_source = account.sdnsim.topology_source\n logger.info(\"Reading topology file: %s\", topology_source)\n if 'json' in topology_source: \n with open(topology_source,'r') as f:\n print(\"Reading static topology file\")\n op_json = f.read()\n nwtop.from_json(self._model,op_json)\n for nw in nwtop.network:\n nw.server_provided = False\n logger.debug(\"...Network id %s\", nw.network_id)\n #nw_xpath = (\"D,/nd:network[network-id={}]\").format(quoted_key(nw.network_id))\n #xact_info.respond_xpath(rwdts.XactRspCode.MORE,\n # nw_xpath, nw)\n elif 'xml' in topology_source:\n tree = etree.parse(topology_source)\n root = tree.getroot()\n xmlstr = etree.tostring(root, encoding=\"unicode\")\n\n # The top level topology object does not have XML conversion\n # Hence going one level down\n #l2nw1 = nwtop.network.add()\n #l2nw1.from_xml_v2(self._model, xmlstr)\n nwtop.from_xml_v2(self._model,xmlstr)\n\n logger.debug(\"Returning topology data imported from XML file\")\n\n return nwtop\n","sub_path":"osm/SO/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py","file_name":"sdnsim.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"316471224","text":"def solveCircuit(data):##THIS IS THE MEAT####### \r\n def getGroundCount(data):\r\n #Check to see for one ground - determine ground starting point\r\n for row in range(len(data.elementGrid)):\r\n for col in range(len(data.elementGrid[0])):\r\n if type(data.elementGrid[row][col]) == Ground: \r\n data.GroundCount +=1\r\n (data.startRow, data.startCol) = (row, col)\r\n def AllNonZero(data):\r\n nonZeroCount = 1\r\n for row in range(len(data.elementGrid)):\r\n for col in range(len(data.elementGrid[0])):\r\n if(data.elementGrid[row][col] != 0): nonZeroCount += 1\r\n return nonZeroCount\r\n def isLegalCircuitSeries(data):\r\n #Next, check for a complete connection\r\n (NORTH, EAST, SOUTH, WEST) = ((-1,0), (0,1), (1,0), (0,-1))\r\n nonZeroCount = AllNonZero(data)\r\n copyboolean = copy.deepcopy(data.booleanGrid)\r\n #we start at a ground\r\n for element in range(nonZeroCount-1):\r\n conditionalPenetration = 0\r\n for (drow, dcol) in (NORTH, WEST, SOUTH, EAST):\r\n #makes sure we don't go back in line\r\n if(data.elementGrid[data.startRow+drow][data.startCol+dcol] != 0 and \r\n copyboolean[data.startRow+drow][data.startCol+dcol]):\r\n conditionalPenetration += 1\r\n copyboolean[data.startRow+drow][data.startCol+dcol] = False\r\n data.startRow += drow\r\n data.startCol += dcol\r\n break\r\n #Check to see if last element is ground\r\n if(element == nonZeroCount-2): \r\n if (type(data.elementGrid[data.startRow][data.startCol]) != Ground): 
return False\r\n #check to see if every element aside from this is connected to another element\r\n elif(conditionalPenetration != 1): return False\r\n return True\r\n\r\n #ONLY FOR SERIES CIRCUITS!!\r\n def determineVoltageandCurrentSeries(data):\r\n if(isLegalCircuitSeries(data) and len(data.acVoltage) == 0 and len(data.acCurrent) == 0):\r\n data.isLegalCircuit = True\r\n if(len(data.dcVoltage) > 0 and len(data.dcCurrent) == 0):\r\n totalVoltageSupply = 0\r\n for dcVoltage in data.dcVoltage:\r\n totalVoltageSupply += dcVoltage.voltage\r\n #All resistors are here\r\n if(len(data.resistors) > 0 and len(data.capacitors) == 0 and len(data.inductors) == 0):\r\n totalResistance = 0\r\n for resistor in data.resistors:\r\n totalResistance += resistor.resistance\r\n totalCurrent = totalVoltageSupply/totalResistance #Ohm's Law\r\n for resistor in data.resistors:\r\n (resistor.i0, resistor.i8) = (0.01*int(100*totalCurrent), 0.01*int(100*totalCurrent))\r\n (resistor.v0, resistor.v8) = (0.01*int(100*totalCurrent*resistor.resistance),\r\n 0.01*int(100*totalCurrent*resistor.resistance))\r\n for dcVoltage in data.dcVoltage:\r\n (dcVoltage.i0, dcVoltage.i8) = (totalCurrent, totalCurrent)\r\n #All capacitors and resistors\r\n elif(len(data.resistors) > 0 and len(data.capacitors) > 0 and len(data.inductors)==0):\r\n totalResistance = 0\r\n (totalCapacitanceNumerator, totalCapacitanceDenomator) = (1, 0)\r\n for resistor in data.resistors:\r\n totalResistance += resistor.resistance\r\n totalCurrent = totalVoltageSupply/totalResistance #Ohm's Law \r\n for capacitor in data.capacitors:\r\n if(len(data.capacitors) == 1): totalCapacitance = capacitor.capacitance\r\n else:\r\n totalCapacitanceNumerator *= capacitor.capacitance\r\n totalCapacitanceDenomator += capacitor.capacitance\r\n totalCapacitance = totalCapacitanceNumerator/totalCapacitanceDenomator\r\n totalCharge = totalVoltageSupply*totalCapacitance\r\n for resistor in data.resistors:\r\n (resistor.i0, resistor.i8) = (0.01*int(100*totalCurrent), 0.00)\r\n (resistor.v0, resistor.v8) = (0.01*int(100*totalCurrent*resistor.resistance), 0.00)\r\n for capacitor in data.capacitors:\r\n (capacitor.i0, capacitor.i8) = (0.01*int(100*totalCurrent), 0.00)\r\n (capacitor.v0, capacitor.v8) = (0.00, 0.01*int(100*totalCharge/capacitor.capacitance))\r\n for dcVoltage in data.dcVoltage:\r\n (dcVoltage.i0, dcVoltage.i8) = (totalCurrent, 0.00)\r\n #All resistors and inductors\r\n elif(len(data.resistors) > 0 and len(data.capacitors) == 0 and len(data.inductors) > 0):\r\n totalResistance = 0\r\n totalInductance = 0\r\n for resistor in data.resistors:\r\n totalResistance += resistor.resistance\r\n totalCurrent = totalVoltageSupply/totalResistance #Ohm's Law\r\n for inductor in data.inductors:\r\n totalInductance += inductor.inductance\r\n for resistor in data.resistors:\r\n (resistor.i0, resistor.i8) = (0.00, 0.01*int(100*totalCurrent))\r\n (resistor.v0, resistor.v8) = (0.00, 0.01*int(100*totalCurrent*resistor.resistance))\r\n for inductor in data.inductors:\r\n (inductor.i0, inductor.i8) = (0.00, 0.01*int(100*totalCurrent))\r\n (inductor.v0, inductor.v8) = (0.01*int(100*totalVoltageSupply*inductor.inductance/totalInductance), 0.00)\r\n for dcVoltage in data.dcVoltage:\r\n (dcVoltage.i0, dcVoltage.i8) = (0.00, totalCurrent)\r\n elif(len(data.dcCurrent) > 0 and len(data.dcVoltage) == 0): \r\n totalCurrent = 0\r\n for dcCurrent in data.dcCurrent:\r\n totalCurrent += dcCurrent.current\r\n #All resistors are here\r\n if(len(data.resistors) > 0 and len(data.capacitors) == 0 and 
len(data.inductors) == 0):\r\n totalResistance = 0\r\n for resistor in data.resistors:\r\n totalResistance += resistor.resistance\r\n totalVoltage = totalCurrent*totalResistance #Ohm's Law\r\n for resistor in data.resistors:\r\n (resistor.i0, resistor.i8) = (0.01*int(100*totalCurrent), 0.01*int(100*totalCurrent))\r\n (resistor.v0, resistor.v8) = (0.01*int(100*totalCurrent*resistor.resistance),\r\n 0.01*int(100*totalCurrent*resistor.resistance))\r\n for dcCurrent in data.dcCurrent:\r\n (dcCurrent.v0, dcCurrent.v8) = (-totalResistance*totalCurrent, -totalResistance*totalCurrent)\r\n else: data.isLegalCircuit = False\r\n\r\n getGroundCount(data)\r\n if(data.GroundCount == 2): determineVoltageandCurrentSeries(data)\r\n","sub_path":"key-function.py","file_name":"key-function.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"53583226","text":"# -*- coding: utf-8 -*-\n\"\"\"\nHealth checks\n=============\n\nFunctions performing basic health checks on xarray.DataArrays.\n\"\"\"\nimport datetime as dt\nimport fnmatch\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nfrom boltons.funcutils import wraps\n\nfrom .options import cfcheck\nfrom .options import CHECK_MISSING\nfrom .options import datacheck\nfrom .options import MISSING_METHODS\nfrom .options import MISSING_OPTIONS\nfrom .options import OPTIONS\nfrom .options import register_missing_method\nfrom .utils import ValidationError\n\n# Dev notes\n# ---------\n#\n# I suggest we use `check` for weak checking, and `assert` for strong checking.\n# Weak checking would log problems in a log, while strong checking would raise an error.\n#\n# ANSWER to the above:\n# The use of set_options with a switch for raising vs warning vs logging would render these terms useless.\n#\n# `functools.wraps` is used to copy the docstring and the function's original name from the source\n# function to the decorated function. This allows sphinx to correctly find and document functions.\n\n\n# TODO: Implement pandas infer_freq in xarray with CFTimeIndex. >> PR pydata/xarray#4033\n@cfcheck\ndef check_valid(var, key, expected):\n r\"\"\"Check that a variable's attribute has the expected value. Warn user otherwise.\"\"\"\n\n att = getattr(var, key, None)\n if att is None:\n raise ValidationError(f\"Variable does not have a `{key}` attribute.\")\n if not fnmatch.fnmatch(att, expected):\n raise ValidationError(\n f\"Variable has a non-conforming {key}. Got `{att}`, expected `{expected}`\",\n )\n\n\n@datacheck\ndef check_daily(var):\n r\"\"\"Assert that the series is daily and monotonic (no jumps in time index).\n\n A ValueError is raised otherwise.\"\"\"\n\n t0, t1 = var.time[:2]\n\n # This won't work for non-standard calendars. Needs to be implemented in xarray. 
Comment for now\n if isinstance(t0.values, np.datetime64):\n if pd.infer_freq(var.time.to_pandas()) != \"D\":\n raise ValidationError(\"time series is not recognized as daily.\")\n\n # Check that the first time step is one day.\n if np.timedelta64(dt.timedelta(days=1)) != (t1 - t0).data:\n raise ValidationError(\"time series is not daily.\")\n\n # Check that the series does not go backward in time\n if not var.time.to_pandas().is_monotonic_increasing:\n raise ValidationError(\"time index is not monotonically increasing.\")\n\n\ndef check_valid_temperature(var, units):\n r\"\"\"Check that variable is air temperature.\"\"\"\n\n check_valid(var, \"standard_name\", \"air_temperature\")\n check_valid(var, \"units\", units)\n check_daily(var)\n\n\ndef check_valid_discharge(var):\n r\"\"\"Check that the variable is a discharge.\"\"\"\n #\n check_valid(var, \"standard_name\", \"water_volume_transport_in_river_channel\")\n check_valid(var, \"units\", \"m3 s-1\")\n\n\ndef valid_daily_min_temperature(comp, units=\"K\"):\n r\"\"\"Decorator to check that a computation runs on a valid temperature dataset.\"\"\"\n\n @wraps(comp)\n def func(tasmin, *args, **kwds):\n check_valid_temperature(tasmin, units)\n check_valid(tasmin, \"cell_methods\", \"time: minimum within days\")\n return comp(tasmin, **kwds)\n\n return func\n\n\ndef valid_daily_mean_temperature(comp, units=\"K\"):\n r\"\"\"Decorator to check that a computation runs on a valid temperature dataset.\"\"\"\n\n @wraps(comp)\n def func(tas, *args, **kwds):\n check_valid_temperature(tas, units)\n check_valid(tas, \"cell_methods\", \"time: mean within days\")\n return comp(tas, *args, **kwds)\n\n return func\n\n\ndef valid_daily_max_temperature(comp, units=\"K\"):\n r\"\"\"Decorator to check that a computation runs on a valid temperature dataset.\"\"\"\n\n @wraps(comp)\n def func(tasmax, *args, **kwds):\n check_valid_temperature(tasmax, units)\n check_valid(tasmax, \"cell_methods\", \"time: maximum within days\")\n return comp(tasmax, *args, **kwds)\n\n return func\n\n\ndef valid_daily_max_min_temperature(comp, units=\"K\"):\n r\"\"\"Decorator to check that a computation runs on valid min and max temperature datasets.\"\"\"\n\n @wraps(comp)\n def func(tasmax, tasmin, **kwds):\n valid_daily_max_temperature(tasmax, units)\n valid_daily_min_temperature(tasmin, units)\n\n return comp(tasmax, tasmin, **kwds)\n\n return func\n\n\ndef valid_daily_mean_discharge(comp):\n r\"\"\"Decorator to check that a computation runs on valid discharge data.\"\"\"\n\n @wraps(comp)\n def func(q, **kwds):\n check_valid_discharge(q)\n return comp(q, **kwds)\n\n return func\n\n\ndef valid_missing_data_threshold(comp, threshold=0):\n r\"\"\"Check that the relative number of missing data points does not exceed a threshold.\"\"\"\n # TODO\n raise NotImplementedError\n\n\ndef check_is_dataarray(comp):\n r\"\"\"Decorator to check that a computation has an instance of xarray.DataArray\n as first argument.\"\"\"\n\n @wraps(comp)\n def func(data_array, *args, **kwds):\n assert isinstance(data_array, xr.DataArray)\n return comp(data_array, *args, **kwds)\n\n return func\n\n\n# This function can probably be made simpler once CFPeriodIndex is implemented.\nclass MissingBase:\n def __init__(self, da, freq, **indexer):\n self.null, self.count = self.prepare(da, freq, **indexer)\n\n @staticmethod\n def split_freq(freq):\n if freq is None:\n return \"\", None\n\n if \"-\" in freq:\n return freq.split(\"-\")\n\n return freq, None\n\n @staticmethod\n def is_null(da, freq, **indexer):\n 
\"\"\"Return a boolean array indicating which values are null.\"\"\"\n from xclim.indices import generic\n\n selected = generic.select_time(da, **indexer)\n if selected.time.size == 0:\n raise ValueError(\"No data for selected period.\")\n\n null = selected.isnull()\n if freq:\n return null.resample(time=freq)\n\n return null\n\n def prepare(self, da, freq, **indexer):\n \"\"\"Prepare arrays to be fed to the `is_missing` function.\n\n Parameters\n ----------\n da : xr.DataArray\n Input data.\n freq : str\n Resampling frequency defining the periods defined in\n http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling.\n **indexer : {dim: indexer, }, optional\n Time attribute and values over which to subset the array. For example, use season='DJF' to select winter\n values, month=1 to select January, or month=[6,7,8] to select summer months. If not indexer is given,\n all values are considered.\n\n Returns\n -------\n xr.DataArray, xr.DataArray\n Boolean array indicating which values are null, array of expected number of valid values.\n\n Notes\n -----\n If `freq=None` and an indexer is given, then missing values during period at the start or end of array won't be\n flagged.\n \"\"\"\n from xclim.indices import generic\n\n null = self.is_null(da, freq, **indexer)\n\n pfreq, anchor = self.split_freq(freq)\n\n c = null.sum(dim=\"time\")\n\n # Otherwise simply use the start and end dates to find the expected number of days.\n if pfreq.endswith(\"S\"):\n start_time = c.indexes[\"time\"]\n end_time = start_time.shift(1, freq=freq)\n elif pfreq:\n end_time = c.indexes[\"time\"]\n start_time = end_time.shift(-1, freq=freq)\n else:\n i = da.time.to_index()\n start_time = i[:1]\n end_time = i[-1:]\n\n if indexer:\n # Create a full synthetic time series and compare the number of days with the original series.\n t0 = str(start_time[0].date())\n t1 = str(end_time[-1].date())\n if isinstance(da.indexes[\"time\"], xr.CFTimeIndex):\n cal = da.time.encoding.get(\"calendar\")\n t = xr.cftime_range(t0, t1, freq=\"D\", calendar=cal)\n else:\n t = pd.date_range(t0, t1, freq=\"D\")\n\n sda = xr.DataArray(data=np.ones(len(t)), coords={\"time\": t}, dims=(\"time\",))\n st = generic.select_time(sda, **indexer)\n if freq:\n count = st.notnull().resample(time=freq).sum(dim=\"time\")\n else:\n count = st.notnull().sum(dim=\"time\")\n\n else:\n n = (end_time - start_time).days\n if freq:\n count = xr.DataArray(n.values, coords={\"time\": c.time}, dims=\"time\")\n else:\n count = xr.DataArray(n.values[0] + 1)\n\n return null, count\n\n def is_missing(self, null, count, **kwargs):\n \"\"\"Return whether or not the values within each period should be considered missing or not.\"\"\"\n raise NotImplementedError\n\n @staticmethod\n def validate(**kwargs):\n \"\"\"Return whether or not arguments are valid.\"\"\"\n return True\n\n def __call__(self, **kwargs):\n if not self.validate(**kwargs):\n raise ValueError(\"Invalid arguments\")\n return self.is_missing(self.null, self.count, **kwargs)\n\n\n@register_missing_method(\"any\")\nclass MissingAny(MissingBase):\n def is_missing(self, null, count, **kwargs):\n cond0 = null.count(dim=\"time\") != count # Check total number of days\n cond1 = null.sum(dim=\"time\") > 0 # Check if any is missing\n return cond0 | cond1\n\n\n@register_missing_method(\"wmo\")\nclass MissingWMO(MissingAny):\n def __init__(self, da, freq, **indexer):\n # Force computation on monthly frequency\n if not freq.startswith(\"M\"):\n raise ValueError\n super().__init__(da, freq, **indexer)\n\n 
def is_missing(self, null, count, nm=11, nc=5):\n import xclim.indices.run_length as rl\n\n # Check total number of days\n cond0 = null.count(dim=\"time\") != count\n\n # Check if more than threshold is missing\n cond1 = null.sum(dim=\"time\") >= nm\n\n # Check for consecutive missing values\n cond2 = null.map(rl.longest_run, dim=\"time\") >= nc\n\n return cond0 | cond1 | cond2\n\n @staticmethod\n def validate(nm, nc):\n return nm < 31 and nc < 31\n\n\n@register_missing_method(\"pct\")\nclass MissingPct(MissingBase):\n def is_missing(self, null, count, tolerance=0.1):\n if tolerance < 0 or tolerance > 1:\n raise ValueError(\"tolerance should be between 0 and 1.\")\n\n n = count - null.count(dim=\"time\") + null.sum(dim=\"time\")\n return n / count >= tolerance\n\n @staticmethod\n def validate(tolerance):\n return 0 <= tolerance <= 1\n\n\n@register_missing_method(\"at_least_n\")\nclass AtLeastNValid(MissingBase):\n def is_missing(self, null, count, n=20):\n \"\"\"The result of a reduction operation is considered missing if less than `n` values are valid.\"\"\"\n nvalid = null.count(dim=\"time\") - null.sum(dim=\"time\")\n return nvalid < n\n\n @staticmethod\n def validate(n):\n return n > 0\n\n\ndef missing_any(da, freq, **indexer):\n r\"\"\"Return whether there are missing days in the array.\n\n Parameters\n ----------\n da : DataArray\n Input array at daily frequency.\n freq : str\n Resampling frequency.\n **indexer : {dim: indexer, }, optional\n Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values,\n month=1 to select January, or month=[6,7,8] to select summer months. If not indexer is given, all values are\n considered.\n\n Returns\n -------\n out : DataArray\n A boolean array set to True if period has missing values.\n \"\"\"\n return MissingAny(da, freq, **indexer)()\n\n\ndef missing_wmo(da, freq, nm=11, nc=5, **indexer):\n r\"\"\"Return whether a series fails WMO criteria for missing days.\n\n The World Meteorological Organisation recommends that where monthly means are computed from daily values,\n it should considered missing if either of these two criteria are met:\n\n – observations are missing for 11 or more days during the month;\n – observations are missing for a period of 5 or more consecutive days during the month.\n\n Stricter criteria are sometimes used in practice, with a tolerance of 5 missing values or 3 consecutive missing\n values.\n\n Parameters\n ----------\n da : DataArray\n Input array at daily frequency.\n freq : str\n Resampling frequency.\n nm : int\n Number of missing values per month that should not be exceeded.\n nc : int\n Number of consecutive missing values per month that should not be exceeded.\n **indexer : {dim: indexer, }, optional\n Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values,\n month=1 to select January, or month=[6,7,8] to select summer months. 
If not indexer is given, all values are\n considered.\n\n Returns\n -------\n out : DataArray\n A boolean array set to True if period has missing values.\n \"\"\"\n missing = MissingWMO(da, \"M\", **indexer)(nm=nm, nc=nc)\n return missing.resample(time=freq).any()\n\n\ndef missing_pct(da, freq, tolerance, **indexer):\n r\"\"\"Return whether there are more missing days in the array than a given percentage.\n\n Parameters\n ----------\n da : DataArray\n Input array at daily frequency.\n freq : str\n Resampling frequency.\n tolerance : float\n Fraction of missing values that is tolerated.\n **indexer : {dim: indexer, }, optional\n Time attribute and values over which to subset the array. For example, use season='DJF' to select winter\n values,\n month=1 to select January, or month=[6,7,8] to select summer months. If not indexer is given, all values are\n considered.\n\n Returns\n -------\n out : DataArray\n A boolean array set to True if period has missing values.\n \"\"\"\n return MissingPct(da, freq, **indexer)(tolerance=tolerance)\n\n\ndef at_least_n_valid(da, freq, n=1, **indexer):\n r\"\"\"Return whether there are at least a given number of valid values.\n\n Parameters\n ----------\n da : DataArray\n Input array at daily frequency.\n freq : str\n Resampling frequency.\n n : int\n Minimum of valid values required.\n **indexer : {dim: indexer, }, optional\n Time attribute and values over which to subset the array. For example, use season='DJF' to select winter\n values, month=1 to select January, or month=[6,7,8] to select summer months. If not indexer is given,\n all values are considered.\n\n Returns\n -------\n out : DataArray\n A boolean array set to True if period has missing values.\n \"\"\"\n return AtLeastNValid(da, freq, **indexer)(n=n)\n\n\ndef missing_from_context(da, freq, **indexer):\n \"\"\"Return whether each element of the resampled da should be considered missing according\n to the currently set options in `xclim.set_options`.\n\n See `xclim.set_options` and `xclim.core.options.register_missing_method`.\n \"\"\"\n name = OPTIONS[CHECK_MISSING]\n cls = MISSING_METHODS[name]\n opts = OPTIONS[MISSING_OPTIONS][name]\n\n return cls(da, freq, **indexer)(**opts)\n","sub_path":"xclim/core/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":15168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"85583414","text":"from __future__ import division\n\nfrom slithy.presentation import *\nfrom slithy.library import *\n\nfrom fonts import fonts\n\n#def zero(width=(SCALAR,0,1)):\n# set_camera(Rect(0,0,width=2,height=2))\n# path = Path().moveto(0,1).curveto(-width,1,-width,-1,0,-1)\n# widestroke(path,0.1)\n\ndef unit_circle(circle_thickness=(SCALAR,0.05,2)):\n set_camera(Rect(0,0,width=2,height=2))\n thickness(circle_thickness)\n circle(1-circle_thickness/2)\n\ndef logo(\n circle_position=(SCALAR,-10,10,-2),\n circle_width=(SCALAR,0,2,0.5),\n line_1_x1=(SCALAR,-10,10,0),\n line_1_y1=(SCALAR,-2,2,0.75),\n line_1_x2=(SCALAR,-10,10,0),\n line_1_y2=(SCALAR,-2,2,-0.75),\n line_2_x1=(SCALAR,-10,10,0.75),\n line_2_y1=(SCALAR,-2,2,0),\n line_2_x2=(SCALAR,-10,10,-0.75),\n line_2_y2=(SCALAR,-2,2,-0),\n line_3_x1=(SCALAR,-10,10,2),\n line_3_y1=(SCALAR,-2,2,1),\n line_3_x2=(SCALAR,-10,10,2),\n line_3_y2=(SCALAR,-2,2,-1),\n line_4_x1=(SCALAR,-10,10,-1),\n line_4_y1=(SCALAR,-2,2,0),\n line_4_x2=(SCALAR,-10,10,-1),\n line_4_y2=(SCALAR,-2,2,0),\n dot_x=(SCALAR,-10,10,-1),\n dot_y=(SCALAR,0,0,0),\n dot_radius=(SCALAR,0,0.5,0),\n 
dot_color=(COLOR,Color(3/4,0,0,0)),\n bracket_position=(SCALAR,-10,10,5.5),\n path_alpha=(SCALAR,0,1,0),\n ):\n set_camera(Rect(0,0,width=10,height=4))\n color(white)\n thickness(0.2)\n\n def drawit():\n push()\n scale(circle_width,1,circle_position,0)\n circle(1,circle_position,0)\n pop()\n line(line_1_x1,line_1_y1,line_1_x2,line_1_y2)\n line(line_2_x1,line_2_y1,line_2_x2,line_2_y2)\n line(line_3_x1,line_3_y1,line_3_x2,line_3_y2)\n line(line_4_x1,line_4_y1,line_4_x2,line_4_y2)\n\n path = (\n Path()\n .moveto(line_4_x1,line_4_y1)\n .lineto(line_4_x2,line_4_y2)\n .lineto(line_2_x1,line_2_y1)\n .lineto(line_2_x2,line_2_y2)\n .lineto(line_3_x2,line_3_y2)\n )\n\n push()\n translate(0.1,0.1)\n color(black)\n drawit()\n color(black,path_alpha)\n widestroke(path,0.2)\n pop()\n \n color(1,0,1)\n drawit()\n color(1,1/8,1,path_alpha)\n widestroke(path,0.2) \n\n color(dot_color)\n dot(dot_radius,dot_x,dot_y)\n\n color(1,1/2,1)\n \n line(-bracket_position,1.5,-bracket_position,-1.5)\n\n widestroke(\n Path()\n .moveto(bracket_position,1.5)\n .lineto(bracket_position+1,0)\n .lineto(bracket_position,-1.5)\n ,\n 0.2\n )\n\nbg = Fill( style = 'horz', color = black, color2 = blue )\n\ndef animate_logo():\n c = get_camera().restrict_aspect(10.0/4)\n\n my_logo = Drawable(c,logo)\n\n start_animation(bg,my_logo)\n\n pause()\n\n parallel()\n dt=1\n linear(dt,my_logo.circle_position,-1)\n linear(dt,my_logo.circle_width,0.8)\n linear(dt,my_logo.line_1_x1,-0.25)\n linear(dt,my_logo.line_1_y1,-1)\n linear(dt,my_logo.line_1_x2,0.5)\n linear(dt,my_logo.line_1_y2,1)\n linear(dt,my_logo.line_2_x1,0.5)\n linear(dt,my_logo.line_2_y1,1)\n linear(dt,my_logo.line_2_x2,1.25)\n linear(dt,my_logo.line_2_y2,-1)\n linear(dt,my_logo.line_3_x1,1.25)\n linear(dt,my_logo.line_3_y1,-1)\n linear(dt,my_logo.line_3_x2,2)\n linear(dt,my_logo.line_3_y2,1)\n end()\n \n parallel()\n dt=0.6\n smooth(dt,my_logo.dot_radius,0.15,e=-1.5)\n\n serial()\n wait(dt/2)\n smooth(dt/2,my_logo.dot_color,Color(3/4,1/4,0,1),e=-1.5)\n end()\n \n end()\n\n wait(0.2)\n\n parallel()\n dt=0.3\n linear(dt,my_logo.dot_x,-0.25)\n linear(dt,my_logo.dot_y,-1)\n linear(dt,my_logo.line_4_x2,-0.25)\n linear(dt,my_logo.line_4_y2,-1)\n end()\n\n parallel()\n \n dt=0.6\n \n parallel()\n linear(dt,my_logo.dot_x,0)\n linear(dt,my_logo.dot_y,0)\n linear(dt,my_logo.dot_radius,2)\n end()\n\n serial()\n wait(dt/4)\n parallel()\n linear(3*dt/4,my_logo.dot_color,Color(1,1/8,1,0))\n linear(3*dt/4,my_logo.path_alpha,1)\n end()\n end()\n \n end()\n\n smooth(1,my_logo.bracket_position,2.5,e=-0.5)\n\n return end_animation()\n\nanimate_logo = animate_logo()\n\nplay(animate_logo)\n\nrun_presentation() \n","sub_path":"SQuInT/logo.py","file_name":"logo.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"542306719","text":"#!/usr/bin/python\nimport web\nfrom web import form\nimport random\nimport string\nimport hashlib\n##\nfrom db import connect_db\nimport ser\n\nPASSWORD = 'davidfanisawesome'\n\ndb = connect_db()\n\n\nsess = ser.sess\n\n\ndef tpl(*params):\n global template_obj\n return template_obj(*params)\n\n\ndef template_closure(directory):\n global settings\n templates = web.template.render(directory,\n globals={'sess': sess, 'tpl': tpl})\n def render(name, *params):\n return getattr(templates, name)(*params)\n return render\n\ntemplate_obj = template_closure('t/admin')\n\n\ndef ltpl(*params):\n return tpl('layout', tpl(*params))\n\n\ndef lmsg(msg):\n return tpl('layout', '
%s
' % msg)\n\n\n\ndef login():\n global sess\n\n good = False\n if 'ok' in sess:\n good = True\n else:\n cookies = web.cookies()\n if cookies.get('adminpw') == PASSWORD:\n good = True\n\n if not good:\n raise web.seeother('/admin/login')\n\n\nclass PageIndex:\n def GET(self):\n login()\n return ltpl('index')\n\n\nclass PageLogin:\n _form = form.Form(\n form.Password('password'),\n form.Button('login')\n )\n\n def GET(self):\n return ltpl('form', self._form(), 'Login')\n\n def POST(self):\n form = self._form()\n if not form.validates():\n return 'houston we have a problem'\n\n if form.d.password != PASSWORD:\n return 'password incorrect'\n\n web.setcookie('adminpw', PASSWORD, 99999999)\n sess.ok = True\n raise web.seeother('/admin/')\n\n\nclass PageLogout:\n def GET(self):\n sess.kill()\n web.setcookie('adminpw', '', expires=-1)\n raise web.seeother('/admin')\n\n\nclass PageRelationships:\n def GET(self):\n follows = db.query('''\n select\n follows.*,\n u1.name as follower_name,\n u2.name as following_name,\n u1.username as follower_username,\n u2.username as following_username\n from follows\n join users u1 on u1.id = follows.follower\n join users u2 on u2.id = follows.follow''')\n\n friends = db.query('''\n select\n friends.*,\n u1.name as u1_name,\n u2.name as u2_name,\n u1.username as u1_username,\n u2.username as u2_username\n from friends\n join users u1 on u1.id = friends.id1\n join users u2 on u2.id = friends.id2''')\n\n return ltpl('relations', follows, friends)\n\n\n\nUSERS_QUERY = '''\n select\n users.*,\n count(distinct f1) as follower_count,\n count(distinct f2) as follow_count,\n count(distinct p1) as repin_count,\n count(distinct p2) as pin_count,\n count(distinct friends) as friend_count\n from users\n left join follows f1 on f1.follow = users.id\n left join follows f2 on f2.follower = users.id\n left join friends on (friends.id1 = users.id or\n friends.id2 = users.id)\n left join pins p1 on p1.repin = users.id\n left join pins p2 on p2.user_id = users.id\n %s\n group by users.id'''\n\n\nclass PageSearch:\n _form = form.Form(\n form.Textbox('query'),\n form.Button('search')\n )\n\n def GET(self, allusers=None):\n login()\n\n params = web.input(order=None, query=None)\n order = params.order\n \n def make_query(query):\n if order is not None:\n return query + (' order by %s desc' % order)\n return query\n\n if allusers is not None:\n query = make_query(USERS_QUERY % '')\n results = db.query(query)\n return ltpl('search', results)\n\n search_query = params.query\n if search_query is None:\n return ltpl('searchform', self._form(), params)\n\n query = make_query(USERS_QUERY % '''\n where\n users.email ilike $search or\n users.name ilike $search or\n users.about ilike $search''')\n\n results = db.query(query, vars={'search': '%%%s%%' % search_query})\n return ltpl('search', results)\n\n\nclass PageUser:\n def GET(self, user_id):\n user_id = int(user_id)\n user = db.query('''\n select\n users.*,\n count(f1.follow) as follower_count,\n count(f2.follow) as follow_count,\n count(pins.id) as repin_count,\n count(friends) as friend_count\n from users\n left join follows f1 on f1.follow = users.id\n left join follows f2 on f2.follower = users.id\n left join friends on (friends.id1 = users.id or\n friends.id2 = users.id)\n left join pins on pins.repin = users.id\n where users.id = $id\n group by users.id''', vars={'id': user_id})\n if not user:\n return 'user not found'\n\n return ltpl('user', user[0])\n\n\nclass PageCloseUser:\n def GET(self, user_id):\n login()\n user_id = int(user_id)\n 
db.query('delete from pins where user_id = $id', vars={'id': user_id})\n db.query('delete from users where id = $id', vars={'id': user_id})\n raise web.seeother('/admin/')\n\n\nclass PageEditUser:\n def make_form(self, user=None):\n user = user or dict()\n return form.Form(\n form.Textbox('name', value=user.get('name')),\n form.Textbox('email', value=user.get('email')),\n form.Textarea('about', value=user.get('about')),\n form.Button('update'))()\n\n def GET(self, user_id):\n login()\n user_id = int(user_id)\n user = db.select('users', where='id = $id', vars={'id': user_id})\n if not user:\n return 'That user does not exist.'\n\n return ltpl('edituser', self.make_form(user[0]))\n\n def POST(self, user_id):\n login()\n user_id = int(user_id)\n form = self.make_form()\n if not form.validates():\n return 'invalid form input'\n\n d = dict(form.d)\n del d['update']\n db.update('users', where='id = $id', vars={'id': user_id}, **d)\n raise web.seeother('/admin/edituser/%d' % user_id)\n\n\ndef email_exists(email):\n result = db.select('users',\n what='1',\n where='email=$email',\n vars={'email': email},\n limit=1)\n return bool(result)\n\n\ndef hash(data):\n return hashlib.sha1(data).hexdigest()\n\n\ndef create_user(email, password, **params):\n pw_hash = hash(password)\n pw_salt = generate_salt()\n pw_hash = hash(pw_hash+pw_salt)\n\n return db.insert('users', email=email, pw_hash=pw_hash, pw_salt=pw_salt, **params)\n\n\ndef generate_salt(length=10):\n random.seed()\n pool = string.ascii_uppercase + string.ascii_lowercase + string.digits\n return ''.join(random.choice(pool) for i in range(length))\n\n\ndef username_exists(username):\n result = db.select('users',\n what='1',\n where='username=$username',\n vars={'username': username},\n limit=1)\n return bool(result)\n\n\nclass PageCreateUser:\n _form = form.Form(\n form.Textbox('email'),\n form.Textbox('name'),\n form.Textbox('username'),\n form.Password('password'),\n form.Button('create account')\n )\n\n def GET(self):\n login()\n form = self._form()\n return ltpl('reg', form)\n\n def POST(self):\n login()\n form = self._form()\n if not form.validates():\n return 'bad input'\n\n if email_exists(form.d.email):\n return 'email already exists'\n\n if username_exists(form.d.username):\n return 'username already exists'\n\n user_id = create_user(form.d.email, form.d.password, name=form.d.name, username=form.d.username)\n if not user_id:\n return 'couldn\\'t create user'\n\n raise web.seeother('/admin/user/%d' % user_id)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"150994203","text":"from random import random\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nfrom transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification, Trainer, TrainingArguments, XLMRobertaConfig\r\nfrom load_data import *\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# 평가를 위한 metrics function.\r\ndef compute_metrics(pred):\r\n labels = pred.label_ids\r\n preds = pred.predictions.argmax(-1)\r\n # calculate accuracy using sklearn's function\r\n acc = accuracy_score(labels, preds)\r\n return {\r\n 'accuracy': acc,\r\n }\r\n\r\ndef train():\r\n # load model and tokenizer\r\n MODEL_NAME = \"xlm-roberta-large\"\r\n tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_NAME)\r\n\r\n # split dataset\r\n dataset = pd.read_csv('/opt/ml/input/data/train/train.tsv', 
delimiter='\\t', header=None)\r\n train, dev = train_test_split(dataset, test_size=0.2, random_state=42)\r\n train.to_csv('/opt/ml/input/data/train/train_train.tsv', sep='\\t', header=None, index=False)\r\n dev.to_csv('/opt/ml/input/data/train/train_dev.tsv', sep='\\t', header=None, index=False)\r\n\r\n # load dataset\r\n train_dataset = load_data('/opt/ml/input/data/train/train_train.tsv')\r\n train_add_dataset = load_data(\"/opt/ml/input/data/train/all_csv.tsv\")\r\n train_dataset = pd.concat([train_dataset, train_add_dataset.sample(n=9000)])\r\n print(len(train_dataset))\r\n dev_dataset = load_data('/opt/ml/input/data/train/train_dev.tsv')\r\n\r\n train_label = train_dataset['label'].values\r\n dev_label = dev_dataset['label'].values\r\n\r\n # tokenizing dataset\r\n tokenized_train = tokenized_dataset(train_dataset, tokenizer)\r\n tokenized_dev = tokenized_dataset(dev_dataset, tokenizer)\r\n\r\n # make dataset for pytorch.\r\n RE_train_dataset = RE_Dataset(tokenized_train, train_label)\r\n RE_dev_dataset = RE_Dataset(tokenized_dev, dev_label)\r\n\r\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n\r\n # setting model hyperparameter\r\n bert_config = XLMRobertaConfig.from_pretrained(MODEL_NAME)\r\n bert_config.num_labels = 42\r\n model = XLMRobertaForSequenceClassification.from_pretrained(MODEL_NAME, config=bert_config)\r\n model.to(device)\r\n\r\n # 사용한 option 외에도 다양한 option들이 있습니다.\r\n # https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments 참고해주세요.\r\n training_args = TrainingArguments(\r\n output_dir='./results',\r\n save_total_limit=3,\r\n save_steps=100,\r\n num_train_epochs=10,\r\n learning_rate=1e-5,\r\n per_device_train_batch_size=32,\r\n per_device_eval_batch_size=32,\r\n warmup_steps=300,\r\n weight_decay=0.01,\r\n logging_dir='./logs',\r\n logging_steps=100,\r\n evaluation_strategy='steps',\r\n eval_steps = 100,\r\n dataloader_num_workers=4,\r\n label_smoothing_factor=0.5\r\n )\r\n trainer = Trainer(\r\n model=model,\r\n args=training_args,\r\n train_dataset=RE_train_dataset,\r\n eval_dataset=RE_dev_dataset,\r\n compute_metrics=compute_metrics\r\n )\r\n\r\n # train model\r\n trainer.train()\r\n\r\ndef main():\r\n train()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"train2.py","file_name":"train2.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"169421927","text":"from statistics import mode, StatisticsError\nfrom copy import deepcopy\nfrom scipy.stats import entropy\nimport numpy as np\n\n\nclass Node:\n def __init__(self, attribute_name: str, depth=0):\n self._depth = depth\n self._children = {}\n self._attribute_name = attribute_name\n\n def addChild(self, attribute_value, child=None):\n self._children[attribute_value] = child\n\n def get_attribute_name(self):\n return self._attribute_name\n\n def get_children(self):\n return self._children\n\n def set_depth(self, depth):\n self._depth = depth\n\n def get_depth(self):\n return self._depth\n\n\ndef id3(data_obj: object, attributes: dict, labels: list, depth=-1):\n label = labels[0]\n same_label = True\n\n for i in range(len(labels)):\n if labels[i] != label:\n same_label = False\n break\n\n if same_label:\n return Node(label, depth + 1)\n\n attribute_name = get_best_attribute(data_obj)\n # attribute_name = list(attributes.keys())[0]\n # a = attributes[attribute_name]\n\n root = Node(attribute_name, depth + 1)\n for v in 
data_obj.get_attribute_possible_vals(attribute_name):\n new_data_obj = data_obj.get_row_subset(str(attribute_name), v)\n\n if len(new_data_obj) == 0:\n try:\n common_value = mode(data_obj.get_column('label'))\n except StatisticsError:\n common_value = label\n root.addChild(v, Node(common_value, depth + 2))\n else:\n new_attributes = deepcopy(attributes)\n new_attributes.pop(attribute_name)\n root.addChild(v, id3(new_data_obj, new_attributes, new_data_obj.get_column('label'), depth + 1))\n\n return root\n\n\ndef pruning_tree(id3_tree, max_depth):\n root = id3_tree\n if len(id3_tree.get_children()) == 0:\n return id3_tree\n\n pruned_tree = explore_tree(root, max_depth)\n\n return pruned_tree\n\n\ndef explore_tree(node, max_depth):\n current_depth = node.get_depth()\n\n if current_depth >= max_depth:\n label_values = []\n get_label_values(node, label_values)\n\n try:\n common_value = mode(label_values)\n except StatisticsError:\n common_value = label_values[0]\n\n return Node(common_value, depth=current_depth)\n\n for attribute_value, child in node.get_children().items():\n node.addChild(attribute_value, explore_tree(child, max_depth))\n\n return node\n\n\ndef get_label_values(node, label_values):\n if len(node.get_children()) == 0:\n label_values.append(node.get_attribute_name())\n return\n\n for attribute_value, child in node.get_children().items():\n get_label_values(child, label_values)\n\n\n\n\ndef group_label(data_obj):\n possible_label_vals = np.unique(data_obj.get_column('label'))\n label_count = dict(zip(possible_label_vals, [0] * len(possible_label_vals)))\n\n for label in data_obj.get_column('label'):\n label_count[label] += 1\n\n return label_count\n\n\n\ndef group_attribute_by_label(data_obj, attribute_label_cols):\n attribute_grouped_by_label = {}\n\n for attribute, label in attribute_label_cols:\n\n if attribute not in attribute_grouped_by_label.keys():\n possible_label_vals = np.unique(data_obj.get_column('label'))\n label_data = dict(zip(possible_label_vals, [0] * len(possible_label_vals)))\n\n attribute_grouped_by_label[attribute] = label_data\n\n attribute_grouped_by_label[attribute][label] += 1\n\n return attribute_grouped_by_label\n\n\n\ndef gain(label_count, attribute_grouped_by_label):\n total_entropy = entropy([x / sum(label_count.values()) for x in label_count.values()], base=2)\n expected_entropy = attribute_expected_entropy(label_count, attribute_grouped_by_label)\n\n return total_entropy - expected_entropy\n\n\ndef attribute_expected_entropy(label_count, attribute_grouped_by_label):\n attribute_entropy = []\n\n for attribute_value in attribute_grouped_by_label.items():\n fraction = sum(attribute_value[1].values()) / sum(label_count.values())\n attribute_value_entropy = fraction * entropy(\n [x / sum(attribute_value[1].values()) for x in attribute_value[1].values()], base=2)\n\n attribute_entropy.append(attribute_value_entropy)\n\n return sum(attribute_entropy)\n\n\ndef get_best_attribute(data_obj):\n max_gain = ('', 0.0)\n\n for i in data_obj.attributes.keys():\n attribute_label_cols = data_obj.get_column([i, 'label'])\n attribute_grouped_by_label = group_attribute_by_label(data_obj, attribute_label_cols)\n\n current_gain = gain(group_label(data_obj), attribute_grouped_by_label)\n if current_gain >= max_gain[1]:\n max_gain = (i, current_gain)\n\n return max_gain[0]\n\n\ndef report_error(data_obj, main_root):\n max_depth = 0\n wrong_predictions = 0\n\n for test in data_obj.raw_data:\n root = main_root\n\n while len(root.get_children()) != 0:\n current_attribute = 
root.get_attribute_name()\n attribute_index = data_obj.get_column_index(current_attribute)\n next_attribute_value = test[attribute_index]\n\n if next_attribute_value in root.get_children().keys():\n root = root.get_children()[next_attribute_value]\n else:\n try:\n common_value = mode(data_obj.get_column('label'))\n except StatisticsError:\n common_value = data_obj.get_column('label')[0]\n\n root = Node(common_value, depth=root.get_depth() + 1)\n if max_depth < root.get_depth():\n max_depth = root.get_depth()\n break\n\n if max_depth < root.get_depth():\n max_depth = root.get_depth()\n\n attribute_index = data_obj.get_column_index('label')\n if test[attribute_index] != root.get_attribute_name():\n wrong_predictions += 1\n\n training_error = wrong_predictions / len(data_obj.raw_data) * 100\n\n return training_error, max_depth\n\ndef predict(data_obj, example, id3_tree_root):\n root = id3_tree_root\n test = example\n\n while len(root.get_children()) != 0:\n current_attribute = root.get_attribute_name()\n attribute_index = data_obj.get_column_index(current_attribute)\n next_attribute_value = test[attribute_index]\n\n if next_attribute_value in root.get_children().keys():\n root = root.get_children()[next_attribute_value]\n else:\n try:\n common_value = mode(data_obj.get_column('label', data_obj.raw_data))\n except StatisticsError:\n common_value = data_obj.get_column('label', data_obj.raw_data)[0]\n root = Node(common_value, depth=root.get_depth() + 1)\n break\n\n attribute_index = data_obj.get_column_index('label')\n\n # return test[attribute_index] != root.get_attribute_name()\n return root.get_attribute_name()","sub_path":"CS 5350 Machine Learning/SVM/dt.py","file_name":"dt.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"491986","text":"from __future__ import print_function\r\nfrom apiclient.discovery import build\r\nfrom httplib2 import Http\r\nfrom oauth2client import file, client, tools\r\nimport argparse\r\n\r\nclass Login:\r\n\tdef login():\r\n\t\tSCOPES = 'https://mail.google.com/'\r\n\t\tparser = argparse.ArgumentParser(parents=[tools.argparser])\r\n\t\tflags = parser.parse_args()\r\n\t\tstore = file.Storage('credentials.json')\r\n\t\tcreds = store.get()\r\n\t\tif not creds or creds.invalid:\r\n\t\t flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\r\n\t\t creds = tools.run_flow(flow, store , flags)\r\n\t\tservice = build('gmail', 'v1', http=creds.authorize(Http()))\r\n\t\treturn service\r\n\r\nclass Labels:\r\n\tdef labels(service):\r\n\t\tresults = service.users().labels().list(userId='me').execute()\r\n\t\tlabels = results.get('labels', [])\r\n\t\tif not labels:\r\n\t\t print('No labels found.')\r\n\t\telse:\r\n\t\t print('Labels:')\r\n\t\t for label in labels:\r\n\t\t print(label['name'])\r\n\r\nimport base64\r\nfrom email.mime.text import MIMEText\r\n\r\nclass cm:\r\n def create_message(sender, to, subject, message_text):\r\n message = MIMEText(message_text)\r\n message['to'] = to\r\n message['from'] = sender\r\n message['subject'] = subject\r\n return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}\r\n\r\nfrom googleapiclient.errors import HttpError\r\n\r\nclass sm:\r\n def send_message(service, user_id, message):\r\n try:\r\n print('sending email....')\r\n message = (service.users().messages().send(userId=user_id, body=message).execute())\r\n print('___email sent___')\r\n print('Message Id: %s' % message['id'])\r\n return message\r\n except 
HttpError as error:\r\n print('An error occurred: %s' % error)\r\n\r\nfrom email.mime.audio import MIMEAudio\r\nfrom email.mime.image import MIMEImage\r\nfrom email.mime.base import MIMEBase\r\nfrom email.mime.application import MIMEApplication\r\nfrom email.mime.multipart import MIMEMultipart\r\nimport mimetypes\r\nimport os\r\n\r\nclass Attach:\r\n def create_message_with_attachment(sender, to, subject, message_text, file):\r\n message = MIMEMultipart()\r\n message['to'] = to\r\n message['from'] = sender\r\n message['subject'] = subject\r\n msg = MIMEText(message_text)\r\n message.attach(msg)\r\n content_type, encoding = mimetypes.guess_type(file)\r\n #print(content_type)\r\n #print(encoding)\r\n if content_type is None or encoding is not None:\r\n content_type = 'application/octet-stream'\r\n main_type, sub_type = content_type.split('/', 1)\r\n if main_type == 'text':\r\n print('uploading text....')\r\n fp = open(file, 'rb')\r\n msg = MIMEText(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n elif main_type == 'image':\r\n print('uploading image....')\r\n fp = open(file, 'rb')\r\n msg = MIMEImage(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n elif main_type == 'audio':\r\n print('uploading audio....')\r\n fp = open(file, 'rb')\r\n msg = MIMEAudio(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n elif main_type == 'application':\r\n print('uploading %s....' % sub_type)\r\n fp = open(file, 'rb')\r\n msg = MIMEApplication(fp.read(), _subtype=sub_type)\r\n fp.close()\r\n else:\r\n print('uploading %s (%s)....' % (main_type , sub_type) )\r\n fp = open(file, 'rb')\r\n msg = MIMEBase(main_type, sub_type)\r\n msg.set_payload(fp.read())\r\n fp.close()\r\n filename = os.path.basename(file)\r\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\r\n message.attach(msg)\r\n return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}\r\n","sub_path":"api_test.py","file_name":"api_test.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"595325592","text":"import json\n\nfrom flask import request, url_for, abort\nfrom werkzeug.utils import redirect\n\nfrom auth import query\nfrom db import connect_db\nfrom env import CLIENT_ID\nfrom security import logged_in, get_staff_endpoints\n\nREJECTED = object()\nUNABLE = object()\n\nADD_TO_SLACK = f\"https://slack.com/oauth/v2/authorize?client_id={CLIENT_ID}&scope=channels:join,channels:read,chat:write&user_scope=channels:history,chat:write,groups:history,im:history,mpim:history,users:read\"\n\nwith open(\"config.json\") as f:\n CONFIG = json.load(f)\n\n\ndef init_db():\n with connect_db() as db:\n db(\n \"\"\"CREATE TABLE IF NOT EXISTS tokens (\n user varchar(128),\n token text,\n PRIMARY KEY (`user`)\n );\"\"\"\n )\n\n db(\n \"\"\"CREATE TABLE IF NOT EXISTS silenced_users (\n user varchar(128), \n PRIMARY KEY (`user`) \n );\"\"\"\n )\n\n db(\n \"\"\"CREATE TABLE IF NOT EXISTS bot_data (\n bot_access_token varchar(256),\n auth_client varchar(128),\n auth_secret varchar(256),\n team_id varchar(256),\n course varchar(128)\n )\"\"\"\n )\n\n db(\n \"\"\"CREATE TABLE IF NOT EXISTS activated_services (\n course varchar(128),\n service varchar(128)\n )\"\"\"\n )\n\n\ninit_db()\n\n\ndef get_endpoint(course):\n return query(\"/api/{}/get_endpoint\".format(course), course=None)\n\n\ndef create_config_client(app):\n @app.route(\"/\")\n @logged_in\n def index():\n staff_endpoints = set(get_staff_endpoints(app.remote))\n active_courses = []\n for 
course in CONFIG:\n            if get_endpoint(course) in staff_endpoints:\n                active_courses.append(course)\n\n        if len(active_courses) == 0:\n            return (\n                \"You are not a member of staff in any course that uses this tool\",\n                401,\n            )\n        if len(active_courses) == 1:\n            return redirect(url_for(\"register_course\", course=active_courses[0]))\n\n        options = \"<br/><br/>\".join(\n            '<a href=\"/register/{}\">{}</a>'.format(course, course)\n            for course in active_courses\n        )\n\n        return f\"\"\"\n        Please select your course:\n        <br/><br/>\n        {options}\n
\n \"\"\"\n\n @app.route(\"/register/\")\n def register_course(course):\n print(get_endpoint(course), list(get_staff_endpoints(app.remote)))\n if get_endpoint(course) not in get_staff_endpoints(app.remote):\n abort(403)\n\n with connect_db() as db:\n ret = db(\"SELECT * FROM bot_data WHERE course = (%s)\", [course]).fetchone()\n\n if ret:\n # course already setup\n return redirect(ADD_TO_SLACK)\n else:\n return redirect(url_for(\"course_config\", course=course))\n\n @app.route(\"/config/\")\n def course_config(course):\n if get_endpoint(course) not in get_staff_endpoints(app.remote):\n abort(403)\n\n with connect_db() as db:\n ret = db(\n \"SELECT auth_client FROM bot_data WHERE course = (%s)\", [course]\n ).fetchone()\n\n client = ret[0] if ret else \"\"\n\n return f\"\"\"\n First, ensure that
61A Auth is set up for your course.\n            <br/><br/>\n            Create a client on 61A Auth with staff access to Piazza. Then, set up the slackbot:\n            <br/><br/>\n            <form action=\"/set_config/{course}\" method=\"POST\">\n            <input name=\"client_name\" value=\"{client}\">\n            <input name=\"client_secret\">\n            <input type=\"submit\">\n            </form>
\n Then, add the slackbot to your workspace!\n \"\"\"\n\n @app.route(\"/set_config/\", methods=[\"POST\"])\n def set_course_config(course):\n if get_endpoint(course) not in get_staff_endpoints(app.remote):\n abort(403)\n\n client_name = request.form[\"client_name\"]\n client_secret = request.form[\"client_secret\"]\n\n with connect_db() as db:\n check = db(\n \"SELECT * FROM bot_data WHERE course = (%s)\", [course]\n ).fetchone()\n if not check:\n db(\n \"INSERT INTO bot_data VALUES (%s, %s, %s, %s, %s)\",\n [\"\", client_name, client_secret, \"\", course],\n )\n else:\n db(\n \"UPDATE bot_data SET auth_client=(%s), auth_secret=(%s) WHERE course=(%s)\",\n [client_name, client_secret, course],\n )\n\n return redirect(url_for(\"course_config\", course=course))\n\n\ndef store_user_token(user, token):\n with connect_db() as db:\n result = db(\"SELECT user FROM tokens WHERE user=%s\", (user,))\n if not result.fetchone():\n db(\"INSERT INTO tokens VALUES (%s, %s)\", (user, token))\n db(\"UPDATE tokens SET token=(%s) WHERE user=(%s)\", (token, user))\n\n\ndef get_user_token(user):\n with connect_db() as db:\n out = db(\"SELECT token FROM tokens WHERE user=%s\", (user,)).fetchone()\n if not out:\n check = db(\n \"SELECT user FROM silenced_users WHERE user=%s\", (user,)\n ).fetchone()\n if check: # user doesn't want to use the tool :(\n return REJECTED\n return UNABLE\n return out[\"token\"]\n\n\ndef store_bot_token(course, team_id, token):\n with connect_db() as db:\n db(\"UPDATE bot_data SET bot_access_token=(%s), team_id=(%s) WHERE course=(%s)\", [token, team_id, course])\n\n\ndef get_team_data(team_id):\n with connect_db() as db:\n return db(\"SELECT course, bot_access_token FROM bot_data WHERE team_id = (%s)\", [team_id]).fetchone()\n","sub_path":"src/config_client.py","file_name":"config_client.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"285982705","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nfrom typing import List\n\n\nclass Solution:\n def massage(self, nums: List[int]) -> int:\n n = len(nums)\n dp = [0] * n\n dp[0] = nums[0]\n dp[1] = max(nums[0], nums[1])\n for i in range(2, n):\n dp[i] = max(dp[i - 2] + nums[i], dp[i - 1])\n return dp[-1]\n\n\nif __name__ == '__main__':\n sn = Solution()\n nums = [2, 1, 4, 5, 3, 1, 1, 3] # 12\n print(sn.massage(nums))\n","sub_path":"动态规划/1716.py","file_name":"1716.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"138351288","text":"import numpy as np\r\nfrom matplotlib import pyplot as plt \r\nx = np.linspace(0,15,400)\r\ny = -3 * ( x - 30 ) ** 2 * np.sin(x)\r\nplt.figure(1)\r\nplt.xlabel(\"x\")\r\nplt.ylabel(\"y\")\r\nplt.hlines(0,0,15 ,colors = \"c\", linestyles = \"dashed\")\r\nplt.ylim(-3000, 3000)\r\nplt.plot(x,y)\r\nplt.show()\r\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"195250905","text":"from ch_model_base import *\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nhome_stdout = sys.stdout\n\ninp_model = sys.argv[1]\nchain = sys.argv[2]\nnum_models = int(sys.argv[3])\ncdr3a = sys.argv[4].split('_')\ncdr3b = sys.argv[5].split('_')\nmove_beta = int(sys.argv[6])\ninp_pdb_path = sys.argv[7]\n\nif len(sys.argv) == 9:\n structure_name = sys.argv[8]\nelse:\n structure_name = 
'TCR_loop'\n\nlog.verbose()\nenv = environ()\nenv.io.atom_files_directory = ['.', inp_pdb_path]\n\nclass MyLoop(loopmodel):\n # This routine picks the residues to be refined by loop modeling\n\n def select_loop_atoms(self):\n # Two residue ranges (both will be refined simultaneously)\n chain_selection = []\n trim_alpha = 0\n trim_beta = 0\n if chain == 'all' or chain == 'paired' or chain == 'alpha':\n if int(cdr3a[1]) - int(cdr3a[0]) > 6:\n trim_alpha = 3\n print('yay_alpha')\n chain_selection.append(self.residue_range('{0}:A'.format(int(cdr3a[0])+trim_alpha), '{0}:A'.format(int(cdr3a[1])-trim_alpha)))\n if chain == 'all' or chain == 'paired' or chain == 'beta':\n if int(cdr3b[1]) - int(cdr3b[0]) > 6:\n trim_beta = 3\n print('yay_beta')\n chain_selection.append(self.residue_range('{0}:B'.format(int(cdr3b[0])+move_beta+trim_beta), '{0}:B'.format(int(cdr3b[1])+move_beta-trim_beta)))\n print(chain_selection)\n return selection(*chain_selection)\n\na = MyLoop(env,\n inimodel='{0}'.format(inp_model), # initial model of the target\n sequence='{0}'.format(structure_name)) # assess each loop with DOPE\n\na.loop.starting_model = 1 # First loop model\na.loop.ending_model = num_models # Last loop model\na.loop.md_level = refine.slow\n\na.make() # do modeling and loop refinement\n\ntemplate_mdlch = complete_pdb(env, inp_model)\n\nts = selection(template_mdlch)\nts.assess_dope(output=\"ENERGY_PROFILE NO_REPORT\", file='{0}{1}.profile'.format(structure_name, 0),\n normalize_profile=True, smoothing_window=15)\n\nfor n in range(1, num_models+1):\n mdlch = complete_pdb(env, '{0}.BL{1:{fill}{align}4}0001'.format(structure_name, n, fill=0, align='>'))\n s = selection(mdlch)\n s.assess_dope(output=\"ENERGY_PROFILE NO_REPORT\", file='{0}{1}.profile'.format(structure_name, n),\n normalize_profile=True, smoothing_window=15)\n","sub_path":"ch_scripts/modeller/ch_loop_refinement.py","file_name":"ch_loop_refinement.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"208303728","text":"from datetime import datetime, timedelta\nimport threading\nimport time, json\nfrom app.api import helpers\nfrom elasticsearch import Elasticsearch\n\nimport celery\nimport os\nimport pandas as pd\nimport feather\nimport multiprocessing\n\nfrom flask import request, Blueprint\nfrom app.api.workers import arima_forecast, holt_winters, forecast\n\nreceiver = Blueprint('receiver', __name__)\n\ndef insert_sub_task(payload):\n result = helpers.post_request('http://localhost:5002/api/v1/subtasks/add', json.dumps(payload))\n return result\n\ndef get_initial_data(index, field, task_name, task_id, field_name, field_value, timestamp, count, filename):\n details = helpers.query_elastic_details(es, index, field)\n initial_sub_task_data = pd.DataFrame(columns=['Datetime', 'Count'])\n\n overall_datetime = []\n overall_count = []\n for _details in details['aggregations']['3']['buckets'][field][\"2\"]['buckets']:\n overall_datetime.append(datetime.strptime(_details['key_as_string'], '%Y-%m-%dT%H:%M:%S.%fZ'))\n overall_count.append(_details['doc_count'])\n\n overall_datetime.append(timestamp)\n overall_count.append(int(count))\n\n initial_sub_task_data['Datetime'] = overall_datetime\n initial_sub_task_data['Count'] = overall_count\n\n helpers.check_file(\"app/data/{}/{}\".format(task_name, filename), initial_sub_task_data)\n sub_task = insert_sub_task({\n 'task_id': task_id,\n 'task_field_name': field_name.strip(),\n 'task_field_value': 
field_value.strip(),\n 'filename': filename\n })\n\n return sub_task['sub_task_id']\n\n\ndef check_previous_prediction(timestamp, count, subtask_id):\n check_result = helpers.get_request('http://localhost:5002/api/v1/results/{}'.format(subtask_id))\n\n payload = {\n 'result_id': check_result['result_id'],\n 'count': count,\n 'hour': timestamp.hour,\n 'day': helpers.convert_weekday(timestamp.weekday()),\n 'minute': timestamp.minute\n }\n\n if check_result is not None:\n update_result = helpers.post_request('http://localhost:5002/api/v1/results/update', json.dumps(payload))\n\n return update_result\n\n\ndef process_prediction(sub_task_id, prediction_schedule, interval_type, timestamp, task_id, filename, source, task_name):\n # inserting of initial result\n result = helpers.get_request('http://localhost:5002/api/v1/results/add/{}'.format(sub_task_id))\n result_id = result['result_id']\n\n predicted_date = ''\n if interval_type == 1:\n predicted_date = timestamp + timedelta(minutes=prediction_schedule)\n\n model_data = helpers.get_request('http://localhost:5002/api/v1/taskmodels/{}'.format(task_id))\n\n model_list = []\n for _model in model_data['data']:\n model_name = _model['name'].lower().strip()\n if model_name == 'arima':\n arima_forecast.predict(sub_task_id, result_id, _model['model_id'], filename, predicted_date, source,\n task_name)\n # elif model_name == 'holt_winter':\n # holt_winters.predict.delay(sub_task_id, result_id, _model['model_id'], filename, predicted_date, source, task_name)\n\n\n return model_list\n\n\ndef predict(task_queue):\n while not task_queue.empty():\n data = task_queue.get()\n source = data[0]\n task_name = data[1]\n field_name = data[2]\n field_value = data[3]\n timestamp = data[4]\n count = data[5]\n check_task_name = data[6]\n\n result = dict()\n\n filename = ''\n sub_task_id = ''\n\n helpers.check_dir(\"app/data/{}\".format(task_name))\n check_sub_task = helpers.post_request('http://localhost:5002/api/v1/subtasks/exists', json.dumps({\n 'task_id': check_task_name['task_id'],\n 'task_field_name': field_name,\n 'task_field_value': field_value\n }))\n timestamp = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')\n\n if check_sub_task['result']:\n filename = check_sub_task['filename']\n df = feather.read_dataframe(\"app/data/{0}/{1}\".format(task_name, check_sub_task['filename']))\n df2 = pd.DataFrame({\"Datetime\": [timestamp], \"Count\": [count]})\n df = df.append(df2, ignore_index=True)\n os.remove(\"app/data/{0}/{1}\".format(task_name, check_sub_task['filename']))\n df.to_feather(\"app/data/{0}/{1}\".format(task_name, check_sub_task['filename']))\n sub_task_id = check_sub_task['sub_task_id']\n #check_previous_prediction(timestamp, count, check_sub_task['sub_task_id'])\n process_prediction(sub_task_id, check_task_name['prediction_schedule'], check_task_name['interval_type'],\n timestamp, check_task_name['task_id'], filename, source, task_name)\n else:\n filename = '{0}_{1}'.format(field_value.strip().lower().replace(\" \", \"_\"),\n datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n sub_task_id = get_initial_data('sjc01-c01-cdrs-acme-*', '{}:{}'.format(field_name, field_value), task_name,\n check_task_name['task_id'], field_name, field_value, timestamp, count,\n filename)\n\n\n return 'Prediction Running'\n\ndef fill_data(task_queue, _data, check_task_name):\n if _data:\n source = int(_data['source'])\n task_name = _data['task_name'].strip()\n field_name = _data['field_name']\n field_value = _data['field_value']\n timestamp = _data['timestamp']\n count = 
_data['count']\n\n task_queue.put([source, task_name, field_name, field_value, timestamp, count, check_task_name])\n\n@receiver.route(\"/api/v1/formatter\", methods=['POST', 'GET'])\ndef main():\n data = request.json\n prediction_start = datetime.now()\n thread_list = []\n\n check_task_name = helpers.get_request('http://localhost:5002/api/v1/tasks/exists/{}'\n .format(data['task_name'].strip()))\n\n if check_task_name['result']:\n task_queue = multiprocessing.Queue()\n for _data in data[\"data\"]:\n fill_data(task_queue, _data, check_task_name)\n # process = multiprocessing.Process(target=predict, args=(int(source), task_name, field_name, field_value,\n # timestamp, int(count), check_task_name))\n # thread_list.append(process)\n # process.start()\n #\n # #for thread in thread_list:\n #\n #\n # for thread in thread_list:\n # thread.join()\n processes = []\n MAX_THREAD = 10\n\n for proc in range(MAX_THREAD):\n process = multiprocessing.Process(target=predict, args=(task_queue, ))\n\n ## then sa \"predict\" function, mag task_queue.get() then i-assign mo sa any variable\n\n processes.append(process)\n process.start()\n\n for p in processes:\n p.join()\n\n for p in processes:\n p.close()\n\n\n else:\n result['message'] = 'Task Name does not Exists'\n result['status'] = 'Error'\n\n prediction_end = datetime.now()\n prediction_time = prediction_end - prediction_start\n prediction_time = prediction_time.total_seconds()\n prediction_time = prediction_time / 60\n\n return 'Prediction Running'\n","sub_path":"app/api/receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"443195750","text":"import typing\n\nfrom src.api.db.functions.service_credentials import (\n get_credentials_by_user_id,\n update_expires_in_field,\n)\n\n\nclass SelectCredentials:\n def __init__(self, conn):\n self.conn = conn\n\n async def get_credentials(self, user_id: int) -> typing.Any:\n cred_instance = await get_credentials_by_user_id(\n conn=self.conn, user_id=user_id\n )\n return cred_instance\n\n\nclass UpdateCredentials:\n def __init__(self, conn):\n self.conn = conn\n\n async def credentials_expires_in(\n self, credentials_id: int, access_token: str, new_expires_in: float\n ) -> typing.Any:\n updated_credentials = await update_expires_in_field(\n conn=self.conn,\n credentials_id=credentials_id,\n access_token=access_token,\n new_expires_in=new_expires_in,\n )\n return updated_credentials\n","sub_path":"src/api/v1/core/engine/queries/service_credentials.py","file_name":"service_credentials.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"263553843","text":"from .DataObject import DataObject\nfrom flask import abort\nfrom slugify import slugify\nfrom .OrderableMixin import OrderableMixin\nfrom .VersionedMixin import VersionedMixin\nfrom sqlalchemy import event\n\nfrom silverflask import db\n\nregistered_subclasses = []\n\nclass MetaClass(type):\n def __new__(cls, clsname, bases, attrs):\n newclass = super(MetaClass, cls).__new__(cls, clsname, bases, attrs)\n registered_subclasses.append(newclass) # here is your register function\n return newclass\n\n\nclass SiteTree(VersionedMixin, DataObject, OrderableMixin, db.Model):\n \"\"\"\n The SiteTree is the database model from which all pages have to inherit.\n It defines the parent/children relationships of the page tree.\n It also defines everything 
that's needed to get nice URL slugs working.\n \"\"\"\n\n parent_id = db.Column(db.Integer, db.ForeignKey('sitetree.id'))\n name = db.Column(db.String)\n database = [\"parent_id\", \"name\"]\n type = db.Column(db.String(50))\n urlsegment = db.Column(db.String(250), nullable=False)\n\n template = \"page.html\"\n\n __mapper_args__ = {\n 'polymorphic_identity': 'sitetree',\n 'polymorphic_on': type\n }\n\n children = db.relationship('SiteTree',\n cascade=\"all\",\n # many to one + adjacency list - remote_side\n # is required to reference the 'remote'\n # column in the join condition.\n backref=db.backref(\"parent\", remote_side='SiteTree.id'),\n order_by='SiteTree.sort_order'\n )\n\n allowed_children = []\n can_be_root = True\n icon = 'glyphicon glyphicon-file'\n\n def get_siblings(self):\n return SiteTree.query.filter(SiteTree.parent_id == self.parent_id)\n\n @classmethod\n def default_template(cls):\n return cls.__name__.lower() + \".html\"\n\n @staticmethod\n def get_sitetree(parent_id=None):\n base_page = SiteTree.query.filter(SiteTree.parent_id == parent_id)\\\n .order_by(SiteTree.sort_order.asc())\n dest_list = []\n for p in base_page:\n dest_dict = {}\n SiteTree.recursive_build_tree(p, dest_dict)\n dest_list.append(dest_dict)\n return dest_list\n\n @staticmethod\n def recursive_build_tree(root_node, dest_dict):\n dest_dict.update(root_node.jqtree_dict())\n children = root_node.children\n if children:\n dest_dict['children'] = []\n for child in children:\n temp_dict = {}\n dest_dict['children'].append(temp_dict)\n SiteTree.recursive_build_tree(child, temp_dict)\n else:\n return\n\n\n @classmethod\n def get_cms_form(cls):\n form = super().get_cms_form()\n del form.children\n del form.sort_order\n del form.urlsegment\n return form\n\n def append_child(self, child):\n self.children.append(child)\n\n def as_dict(self):\n d = dict()\n try:\n d = super().as_dict()\n except:\n d = super(self.__class__, self).as_dict()\n d.update({\n \"parent_id\": self.parent_id,\n \"name\": self.name,\n \"type\": self.type\n })\n return d\n\n def jqtree_dict(self):\n return {\n \"text\": self.name,\n \"parent_id\": self.parent_id,\n \"created_on\": self.created_on,\n \"type\": self.__class__.__name__,\n \"li_attr\": {\n \"data-pageid\": str(self.id)\n },\n \"a_attr\": {\n \"href\": \"/admin/edit/page/{0}\".format(self.id)\n }\n }\n\n @staticmethod\n def get_by_url(url, cls=None):\n if not cls:\n cls = SiteTree\n vars = url.split('/')\n node = cls.query.filter(cls.urlsegment == vars[0]).first()\n if not node:\n abort(404)\n\n for var in vars[1:]:\n node = cls.query.filter(cls.urlsegment == var,\n cls.parent_id == node.id).first()\n if not node:\n abort(404)\n return node\n\n def get_url(self):\n url = \"/\" + self.urlsegment\n el = self\n while el.parent:\n url = \"/\" + el.parent.url_segment + url\n el = el.parent\n return url\n\n\n def set_parent(self, parent_id):\n if not parent_id:\n if self.can_be_root:\n self.parent_id = None\n return\n else:\n raise Exception(\"This page type can not be a root node!\")\n else:\n parent = SiteTree.query.get(int(parent_id))\n if parent:\n if hasattr(parent, 'allowed_children') and self.__class__.__name__ in parent.allowed_children:\n self.parent_id = parent_id\n else:\n raise Exception(\"Parent not allowed!\")\n else:\n raise Exception(\"Parent not existing!\")\n return\n\n def __init__(self):\n pass\n # self.database.extend(super(SiteTree, self).database)\n\n @classmethod\n def create_slug(cls, target, id=None):\n possible_slug = slugify(target.name, to_lower=True)\n slug 
= possible_slug\n count = 0\n def get_query(target, slug, id=None):\n query = cls.query.filter(cls.parent_id == target.parent_id,\n cls.urlsegment == slug)\n if id:\n query = query.filter(cls.id != id)\n return query\n while get_query(target, slug, id).count() > 0:\n slug = \"{0}-{1}\".format(possible_slug, count)\n target.urlsegment = slug\n\n @classmethod\n def pagetypes(cls):\n polymorphic_map = cls.__mapper__.polymorphic_map\n sitetree_props = {}\n for mapper in polymorphic_map.values():\n if mapper.class_ != cls:\n mapped_class = mapper.class_\n sitetree_props[mapped_class.__name__] = {\n 'allowed_children': mapped_class.allowed_children,\n 'icon': mapped_class.icon if mapped_class.icon else 'default'\n }\n return sitetree_props\n\n def before_insert(self, mapper, context, target):\n self.create_slug(target)\n\n def before_update(self, mapper, context, target):\n self.create_slug(target, self.id)","sub_path":"silverflask/models/SiteTree.py","file_name":"SiteTree.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"54248384","text":"from django.conf import settings\nfrom django.conf.urls import include, re_path\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.urls import path\nfrom django.views.decorators.cache import cache_page\nfrom django.views.generic import TemplateView\n\nfrom pyclist.sitemaps import sitemaps\nfrom pyclist.views import test\n\nadmin.autodiscover()\n\nurlpatterns = [\n re_path(r'', include('clist.urls')),\n re_path(r'', include('my_oauth.urls')),\n re_path(r'', include('true_coders.urls')),\n re_path(r'', include('ranking.urls')),\n re_path(r'', include('events.urls')),\n re_path(r'', include('chats.urls')),\n\n re_path(r'^telegram/', include('tg.urls')),\n\n re_path(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n re_path(r'^admin/', admin.site.urls),\n\n re_path(r'^robots\\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),\n\n re_path(r'^googleee727737cf7b6a5a.html$', TemplateView.as_view(template_name='googleee727737cf7b6a5a.html')),\n\n re_path(r'^imagefit/', include('imagefit.urls')),\n re_path(r'^webpush/', include('webpush.urls')),\n\n path('o/', include('oauth2_provider.urls', namespace='oauth2_provider')),\n\n path('sitemap.xml',\n sitemap if settings.DEBUG else cache_page(86400)(sitemap),\n {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),\n]\n\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns = [\n re_path(r'^__debug__/', include(debug_toolbar.urls)),\n re_path(r'__test__/', test, name='__test__'),\n ] + urlpatterns\n","sub_path":"pyclist/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"215072146","text":"import sys\n\ndef find_val_by_key(cle, dic):\n for key, value in dic.items():\n if cle.lower() == key.lower():\n return value\n return None\n\ndef find_key_by_val(val, dic):\n for key, value in dic.items():\n if value.lower() == val.lower():\n return key\n return None\n\ndef getCc(state, stLetters, capital_cities):\n capital = find_val_by_key(stLetters, capital_cities)\n\n print(capital, \"is the capital of\", state)\n\ndef getSt(capital, ccLetters, states):\n state = find_key_by_val(ccLetters, states)\n\n print(capital, \"is the capital of\", state)\n\ndef arrParser(arr, st, cc):\n for item in arr:\n item = 
item.strip()\n\n if len(item) > 0:\n stLetters = find_val_by_key(item, st)\n ccLetters = find_key_by_val(item, cc)\n\n if not stLetters and not ccLetters:\n print(item, \"is neither a capital city nor a state\")\n\n if stLetters:\n state = find_key_by_val(stLetters, st)\n getCc(state, stLetters, cc)\n elif ccLetters:\n capital = find_val_by_key(ccLetters, cc)\n getSt(capital, ccLetters, st)\n\ndef allIn():\n if len(sys.argv) != 2:\n exit()\n\n states = {\n \"Oregon\" : \"OR\",\n \"Alabama\" : \"AL\",\n \"New Jersey\": \"NJ\",\n \"Colorado\" : \"CO\"\n }\n\n capital_cities = {\n \"OR\": \"Salem\",\n \"AL\": \"Montgomery\",\n \"NJ\": \"Trenton\",\n \"CO\": \"Denver\"\n }\n\n arr = sys.argv[1].split(\",\")\n\n arrParser(arr, states, capital_cities)\n\nif __name__ == '__main__':\n allIn()\n","sub_path":"d01/ex05/all_in.py","file_name":"all_in.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"480940608","text":"from bs4 import BeautifulSoup\nimport os\nimport sys\nimport requests\nimport csv\nfrom requests.exceptions import ConnectionError \nfrom io import StringIO\nfrom PIL import Image\nimport time\n\n# crawling musicbrainz\n\ndef getCover(tag, w):\n base = 'http://musicbrainz.org'\n baseurl = 'https://musicbrainz.org/tag/' + tag + '/release'\n print(baseurl)\n r = requests.get(baseurl)\n soup = BeautifulSoup(r.content, 'html.parser')\n\n # count total pages\n pagelist = soup.find(\"ul\", \"pagination\")\n if (pagelist != None):\n print(pagelist.find_all(\"a\")[-2])\n pagenum= int(pagelist.find_all(\"a\")[-2].contents[0])\n if pagenum>10:\n pagenum = 10\n else:\n pagenum = 1\n print('pagenum: ' , pagenum)\n \n for page in range(1,pagenum+1):\n if (page != 1):\n r = requests.get(baseurl+'?page=' + str(page))\n soup = BeautifulSoup(r.content, 'html.parser')\n itemlist_temp = soup.find(\"div\", \"tabs\")\n itemlist = itemlist_temp.find_next_siblings(\"ul\")\n itemlist = itemlist[0].find_all(\"li\")\n itemnum = len(itemlist)\n print('itemnum: ', itemnum)\n\n for i in range(itemnum):\n line = []\n href_temp = itemlist[i].find(\"a\")\n href = href_temp['href']\n imgname = href.split('/')[-1]\n line.append(imgname)\n print(imgname)\n\n # download cover image\n img_url = requests.get(base+href+'/cover-art')\n img_content = img_url.content\n img_soup = BeautifulSoup(img_content, 'html.parser')\n imglink_temp = img_soup.find(\"p\", \"small\")\n if imglink_temp != None:\n imglink = imglink_temp.find(\"a\")['href'][2:]\n print(imglink)\n re = requests.get('http://'+imglink)\n\n if re.status_code ==200:\n with open (\"/Users/kyungyunlee/dev/dcgan_albumcoverart/img/\" + imgname+ '.jpg', 'wb') as f:\n f.write(re.content)\n\n # get tags\n album_url = requests.get(base+href+'/tags')\n print(base+href+'/tags')\n album_content = album_url.content\n soup2 = BeautifulSoup(album_content, 'html.parser')\n tags_temp= soup2.find(\"ul\", \"tag-list\")\n tags = tags_temp.find_all(\"bdi\")\n print(len(tags))\n taglen = 10\n if len(tags) < 11:\n taglen = len(tags)\n for j in range(taglen):\n line.append(tags[j].contents[0])\n if len(tags) < 11:\n for k in range(10-len(tags)):\n line.append(tag)\n \n print(line)\n w.writerow(line)\n\nif __name__ == '__main__':\n MSDtags = ['beautiful', 'punk', 'indie', 'male vocalists', 'female vocalist', 'heavy metal', 'pop', 'sad', '00s','ambient', 'alternative', 'hard rock', 'electronic', 'blues', 'folk', 'classic rock', 'alternative rock', '90s', '60s', 'indie rock', 'electronica', 
'female vocalists', 'easy listening', 'dance', 'funk', 'House', '80s', 'party', 'Mellow', 'electro', 'chillout', 'happy', 'oldies', 'rnb', 'jazz', '70s', 'instrumental', 'indie pop' ,'sexy', 'Hip-Hop', 'chill', 'guitar', 'country', 'metal','soul','catchy','rock','acoustic','Progressive-rock','experimental' ]\n # write to csv file\n with open('covertags2.csv', 'w') as csvfile:\n w = csv.writer(csvfile, delimiter= ',')\n for tag in MSDtags:\n try:\n getCover(tag,w)\n except TypeError:\n print('typeerror')\n time.sleep(10)\n except ConnectionError:\n time.sleep(10)\n except KeyboardInterrupt:\n print('\\Exited by KeyboardInterrupt')\n sys.exit(0)\n","sub_path":"cover_crawl2.py","file_name":"cover_crawl2.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"509551203","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom shiyanlou.items import UserItem\n\n\nclass UsersSpider(scrapy.Spider):\n name = 'users'\n allowed_domains = ['shiyanlou.com']\n \n @property\n def start_urls(self):\n \"\"\"\n 实验楼注册的用户数目前大约六十几万,为了爬虫的效率,\n 取id 在 524800~525000 之间的新用户\n 每隔10 取一个 ,最后大概爬取 20个用户的数据\n \"\"\"\n url_tmpl = 'https://www.shiyanlou.com/users/{}/'\n return (url_tmpl.format(i) for i in range(525000, 524800, -10))\n\n def parse(self, response):\n item = UserItem(\n name = response.xpath('//div[@class=\"user-meta\"]/span/text()').extract()[0].strip(),\n level = response.xpath('//div[@class=\"user-meta\"]/span/text()').extract()[1].strip(),\n status = response.xpath('//div[@class=\"user-status\"]/span/text()').extract_first(default='null').strip(),\n school_job = response.xpath('//div[@class=\"user-status\"]/span[2]/text()').extract_first(default='null').strip(),\n join_date = response.css('span.user-join-date::text').extract_first().strip(),\n learn_courses_num = response.xpath('//span[@class=\"tab-item\"]/text()').re_first('\\D+(\\d+)\\D+')\n )\n if len(response.css('div.user-avatar img').extract()) == 2:\n item['is_vip'] = True\n yield item\n","sub_path":"shiyanlou_add_users/shiyanlou/spiders/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"97501511","text":"import sys\n\ndef reducer():\n \"\"\"\n Read each line from std in and sum the daily ridership (second field)\n until the key is different from the current key. When that happens,\n divide the summed results by the number of days in the month to get the average. 
Then, push into a list of tuples.\n \n On completion, we sort the tuple list and return the top 10 values\n \"\"\"\n \n entries = 0\n old_key = None\n final_results = []\n\n for line in sys.stdin:\n unit, hourly = line.split('\\t')\n if old_key and old_key != unit:\n final_results.append(\n (old_key, entries)\n )\n entries = 0\n old_key = unit\n entries += float(hourly)\n final_results.append(\n (old_key, entries)\n )\n\n print(sorted(final_results, key=lambda x: x[1]))[-10:]\n\nif __name__ == '__main__':\n reducer()\n","sub_path":"notes/reference/moocs/udacity/ud359-intro-to-data-science/final_project/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"91766770","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis is to determine the longest substring in alphabetical order:\n\"\"\"\n\n\nstring = 'ilkdwqpfjgmjfplk'\n\n\nif len(string) == 1:\n print (string)\n \n\nelse:\n \n string = string + ' '\n \n MoveIndexA = 0; MoveIndexB = 0; stringIndexA = 0; stringIndexB = 1; longestLength = 0;\n while MoveIndexB < len(string) - 2:\n# print(MoveIndexA, MoveIndexB, string[MoveIndexA],string[MoveIndexB])\n\n if string[MoveIndexB] <= string[MoveIndexB+1]:\n MoveIndexB += 1\n elif string[MoveIndexB] > string[MoveIndexB+1]:\n MoveIndexB += 1\n MoveIndexA = MoveIndexB \n \n if MoveIndexB - MoveIndexA > longestLength:\n longestLength = MoveIndexB - MoveIndexA \n stringIndexA = MoveIndexA\n stringIndexB = MoveIndexB +1\n \n\n print(stringIndexA, stringIndexB)\n\n\n\nprint (\"Longest substring in alphabetical order is: {}\".format(string[stringIndexA:stringIndexB]))\n\n\"\"\"\n\n\"\"\"\n\n","sub_path":"ps1/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"355478350","text":"import datetime\n\nBITMEX = \"bitmex\"\nMIN_DATE = datetime.date(2016, 5, 14)\n\nURL = \"https://s3-eu-west-1.amazonaws.com/public.bitmex.com/data/trade/\"\n\nAPI_URL = \"https://www.bitmex.com/api/v1\"\nMAX_API_RESULTS = 500\n\nuBTC = 0.000001 # 0.001 mXBT\n\nXBT = \"XBT\"\nETH = \"ETH\"\n\nXBTUSD = \"XBTUSD\"\nETHUSD = \"ETHUSD\"\nBCHUSD = \"BCHUSD\"\nLTCUSD = \"LTCUSD\"\nXRPUSD = \"XRPUSD\"\n\nJANUARY = \"F\"\nFEBRUARY = \"G\"\nMARCH = \"H\"\nAPRIL = \"J\"\nMAY = \"K\"\nJUNE = \"M\"\nJULY = \"N\"\nAUGUST = \"Q\"\nSEPTEMBER = \"U\"\nOCTOBER = \"V\"\nNOVEMBER = \"X\"\nDECEMBER = \"Z\"\n\nMONTHS = [\n JANUARY,\n FEBRUARY,\n MARCH,\n APRIL,\n MAY,\n JUNE,\n JULY,\n AUGUST,\n SEPTEMBER,\n OCTOBER,\n NOVEMBER,\n DECEMBER,\n]\n","sub_path":"crypto_exchange_etl/data_providers/bitmex/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"259998129","text":"class MPSOptimizerParameters(object):\n \"\"\"\n MPSOptimizerParameters is used to pass in optional parameters for MPSOptimizer,\n as it saves having a large number of optional parameters.\n \"\"\"\n def __init__(self, cutoff=1000,\n reg=0.001, lr_reg=0.99, min_singular_value=10**(-4),\n verbosity=0, armijo_coeff=10**(-4), use_hessian=False,\n armijo_iterations=10, path=\"MPSconfig\", updates_per_step=1):\n \"\"\"\n :param cutoff: float\n The cutoff value for the gradient. 
Anything above this is clipped off.\n Deprecated.\n :param reg: float\n The amount of regularisation to include in the cost function.\n The higher this value is, the more the regularization of the weight matrices matters.\n :param lr_reg: float, should be between 0 and 1\n lr_reg is the learning rate regularisation, and this value determines how much the\n learning rate decreases by as more training is done.\n A value of 0 keeps the learning rate constant.\n When the value is greater than 0, instead of the learning rate getting smaller than the\n provided learning rate as more steps are done, the learning rate is boosted at the start\n and as the number of steps goes on, approaches the provided learning rate.\n :param min_singular_value: float\n Below this, values in the decomposed singular values are ignored.\n Set this value high if you want a compact model,\n and low if you want a more accurate model.\n :param verbosity: integer\n This value controls how much the optimizer prints out during training.\n Set it to 0 to not have anything printed out, and a positive number n to have\n the first n loops printed out.\n Set this to a negative value to have the optimizer print out logging information\n every time it loops.\n :param armijo_coeff: float\n The coefficient for the tangential part of the armijo equation.\n The higher this value is, the more stringent the armijo condition is.\n :param use_hessian: bool\n Controls whether to use the Hessian or not in calculating the gradient\n \"\"\"\n self.cutoff = cutoff\n self.reg = reg\n self.lr_reg = lr_reg\n self.min_singular_value = min_singular_value\n self.verbosity = verbosity\n self.armijo_coeff = armijo_coeff\n self.use_hessian = use_hessian\n self.armijo_iterations = armijo_iterations\n self.path = path\n self.updates_per_step = updates_per_step\n\nclass MPSTrainingParameters(object):\n \"\"\"\n MPSTrainingParameters is used to pass in optional parameters for MPSOptimizer in the training step,\n as it saves having a large number of optional parameters.\n \"\"\"\n def __init__(self, rate_of_change=1000, initial_weights=None, verbose_save=True,\n _logging_enabled=False,):\n \"\"\"\n :param rate_of_change: float\n The rate of change for the optimisation.\n Different values should be tried, as there is no 'right answer' that works for\n all situations, and depending on the data set, the same value can cause\n overshooting, or make the optimisation slower than it should be.\n :param initial_weights: list\n The initial weights for the network, if it is desired to override the default values\n from mps.prepare(self, data_source, iterations = 1000).\n Deprecated.\n :param _logging_enabled: boolean\n Whether certain things are logged to Tensorboard/ to a Chrome timeline.\n \"\"\"\n self.rate_of_change = rate_of_change\n self.initial_weights = initial_weights\n self._logging_enabled = _logging_enabled\n self.verbose_save = verbose_save\n","sub_path":"trmps/optimizers/parameterObjects.py","file_name":"parameterObjects.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"260202841","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# get_ipython().system('pip3 install numpy')\n# get_ipython().system('pip3 install xgboost')\n# get_ipython().system('pip3 install pandas')\n# get_ipython().system('pip3 install iFeature')\n\n\n# In[12]:\n\n\nimport iFeature as ifea\nimport os\nimport re\nimport xgboost as xgb\nimport pandas as pd\nimport 
numpy\nimport pickle\nimport sys\n\ninput_file = \"E:/Java/java/pippin.erc.monash.edu/web/WEB-INF/python/user00001/input.fasta\"\n#input_file = sys.argv[1]\nprint(sys.argv[1])\nindexOfLast = input_file.rfind(\"/\")\noutputFolder = input_file[0:indexOfLast]\nprint(outputFolder)\n\n\n# # download ifeature values\ndef calculateIFeature(fname):\n os.system(\"python ./iFeature.py --file %s --type %s --order alphabetically --out %s\" % (\n input_file, \"APAAC\", outputFolder + \"/calculatedFeatures/APAAC.txt\"))\n os.system(\"python ./iFeature.py --file %s --type %s --order alphabetically --out %s\" % (\n input_file, \"CKSAAGP\", outputFolder + \"/calculatedFeatures/CKSAAGP.txt\"))\n os.system(\"python ./iFeature.py --file %s --type %s --order alphabetically --out %s\" % (\n input_file, \"CKSAAP\", outputFolder + \"/calculatedFeatures/CKSAAP.txt\"))\n os.system(\"python ./iFeature.py --file %s --type %s --order alphabetically --out %s\" % (\n input_file, \"CTDD\", outputFolder + \"/calculatedFeatures/CTDD.txt\"))\n os.system(\"python ./iFeature.py --file %s --type %s --order alphabetically --out %s\" % (\n input_file, \"DPC\", outputFolder + \"/calculatedFeatures/DPC.txt\"))\n os.system(\"python ./iFeature.py --file %s --type %s --order alphabetically --out %s\" % (\n input_file, \"GTPC\", outputFolder + \"/calculatedFeatures/GTPC.txt\"))\n os.system(\"python ./iFeature.py --file %s --type %s --order alphabetically --out %s\" % (\n input_file, \"NMBroto\", outputFolder + \"/calculatedFeatures/NMBroto.txt\"))\n os.system(\"python ./iFeature.py --file %s --type %s --order alphabetically --out %s\" % (\n input_file, \"DDE\", outputFolder + \"/calculatedFeatures/DDEtest.txt\"))\n\n\ncalculateIFeature(input_file)\n\n# get_ipython().system('python ./iFeature.py --file input.fasta --type DDE --out calculatedFeatures/DDEtest.txt')\n# !python ./iFeature.py --file input.fasta --type DDE --out output/DDEtest.txt\n\n\n# read the sequences\nfrom Bio import SeqIO\n\nrecord_dict = SeqIO.to_dict(SeqIO.parse(input_file, \"fasta\"))\n\n# define the motif feature patterns\nmotif = {\n \"MKTLLLTLVVVTIVCLDLGY\": \"MKTLLLTLVVVTIVCLDLGY\",\n \"WGGQGTPKDATDRCCFVHDCCY\": \"WGGQGTPKDATDRCCFVHDCCY\",\n \"KPGVDIKCCSTDKCN\": \"KPGVDIKCCSTDKCN\",\n \"MFTVFLLVVLATTVV\": \"MFTVFLLVVLATTVV\",\n \"IVCGKNDPCLRAICECDRAAAICFRENLNTYNKNYMYYSDSRCTEESEQC\": \"IVCGKNDPCLRAICECDRAAAICFRENLNTYNKNYMYYSDSRCTEESEQC\",\n \"CPPGZNJCYKKTWCD\": \"CPPGZNJCYKKTWCD\",\n \"PS00118\": \"CC[A-OQ-Z][A-Z]H[A-FH-KM-XZ][A-Z]C\",\n \"PS00119\": \"[LIVMA]C[ABDEGHJKNOQRUXZ]CD[A-FH-RT-Z][A-FH-Z][A-MO-Z][A-Z][A-PRT-Z]C\",\n \"PS00272\": \"GC[A-Z]{1,3}CP[A-Z]{8,10}CC[A-Z]{2}[PDEN]\",\n \"PS00280\": \"F[A-Z]{2}[A-HJ-Z]GC[A-Z]{6}[FY][A-Z]{5}C\",\n \"PS60004\": \"C[SREYKLIMQVN][A-Z]{2}[DGWET][A-Z][FYSPKV]C[GNDSRHTP][A-Z]{1,5}[NPGSMTAHF][GWPNIYRSKLQ][A-Z]CC[STRHGD][A-Z]{0,2}[NFLWSRYIT]C[A-Z]{0,3}[VFGAITSNRKL][FLIKRNGH][VWIARKF]C\",\n \"PS60005\": \"C[A-Z]{2}[EPSAGT][A-Z]{3}C[GSNDL][A-Z]{0,3}[PILV][A-Z][FPNDSG][GQ][A-Z]CC[A-Z]{3,4}C[FLVIA][A-Z]{1,2}[FVIWA]C\",\n \"PS60013\": \"CC[TGN][PFG][PRG][A-Z]{0,2}C[KRS][DS][RK][RQW]C[KR][PD][MLQH][A-Z]?[KR]CC \",\n \"PS60014\": \"CC[SHYN][A-Z]?[PRG][RPATV]C[ARMFTNHG][A-Z]{0,4}[QWHDGENLFYVP][RIVYLGSDW]C\",\n \"PS60015\": \"C[LAV][A-Z][DEK][A-Z]{3}C[A-Z]{6,7}CC[A-Z]{4}C[A-Z]C[A-Z]{5}C[A-Z]C\",\n \"PS60021\": \"C[KALRVG][A-Z][A-LN-Z][A-Z]{1,3}C[A-Z]{4,6}CC[A-Z]{4,6}C[A-Z]{4}[ERK]WC\",\n \"PS60022\": \"C[A-Z]{1,4}[FLIV][SEP]C[DE][EIQ][A-Z]{4,7}C[A-Z]{0,7}C[KST][A-Z]{4,18}C[YK][A-Z]{1,3}C\",\n \"PS60023\": 
\"CQCC[A-Z]{2}N[GA][FY]CS\",\n \"PS60025\": \"GEEE[A-Z]{2}[KE][A-DF-Z]{2}[A-Z]?E[A-Z][ILA]RE\"\n}\nmotifName = []\nfor key in motif:\n motifName = motifName + [key]\n\n# get the motif feature values of the example sequence\nmotifValue = {}\nfor proteinID in record_dict.keys():\n motifValue[proteinID] = []\n for feature in motif:\n match = re.search(r'%s' % motif[feature], str(record_dict[proteinID].seq))\n if match:\n motifValue[proteinID].append(\"1\")\n else:\n motifValue[proteinID].append(\"0\")\n\n# In[8]:\n\n\n# get 102 sequence feature values\npath = \"102iFeatureTypes.txt\"\nlines = [line for line in open(path)]\nmylist = []\n\nfor line in lines:\n line = line.replace(\"\\n\", \"\")\n splited = line.split(\"\\t\")\n mylist.append(splited)\n\nfeatures = []\niFeatureValues = {}\nfor key in motifValue.keys():\n iFeatureValues[key] = []\n\nfor i in range(len(mylist)):\n path = outputFolder + \"/calculatedFeatures/\" + mylist[i][0] + \".txt\"\n with open(path) as f:\n content = f.readlines()\n for j in range(len(mylist[i])):\n if j > 0:\n if (mylist[i][j] in content[0]):\n sss = content[0].split(\"\\t\");\n ind = 0\n for s in sss:\n if s == mylist[i][j]:\n break;\n ind = ind + 1\n if \"DDE\" == mylist[i][0]:\n sss[ind] = sss[ind] + \"_DDE\"\n if \"DPC\" == mylist[i][0]:\n sss[ind] = sss[ind] + \"_DPC\"\n if \"NMBROTO\" == mylist[i][0]:\n sss[ind] = sss[ind] + \"nmbroto\"\n features = features + [sss[ind]]\n\n for k in range(1, len(content)):\n valuesss = []\n ddd = content[k].split(\"\\t\")\n valuesss.append(ddd[ind])\n iFeatureValues[ddd[0]] = iFeatureValues[ddd[0]] + [ddd[ind]]\n\n# In[9]:\n\n\n# combine motif and ifeature together\nfeatureName = motifName + features\n\nfor key in motifValue.keys():\n motifValue[key] = motifValue[key] + iFeatureValues[key]\n\n# In[10]:\n\n\n# training and save the model to a file\n\n# fix the order of the features to the same order in the train\ndf = pd.read_csv(\"PrePostFinal111.csv\")\nX = df.iloc[:, :-1]\ny = df.iloc[:, -1]\n\ndata_dmatrix = xgb.DMatrix(data=X, label=y)\nfeatureN = data_dmatrix.feature_names\n\nnewName = []\nfor w in X.columns.tolist():\n if \"-gp1\" in w:\n w = w.replace(\"-gp1\", \"\")\n newName = newName + [w]\n\nX.columns = newName\n\n# reorder the training data according tot he columns in test data\ncolumnsList = X.columns.tolist();\nX = X[featureName]\n\n# # save trained model to file\n# data_dmatrix = xgb.DMatrix(data=X,label=y)\n# param = {'max_depth':6, 'eta':1, 'silent':1, 'objective':'binary:logistic'}\n# num_round = 100\n# classifier = xgb.train(param, data_dmatrix, num_round)\n# pickle.dump(classifier, open(\"xgboostPrePost.dat\", \"wb\"))\n# print(\"XGBoost classifier already trained and saved to a file xgboostPrePost.dat\")\n\n\n# In[11]:\n\n\n# load model from file\nloaded_model = pickle.load(open(\"xgboostPrePost.dat\", \"rb\"))\n\n# generate test data\ndataTest = pd.DataFrame(list(motifValue.values()))\ndataTest.columns = X.columns.tolist()\ndataTest = dataTest.convert_objects(convert_numeric=True)\ntestMatrix = xgb.DMatrix(dataTest)\n\n# make predictions for test data: 0 is for post, 1 is for pre\npredictProb = loaded_model.predict(testMatrix) # the predicted probabilities for the pre\n\npredictClass = [round(value) for value in predictProb] # the predicted class labels\n\nclasses = []\nfor w in predictClass:\n if w == 0.0:\n w = \"Pre\"\n else:\n w = \"Post\"\n classes.append(w)\n\n# pre is 1, post is o\nproteinsList = list(record_dict.keys()) # protein ID for the test\n\nresult = []\nfor i in 
range(len(proteinsList)):\n rei = [proteinsList[i]] + [1 - predictProb[i]] + [predictProb[i]] + [classes[i]]\n result = result + [rei]\n\n# put the result into a table\nresultDF = pd.DataFrame(result)\nresultDF.columns = [\"Protein ID\", \"Prob(Pre)\", \"Prob(Post)\", \"Predicted Class\"]\nprint(resultDF)\nresultDF.to_csv(outputFolder + \"/predict_output.csv\", sep='\\t')\n\n# In[ ]:\n\n\n\n\n","sub_path":"pippin/web/WEB-INF/python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"209634585","text":"\"\"\"\nNiceMenu.py\t\n(c) Copyright 2010 Jeff Buttars. All Rights Reserved.\n\"\"\"\n\n# System Imports\nimport threading\nimport vim\nimport subprocess\nfrom Queue import Queue, Empty\n\n# Regional Imports\n\ncompl_res = None\n\nclass LogThread( threading.Thread ):\n\t\"\"\"\"\"\"\n\n\tdef run( self ):\n\t\t\"\"\"docstring for run\"\"\"\n\t\tself.alive = True\n\t\tfd = open( '/tmp/vim.log', 'w' )\n\t\twhile self.alive:\n\t\t\t# try forever until we get some data\n\t\t\tlogEntry = NiceMenu_logQ.get(True)\n\t\t\tfd.write( \"%s\\n\" % logEntry )\n\t\t\tfd.flush()\n\n\t\tnmlog( \"LogThread stopping\", 'debug')\n\t\tfd.close()\n\t#run()\n\t\t\n#LogThread\n\nclass CmdThread( threading.Thread ):\n\n\tdef run( self ):\n\t\tnmlog( \"CmdThread.run()\", 'debug' )\n\t\tself.alive = True\n\t\tself.curPos = None\n\t\twhile self.alive:\n\t\t\ttry:\n\t\t\t\t# try forever until we get some data\n\t\t\t\titem = NiceMenu_cmdQ.get(True)\n\t\t\t\tnmlog( \"CmdThread.run() item:%s\" % item, 'debug' )\n\t\t\t\ttry:\n\t\t\t\t\tself.processCMD( item['cmd'], item['args'] )\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tnmlog( \"CmdThread.run() caught exception:%s\" % e, 'debug' )\n\t\t\texcept Exception:\n\t\t\t\tnmlog( \"CmdThread.run() caught exception:%s\" % e, 'debug' )\n\t\t\t\tpass\n\n\n\t\tnmlog( \"CmdThread.run() stopping\", 'debug' )\n\t#run()\n\n\tdef getCurWord( self, line, pos ):\n\t\t\"\"\"docstring for getCurWord\"\"\"\n\t\tnmlog( \"NiceMenu.CmdThread.getCurWord (line:%s,pos:%s)\" % (line, pos), 'debug' )\n\t\tline = vim.current.line\n\t\twlist = line[0:int(pos[2],10)].split()\n\n\t\tif not wlist:\n\t\t\tcw = \"\"\n\t\telse:\n\t\t\tcw = wlist[-1]\n\n\t\tnmlog( \"NiceMenu.CmdThread.getCurWord %s\" % cw, 'debug' )\n\t\treturn cw\n\t#getCurWord()\n\n\tdef getWordMin( self ):\n\t\t\"\"\"docstring for getWordMin\"\"\"\n\t\t#try:\n\t\t\t#wm = vim.eval( \"b:NiceMenuMin\" )\n\t\t#except:\n\t\t\t#wm = vim.eval( \"g:NiceMenuMin\" )\n\t\t#return wm \n\t\treturn int(vim.eval( \"g:NiceMenuMin\" ), 10 )\n\t#getWordMin()\n\n\tdef getOmniWord( self, spoint ):\n\t\t\"\"\"docstring for getOmniWord\"\"\"\n\t\tif not self.curPos:\n\t\t\treturn \"\"\n\t\t\t\n\t\tline = self.curPos['line']\n\t\tpos = self.curPos['pos']\n\n\t\tnmlog( \"NiceMenu.CmdThread.getOmniWord line:'%s', pos:%s, spoint:%s\" % (line,pos,spoint), 'debug' )\n\t\tol = line[int(spoint,10):int(pos[2],10)-1]\n\n\t\tnmlog( \"NiceMenu.CmdThread.getOmniWord '%s'\" % ol, 'debug' )\n\t\treturn ol\n\t#getOmniWord()\n\n\tdef procKeyPress( self, args ):\n\t\t\"\"\"docstring for procKeyPress\"\"\"\n\t\tline = args['line']\n\t\tnpos = args['pos']\n\n\t\t# to see what npos looks like, see help: getpos()\n\t\tnmlog( \"NiceMenu.CmdThread.processCMD, cmd:NMCMD_KEY_PRESS, pos%s\" % npos, 'debug' )\n\n\t\tif self.curPos:\n\t\t\tcurPos = self.curPos['pos']\n\t\t\tif curPos and npos[1] == curPos[1] and npos[2] == curPos[2] and npos[3] == 
curPos[3]:\n\t\t\t\tnmlog( \"NiceMenu.CmdThread.processCMD bad pos, npos:%s, curPos:%s\" % ( npos, curPos ), 'debug' )\n\t\t\t\treturn False\n\t\t\n\t\tcword = self.getCurWord( line, npos )\n\t\tif len(cword) < self.getWordMin():\n\t\t\tnmlog( \"NiceMenu.CmdThread.processCMD word to short, %s\" % ( self.getWordMin() ), 'debug' )\n\t\t\treturn False \n\n\t\t# If it's just a number, don't deal with it.\n\t\t# TODO: make optional\n\t\tif cword.isdigit():\n\t\t\tnmlog( \"NiceMenu.CmdThread.processCMD %s is a digit, no complete\" % ( cword ), 'debug' )\n\t\t\treturn False \n\n\t\t#TODO: make optional\n\t\t#If we're inside a string, not at the end, don't try to complete\n\t\tspace_idx = int(npos[2],10)\n\t\tif len(line) >= space_idx and not line[space_idx-1].isspace():\n\t\t\tnmlog( \"NiceMenu.CmdThread.processCMD %s:%s is in a string, no complete\" % ( cword,npos ), 'debug' )\n\t\t\treturn False\n\n\t\t# remember what we want to complete\n\t\tself.curPos = args \n\n\t\t# Stop any existing key timers before we start a new one.\n\t\tcancel()\n\n\t\tglobal NiceMenu_ptimer\n\t\t#delay = vim.eval(\"NiceMenuGetDelay()\")\n\t\tdelay = None\n\t\tif not delay:\n\t\t\tdelay = '.8'\n\t\tnmlog( \"NiceMenu.CmdThread.procKeyPress starting key timer, %s\" % ( delay ), 'debug' )\n\t\tNiceMenu_ptimer = threading.Timer( float(delay), keyTimer )\n\t\tNiceMenu_ptimer.start()\n\n\t\treturn True\n\t#procKeyPress()\n\n\n\tdef processCMD( self, cmd, args=None ):\n\t\t\"\"\"docstring for processCMD\"\"\"\n\t\tnmlog( \"NiceMenu.CmdThread.processCMD, cmd:%s, args:%s\" % ( cmd, args ), 'debug' )\n\n\t\tif cmd == NMCMD_KEY_PRESS:\n\t\t\treturn self.procKeyPress( args )\n\t\telif cmd == NMCMD_KEY_TOUT:\n\t\t\tnmlog( \"NiceMenu.CmdThread.processCMD, cmd:NMCMD_KEY_TOUT\", 'debug' )\n\n\t\t\t# Check if we're inside a comment\n\n\t\t\tomnifunc = self.remoteExp( \"&omnifunc\" )\n\n\t\t\t# test if we have a completion at this cursor position\n\t\t\t# returns -1 if no completion can be made.\n\t\t\tres = self.remoteExp( \"%s(1,'')\" % omnifunc )\n\n\t\t\tnmlog( \"NiceMenu.CmdThread.processCMD, cmd:NMCMD_KEY_TOUT, checking omnifunc. 
remote_exp:%s(1,''), res:%s\" % ( omnifunc, res ), 'debug' )\n\t\t\tif res == -1:\n\t\t\t\tnmlog( \"NiceMenu.CmdThread.processCMD, cmd:NMCMD_KEY_TOUT, no omnicompletion available\" )\n\t\t\t\treturn\n\n\t\t\tvim.command('let b:completionPos = %s' % (res) )\n\t\t\t#clist = self.remoteExp(\"%s(0,'%s')\" % (omnifunc, self.getOmniWord( res )))\n\t\t\t#nmlog( \"NiceMenu.CmdThread.processCMD, cmd:NMCMD_KEY_TOUT, omnicompletion results: %s\" % (clist) )\n\t\t\t#compl = vim.command( \"let b:NiceMenuCompRes = %s(%s,'%s')\" % (\n\t\t\t\t\t#item['func'], item['arg1'], item['arg2']) )\n\t\t\t#return\n\n\t\t\t# Drop our completion function onto the Q\n\t\t\tnmlog( \"NiceMenu.CmdThread.processCMD, cmd:NMCMD_KEY_TOUT, omnicompletion available, queueing command.\" )\n\t\t\tNiceMenu_ActionQ.put_nowait( {'curPos':self.curPos, 'type':'omnifunc', \n\t\t\t\t'func':omnifunc, 'arg1':0, 'arg2':self.getOmniWord( res ) } )\n\n\t\t\t# Calling back to the main vim process via NiceMenuAction()\n\t\t\t# It will pull the fist item off the action queu and run it for us.\n\t\t\t#NiceMenu_ActionQ.put( {'curPos':self.curPos, 'type':'omni', 'data':res } )\n\n\t\t\t#res = self.remoteExp( \"NiceMenuAction()\" )\n\t\t\tNiceMenuAction()\n\t\t\t#servername = vim.eval( \"v:servername\" )\n\t\t\t#vim.command( 'call remote_expr(\"%s\", \"NiceMenuAction()\")' % ( servername ) )\n\t\t\t#nmlog( \"NiceMenu.CmdThread.processCMD, cmd:NMCMD_KEY_TOUT, dumping omnicompletion results: %s\" % (compl_res) )\n\n\t\telif cmd == NMCMD_LEFT_INSERT:\n\t\t\tnmlog( \"NiceMenu.CmdThread.processCMD, cmd:NMCMD_LEFT_INSERT\", 'debug' )\n\t\t\tself.curPos = None\n\t\t\tcancel()\n\n\tdef remoteExp( self, exp ):\n\t\t\"\"\"docstring for remoteExp\"\"\"\n\t\tnmlog( \"NiceMenu.CmdThread.remoteExp %s\" % exp, 'debug' )\n\t\tservername = vim.eval( \"v:servername\" )\n\t\tif not servername:\n\t\t\tnmlog( \"NiceMenu.CmdThread.remoteExp no servername, unable to process remote expr \", 'error' )\n\t\tres = vim.eval( \"remote_expr(\\\"%s\\\", \\\"%s\\\")\" % ( servername, exp ) )\n\t\t#nmlog( \"NiceMenu.CmdThread.remoteExp %s, %s returned:%s\" % (servername,exp,res), 'debug' )\n\t\treturn res\n\t#remoteExp()\n\n\n#processCMD()\n\n# CmdThread\n\ndef keyTimer():\n\t\"\"\"docstring for keyTimer\"\"\"\n\tnmlog( \"NiceMenu.keyTimer() expired\", 'debug' )\n\tsendCmd( NMCMD_KEY_TOUT )\n#keyTimer()\n\ndef cancel():\n\t\"\"\"docstring for Cancel\"\"\"\n\tnmlog( \"NiceMenu.Cancel\", 'debug' )\n\tglobal NiceMenu_ptimer\n\tif NiceMenu_ptimer:\n\t\tNiceMenu_ptimer.cancel()\n#cancel()\n\ndef sendCmd( cmd, args=None ):\n\t\"\"\"docstring for NiceMenu.SendCmd\"\"\"\n\tnmlog( \"NiceMenu.SendCmd, %s, %s\" % ( cmd, args ), 'debug' )\n\tNiceMenu_cmdQ.put_nowait( { 'cmd':cmd, 'args':args } )\n#sendCmd()\n\ndef nmlog( msg, level='info' ):\n\t\"\"\"docstring for nmlog\"\"\"\n\tNiceMenu_logQ.put_nowait( msg )\n#nmlog()\n\ndef showMenu():\n\n\t# Don't show the menu if we're not in insert mode.\n\tif 'i' != vim.eval('mode()'):\n\t\tnmlog( 'NiceMenu.ShowMenu bad context, not in insert mode. 
mode=%s' % vim.eval('mode()'), 'debug' )\n\t\treturn\n\n\ttry:\n\t\t#subprocess.call( [\"gvim\", \"--servername\", \"%s\"%sname, \"--remote-send\", '=NiceMenuAsyncCpl()'] )\n\t\t#nmq_key_trigger.put_nowait( \"key trigger: \" )\n\t\tpass\n\n\texcept Exception as e:\n\t\tnmlog( \"NiceMenu.ShowMenu exception:%s\" % e, 'error' )\n\n# showMenu()\n\ndef NiceMenuAction():\n\t\"\"\"docstring for NiceMenuAction\"\"\"\n\n\titem = None\n\ttry:\n\t\titem = NiceMenu_ActionQ.get_nowait()\n\texcept Exception:\n\t\t#print( \"NiceMenuAction: nothing on the action q\" )\n\t\titem = None\n\t\tnmlog( \"NiceMenuAction: nothing on the action q\", 'debug' )\n\n\t# Check for spelling suggestions\n\n\twin = vim.current.window\n\tcpos = win.cursor\n\tnpos = (cpos[0], cpos[1]-1)\n\twin.cursor = npos\n\tcword = vim.eval( \"expand('')\" )\n\twin.cursor = cpos\n\n\t#vim.command('set spell')\n\t#spell_list = vim.command( \"let b:nm_spelllist = spellsuggest('%s')\" % (cword) )\n\t#spell_list = vim.eval( \"spellsuggest('%s')\" % (cword) )\n\t#vim.command('set nospell')\n\t#vim.command('let b:completionPos = %s' % (cpos[1]+1))\n\t#nmlog( \"NiceMenuAction: cword:%s, spell list:%s\" % (cword, spell_list), 'debug' )\n\n\n\tservername = vim.eval( \"v:servername\" )\n\n\tif item and item['type'] == 'omnifunc':\n\t\tnmlog( \"NiceMenuAction: trying: let b:completionList=%s(%s,'%s')\" % ( item['func'], item['arg1'], item['arg2'] ), 'debug' )\n\t\t#vim.command( \"let b:completionList=[]\" )\n\t\t#vim.command( \"let b:completionList=%s(%s,'%s')\" % ( item['func'], item['arg1'], item['arg2']) )\n\t\t#vim.command('let b:completionPos = %s' % (item['arg1']))\n\t\t#vim.command( \"call sort(b:completionList)\" )\n\n\n\t\tif vim.eval('b:completionList'):\n\n\t\t\t#vim.command( \"let b:completionList+=b:nm_spelllist\" )\n\n\t\t\tvim.command( \"set completefunc=NiceMenuCompletefunc\" )\n\t\t\tnmlog( \"NiceMenuAction: displaying completion...\" )\n\t\t\t#vim.command( \"call remote_send( \\\"%s\\\", '' )\" % ( servername ) )\n\t\t\t#vim.command( \"call remote_expr( \\\"%s\\\", feedkeys(\\\"\\\\\\\") )\" % ( servername ) )\n\t\t\t#vim.command( 'call feedkeys( \"\\\\\" )' )\n\t\t\t# Use a subprocess server call because for reasons\n\t\t\t# I don't know about calling feedkeys or remote_send/expr \n\t\t\t# has a huge and un acceptable delay.\n\t\t\tsubprocess.call( [\"gvim\", \"--servername\", \"%s\"%servername, \"--remote-send\", ''] )\n\t\t\tnmlog( \"NiceMenuAction: sent keys\", 'debug' )\n\t\telse:\n\t\t\tnmlog( \"NiceMenuAction: falling back easy completion\", 'debug' )\n\t\t\tsubprocess.call( [\"gvim\", \"--servername\", \"%s\"%servername, \"--remote-send\", ''] )\n\telse:\n\t\tnmlog( \"NiceMenuAction: falling back easy completion\", 'debug' )\n\t\tsubprocess.call( [\"gvim\", \"--servername\", \"%s\"%servername, \"--remote-send\", ''] )\n#NiceMenuAction()\n\ndef NiceMenuShutdown():\n\t\"\"\"docstring for NiceMenuShutdown\"\"\"\n\tnmlog(\"NiceMenuShutdown\", 'debug')\n\tlogThread.alive = False\n\tcmdThread.alive = False\n\t\n\tif NiceMenu_ptimer:\n\t\tNiceMenu_ptimer.cancel()\n\t# pump the queues so the threads will shutdown.\n\tNiceMenu_cmdQ.put_nowait( \"shutdown\" )\n\tNiceMenu_logQ.put_nowait( \"shutdown\" )\n#NiceMenuShutdown()\n\n# Initialize the global environment\nNMCMD_KEY_PRESS = 1\nNMCMD_KEY_TOUT = 2\nNMCMD_LEFT_INSERT = 4\n\ncurPos = None\n\nNiceMenu_cmdQ = Queue()\nNiceMenu_ActionQ = Queue()\nNiceMenu_logQ = Queue()\nNiceMenu_ptimer = None\n\ncmdThread = CmdThread()\ncmdThread.start()\n\nlogThread = 
LogThread()\nlogThread.start()\n\n\n","sub_path":"nicemenu/NiceMenu.py","file_name":"NiceMenu.py","file_ext":"py","file_size_in_byte":10593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"430736853","text":"import time\nimport a3dc_module_interface as a3\nfrom modules.packages.a3dc.segmentation import threshold_adaptive\nfrom modules.packages.a3dc.utils import error\nfrom modules.packages.a3dc.ImageClass import VividImage\nfrom modules.packages.a3dc.constants import SEPARATOR\n\n\nMETHODS=['Mean', 'Gaussian']\n\ndef adaptive_threshold(image, method , blocksize=5, offset=0):\n\n print('Thresholding: '+image.metadata['Name'])\n print('Method: Adaptive' + method)\n print('Settings: \\n\\t\\tBlocksize:%s \\n\\t\\tOffset:%s' % (str(blocksize),str(offset)))\n \n outputArray = threshold_adaptive(image.array, method, blocksize, offset)\n\n return VividImage(outputArray, image.metadata)\n\ndef init_config(methods=METHODS):\n \n config = [ a3.Input('Input Image', a3.types.ImageFloat)]\n\n method_param=a3.Parameter('Method', a3.types.enum)\n for idx, m in enumerate(methods):\n method_param.setIntHint(str(m), idx)\n config.append(method_param)\n \n #Add inputfield for BlockSize\n param_blocksize=a3.Parameter('BlockSize', a3.types.float)\n param_blocksize.setIntHint('min', 2)\n param_blocksize.setIntHint('max', 800)\n param_blocksize.setIntHint('stepSize', 1),\n config.append(param_blocksize)\n \n \n #Add inputfield for Offset\n param_offset=a3.Parameter('Offset', a3.types.float)\n param_offset.setIntHint('min', 0)\n param_offset.setIntHint('max', 800)\n param_offset.setIntHint('stepSize', 1),\n config.append(param_offset)\n \n config.append(a3.Output('Output Image', a3.types.ImageFloat)) \n \n return config\n\n\n\ndef module_main(ctx):\n try:\n #Inizialization\n tstart = time.clock()\n print(SEPARATOR)\n print('Adaptive thresholding started!')\n \n #Create Image object\n img =VividImage.from_multidimimage(a3.inputs['Input Image'])\n \n \n #Get method and mode\n method=METHODS[a3.inputs['Method'][-1]]\n \n #Run thresholding \n output_img=adaptive_threshold(img, method , blocksize=a3.inputs['BlockSize'], offset=a3.inputs['Offset'])\n \n #Change Name in metadata\n #output_img.metadata['Name']=img.metadata['Name']+'_adaptive_thr'\n \n #Set output\n a3.outputs['Output Image']=output_img.to_multidimimage()\n \n #Finalization\n tstop = time.clock()\n print('Processing finished in ' + str((tstop - tstart)) + ' seconds!')\n print('Adaptive thresholding was successfully!')\n print(SEPARATOR)\n \n except Exception as e:\n raise error(\"Error occured while executing '\"+str(ctx.type())+\"' module '\"+str(ctx.name())+\"' !\",exception=e)\n \nconfig = init_config()\n\n\n\na3.def_process_module(config, module_main)\n","sub_path":"src/app/modules/__archive/selector/module_AdaptiveThreshold.py","file_name":"module_AdaptiveThreshold.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"221922620","text":"# coding: utf8\n\nimport discord\nimport random\n\nfrom texts import get_text\nfrom discord.ext import commands\n\n\ndescription = '''All credits for the Rocket League: Stats webiste goes to AeonLucid !\n\nYou can invite me on your server by sending me an invite by PM.'''\nbot = commands.Bot(command_prefix='/', description=description)\n\n@bot.event\nasync def on_message(message):\n if message.channel.is_private and message.content.startswith('https://discord.gg'):\n 
try:\n invite = await bot.get_invite(message.content)\n await bot.accept_invite(invite)\n await bot.send_message(message.channel, 'I joined your channel ! Thank you for the invite ;)')\n except:\n pass # do nothing, although you can show the error if you want\n finally:\n return\n await bot.process_commands(message)\n\n\n\n@bot.event\nasync def on_ready():\n \n bot.load_extension(\"misc\")\n\n print(' _ _ _ |')\n print(' | | | | _ | | _ |')\n print(' _ _ _ ___ | | _ | | _ ____ | |_ | | _ ___ | |_ |')\n print('| | | | / _ \\ | | / )| | / )/ _ )| _) | || \\ / _ \\ | _) |')\n print('| | | || |_| || |< ( | |< (( (/ / | |__ | |_) )| |_| || |__ |')\n print(' \\____| \\___/ |_| \\_)|_| \\_)\\____) \\___)|____/ \\___/ \\___) |')\n\n print('-------------------------------------------------------------|')\n print('+++++ ' + bot.user.name + ' is ONLINE ')\n print('+++++ ID: ' + bot.user.id + ' ')\n print('-------------------------------------------------------------|')\n\n\nbot.run('wokketbot@gmail.com', 'RockmyB0T')","sub_path":"wokketbot/wokketbot.py","file_name":"wokketbot.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"157197629","text":"import time\nfrom pathlib import Path\n\nimport pytest\n\nfrom portfolio_optimizer import settings\nfrom portfolio_optimizer.getter import local_dividends\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef security_data_path(tmpdir_factory):\n saved_path = settings.DATA_PATH\n temp_dir = tmpdir_factory.mktemp('new_dividends')\n settings.DATA_PATH = Path(temp_dir)\n yield\n settings.DATA_PATH = saved_path\n\n\ndef test_get_dividends_first_time():\n df = local_dividends.get_dividends(['GAZP', 'MRKC'])\n assert len(df.columns) == 2\n assert df.index.is_monotonic_increasing\n assert df.index.unique\n assert df.loc['2002-05-13', 'GAZP'] == 0.44\n assert df.loc['2017-06-21', 'MRKC'] == 0.0442\n\n\ndef test_forced_update_fake_new_rows(monkeypatch):\n dividends_object = local_dividends.LocalDividends('GAZP')\n dividends_object.df = dividends_object.df.reindex(dividends_object.df.index[:-1])\n monkeypatch.setattr(local_dividends, 'UPDATE_PERIOD_IN_DAYS', 1 / (60 * 60 * 24))\n time.sleep(1)\n dividends_object.update_local_history()\n df = dividends_object.df\n assert df.index.is_monotonic_increasing\n assert df.index.unique\n assert df.loc['2002-05-13'] == 0.44\n assert df.loc['2017-07-20'] == 8.04\n\n\ndef test_forced_update_now_new_rows(monkeypatch):\n monkeypatch.setattr(local_dividends, 'UPDATE_PERIOD_IN_DAYS', 1 / (60 * 60 * 24))\n time.sleep(1)\n test_get_dividends_first_time()\n\n\ndef test_get_dividends_no_update():\n test_get_dividends_first_time()\n","sub_path":"src/portfolio_optimizer/getter/tests/test_dividends.py","file_name":"test_dividends.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"224504229","text":"from LineDrawAlgorithm import Image\nfrom BaryCentric import BaryCentricTriangle\nimport numpy as np\nimport skimage.io as sio\nimport re\n\nheight = 800\nwidth = 800\n\n\nclass Texture(object):\n\n def open_file(self, obj_file):\n with open (obj_file, 'r') as obj:\n data = obj.read()\n lines = data.splitlines()\n texture = []\n faces = []\n vertices = []\n for line in lines:\n if line:\n if line[1] == 't':\n texture.append(line[4:])\n elif line[0] == 'f':\n line_f = re.split('/| ', line)\n faces.append([line_f[1], line_f[4], line_f[7], line_f[2], 
line_f[5], line_f[8]])\n elif line[0] == 'v':\n vertices.append(line[2:])\n return texture, faces, vertices\n\n def get_color(self, ver0, ver1, ver2, texture, image_texture):\n color = []\n textimg_width = image_texture.shape[1]\n textimg_height = image_texture.shape[0]\n ver0_x = int(float(texture[ver0].split()[0]) * textimg_width)\n ver0_y = int(float(texture[ver0].split()[1]) * textimg_height)\n ver1_x = int(float(texture[ver1].split()[0]) * textimg_width)\n ver1_y = int(float(texture[ver1].split()[1]) * textimg_height)\n ver2_x = int(float(texture[ver2].split()[0]) * textimg_width)\n ver2_y = int(float(texture[ver2].split()[1]) * textimg_height)\n ver0_color = image_texture[ver0_x][ver0_y]\n ver1_color = image_texture[ver1_x][ver1_y]\n ver2_color = image_texture[ver2_x][ver2_y]\n for i in range(0, 3):\n color_each = int((float(ver0_color[i]) + float(ver1_color[i]) + float(ver2_color[i])) / 3)\n color.append(color_each)\n return color\n\n\n\n def triangle(self, pts, image, zbuffer, BaryCentricTriangle, color):\n bboxmin = np.array([np.inf, np.inf])\n bboxmax = np.array([-np.inf, -np.inf])\n clamp = np.array([image.width - 1, image.height])\n for i in range(0, 3):\n for j in range(0, 2):\n bboxmin[j] = max(0, min(bboxmin[j], pts[i][j]))\n bboxmax[j] = min(clamp[j], max(bboxmax[j], pts[i][j]))\n p = [[], [], []]\n for p[0] in range(int(bboxmin[0]), int(bboxmax[0]) + 1):\n for p[1] in range(int(bboxmin[1]), int(bboxmax[1] + 1)):\n bc_screen = BaryCentricTriangle.barycentric(pts, p)\n if bc_screen[0] < 0 or bc_screen[1] < 0 or bc_screen[2] < 0:\n continue\n p[2] = 0\n for i in range(0, 3):\n p[2] += pts[i][2] * bc_screen[i]\n if zbuffer[p[0] + p[1] * width] < p[2]:\n zbuffer[p[0] + p[1] * width] = p[2]\n image.set_pixel(p[0], p[1], color)\n\n def face_zbuff(self, image, obj_file, BaryCentricTriangle):\n light_dir = np.array([0, 0, -1])\n texture, faces, vertices = self.open_file(obj_file)\n zbuffer = np.zeros(shape=(image.width * image.height)) - np.inf\n image_texture = sio.imread('african_head_diffuse.tga', plugin='matplotlib')\n\n for face in faces:\n screen_coords = [[], [], []]\n shape_ver0 = int(face[0]) - 1\n shape_ver1 = int(face[1]) - 1\n shape_ver2 = int(face[2]) - 1\n texture_ver0 = int(face[3]) - 1\n texture_ver1 = int(face[4]) - 1\n texture_ver2 = int(face[5]) - 1\n color = self.get_color(texture_ver0, texture_ver1, texture_ver2, texture, image_texture)\n world_coords = np.array([[float(vertices[shape_ver0].split()[0]),\n float(vertices[shape_ver0].split()[1]),\n float(vertices[shape_ver0].split()[2])\n ],\n [float(vertices[shape_ver1].split()[0]),\n float(vertices[shape_ver1].split()[1]),\n float(vertices[shape_ver1].split()[2])\n ],\n [float(vertices[shape_ver2].split()[0]),\n float(vertices[shape_ver2].split()[1]),\n float(vertices[shape_ver2].split()[2])\n ]\n ])\n for j in range(0, 3):\n screen_coords[j] = [(world_coords[j][0] + 1) * image.width / 2,\n (world_coords[j][1] + 1) * image.height / 2,\n world_coords[j][2]]\n # BaryCentricTriangle.triangle(screen_coords, image, BaryCentricTriangle, np.random.randint(256, size=3))\n\n n = np.cross(world_coords[2] - world_coords[0], world_coords[1] - world_coords[0])\n n_norm = n / np.linalg.norm(n)\n intensity = np.dot(n_norm, light_dir)\n if intensity > 0:\n self.triangle(screen_coords, image, zbuffer, BaryCentricTriangle, (intensity * color[0], intensity * color[1], intensity * color[2]))\n\n\nif __name__ == '__main__':\n image = Image(1024, 1024)\n barycentric = BaryCentricTriangle()\n textures = Texture()\n 
textures.face_zbuff(image, \"african_head.obj\", barycentric)\n image.write(\"face_texture.jpg\")","sub_path":"Texture.py","file_name":"Texture.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"328652173","text":"# define occupancy value\nEMPTY = 0\nGATHERER = 1\nGENERATOR = 2\nRESOURCE = 3\n\nclass Grid:\n def __init__(self, width, height, occupancy_value):\n self.width = width\n self.height = height\n self.cells = []\n \n # initialize grid to all specified occupancy value\n for row in range(0, self.height):\n self.cells.append([])\n for col in range(0, self.width):\n self.cells[row].append(occupancy_value)\n\n def set_cell(self, point, value):\n self.cells[point.y][point.x] = value\n\n\n def get_cell(self, point):\n return self.cells[point.y][point.x]\n\n","sub_path":"occ_grid.py","file_name":"occ_grid.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"152797455","text":"import argparse\nimport shutil\nimport mido\n\n\"\"\"\n Sample Runs:\n python main.py -t arduino_std -cp COM6 -br 31250 -pc basic.json\n\"\"\"\n\ndef int_or_str(text):\n \"\"\"Helper function for argument parsing.\"\"\"\n try:\n return int(text)\n except ValueError:\n return text\n\ndef arg_parser():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-ld', '--list-devices', action='store_true',\n help='list audio devices and exit')\n parser.add_argument('-d', '--device', type=int_or_str,\n help='input device (numeric ID or substring)')\n parser.add_argument('-t', '--type', type=str, default='arduino_std',\n help='what drum pad module to run (default %(default).py)')\n parser.add_argument('-pc', '--pad-config', type=str, default='basic.json',\n help='the pad configuration json file (default %(default))')\n parser.add_argument('-cp', '--com-port', type=str, default='COM5',\n help='the serial port to communicate with arduino (default %(default))')\n parser.add_argument('-br', '--baud-rate', type=int, default=31250,\n help='the baud rate to communicate with arduino (default %(default))') \n parser.add_argument('-lmp', '--list-midi-ports', action='store_true',\n help='list midi output ports and exit')\n parser.add_argument('-mp', '--midi-port', type=str, default=u\"berdrums 1\",\n help='the virtual midi port to send output msg (default %(default))')\n parser.add_argument('-omt', '--output-msg-type', type=str, default=u\"json\",\n help='the type of output message to display on shell (default %(default))')\n\n # layered triggers\n parser.add_argument('-fc', '--freq-config', type=str, default=u\"basic.json\",\n help='the slap frequency configuration json file (default %(default))')\n return parser\n\ndef main():\n parser = arg_parser() \n args = parser.parse_args() \n\n try:\n import sounddevice as sd\n\n if args.list_devices:\n print(sd.query_devices())\n parser.exit(0)\n\n if args.list_midi_ports:\n mido.get_output_names()\n parser.exit(0)\n\n # run drum pad modules\n if args.type == 'arduino_std':\n import arduino_std.main as arduino_std\n arduino_std.main(args.com_port, args.midi_port, args.baud_rate, \n args.pad_config, args.output_msg_type)\n\n if args.type == 'arduino_firmata':\n import arduino_firmata.main as arduino_firmata\n arduino_firmata.main(args.com_port, args.midi_port, args.pad_config)\n\n if args.type == 'layered_triggers':\n import layered_triggers.main as layered_triggers \n 
layered_triggers.main(args.com_port, args.midi_port, \n args.baud_rate, args.freq_config)\n\n except KeyboardInterrupt:\n parser.exit('Interrupted by user')\n except Exception as e:\n parser.exit(type(e).__name__ + ': ' + str(e))\n\nif __name__ == '__main__':\n main()","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"332705698","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom portal.views import calendar\n\n\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'common.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^admin_tools/', include('admin_tools.urls')), \n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', include(admin.site.urls)),\n url(r'^tinymce/', include('tinymce.urls')),\n url(r'^photologue/', include('photologue.urls', namespace='photologue')),\n url(r'^schedule/', include('schedule.urls')),\n url(r'^calendar/', calendar),\n)\n","sub_path":"common/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"356832064","text":"# Copyright 2014 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ConfigParser\nimport unittest\nimport yaml\n\nimport fixtures\nimport mock\n\nfrom elastic_recheck import bot\nfrom elastic_recheck import elasticRecheck\nfrom elastic_recheck import tests\nimport elastic_recheck.tests.unit.fake_gerrit as fg\n\n\ndef _set_fake_config(fake_config):\n fake_config.add_section('ircbot')\n fake_config.add_section('gerrit')\n # Set fake ircbot config\n fake_config.set('ircbot', 'nick', 'Fake_User')\n fake_config.set('ircbot', 'pass', '')\n fake_config.set('ircbot', 'server', 'irc.fake.net')\n fake_config.set('ircbot', 'port', 6667)\n fake_config.set('ircbot', 'channel_config',\n 'fake_recheck_watch_bot.yaml')\n # Set fake gerrit config\n fake_config.set('gerrit', 'user', 'fake_user')\n fake_config.set('gerrit', 'query_file', 'fake_query_file')\n fake_config.set('gerrit', 'host', 'fake_host.openstack.org')\n fake_config.set('gerrit', 'key', 'abc123def456')\n\n\n# NOTE(mtreinish) Using unittest here because testtools TestCase.assertRaises\n# doesn't support using it as a context manager\nclass TestBot(unittest.TestCase):\n def setUp(self):\n super(TestBot, self).setUp()\n self.fake_config = ConfigParser.ConfigParser({'server_password': None})\n _set_fake_config(self.fake_config)\n self.channel_config = bot.ChannelConfig(yaml.load(\n open('recheckwatchbot.yaml')))\n with mock.patch('launchpadlib.launchpad.Launchpad'):\n self.recheck_watch = bot.RecheckWatch(\n None,\n self.channel_config,\n self.fake_config.get('gerrit', 'user'),\n self.fake_config.get('gerrit', 'query_file'),\n self.fake_config.get('gerrit', 'host'),\n self.fake_config.get('gerrit', 'key'),\n False)\n\n def test_read_channel_config_not_specified(self):\n self.fake_config.set('ircbot', 'channel_config', None)\n with self.assertRaises(bot.ElasticRecheckException) as exc:\n bot._main([], self.fake_config)\n raised_exc = exc.exception\n self.assertEqual(str(raised_exc), \"Channel Config must be specified \"\n \"in config file.\")\n\n def test_read_channel_config_invalid_path(self):\n self.fake_config.set('ircbot', 'channel_config', 'fake_path.yaml')\n with self.assertRaises(bot.ElasticRecheckException) as exc:\n bot._main([], self.fake_config)\n raised_exc = exc.exception\n error_msg = \"Unable to read layout config file at fake_path.yaml\"\n self.assertEqual(str(raised_exc), error_msg)\n\n def test__read_no_event_no_msg(self):\n with self.assertRaises(bot.ElasticRecheckException) as exc:\n self.recheck_watch._read()\n raised_exc = exc.exception\n error_msg = 'No event or msg specified'\n self.assertEqual(str(raised_exc), error_msg)\n\n\nclass TestBotWithTestTools(tests.TestCase):\n\n def setUp(self):\n super(TestBotWithTestTools, self).setUp()\n self.useFixture(fixtures.MonkeyPatch(\n 'gerritlib.gerrit.Gerrit',\n fg.Gerrit))\n self.fake_config = ConfigParser.ConfigParser({'server_password': None})\n _set_fake_config(self.fake_config)\n self.channel_config = bot.ChannelConfig(yaml.load(\n open('recheckwatchbot.yaml')))\n with mock.patch('launchpadlib.launchpad.Launchpad'):\n self.recheck_watch = bot.RecheckWatch(\n None,\n self.channel_config,\n self.fake_config.get('gerrit', 'user'),\n self.fake_config.get('gerrit', 'query_file'),\n self.fake_config.get('gerrit', 'host'),\n self.fake_config.get('gerrit', 'key'),\n False)\n\n def fake_print(self, channel, msg):\n reference = (\"openstack/keystone change: https://review.openstack.org/\"\n \"64750 failed because of: \"\n 
\"gate-keystone-python26: \"\n \"https://bugs.launchpad.net/bugs/123456, \"\n \"gate-keystone-python27: unrecognized error\")\n self.assertEqual(reference, msg)\n\n def fake_display(self, channel, msg):\n return True\n\n def test_error_found(self):\n self.useFixture(fixtures.MonkeyPatch(\n 'elastic_recheck.bot.RecheckWatch.print_msg',\n self.fake_print))\n self.useFixture(fixtures.MonkeyPatch(\n 'elastic_recheck.bot.RecheckWatch.display',\n self.fake_display))\n with mock.patch.object(\n elasticRecheck.Stream, '_does_es_have_data') as mock_data:\n mock_data.return_value = True\n stream = elasticRecheck.Stream(\"\", \"\", \"\")\n event = stream.get_failed_tempest()\n self.assertIsNone(event.bug_urls_map())\n # Add bugs\n for job in event.failed_jobs:\n if job.name == 'gate-keystone-python26':\n job.bugs = ['123456']\n self.assertTrue(self.recheck_watch.display('channel', event))\n self.recheck_watch.error_found('channel', event)\n\n def test_message_config(self):\n data = {'messages': {'test': 'message'}}\n config = bot.MessageConfig(data)\n self.assertEqual(config['test'], data['messages']['test'])\n","sub_path":"elastic_recheck/tests/unit/test_bot.py","file_name":"test_bot.py","file_ext":"py","file_size_in_byte":5905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"410203865","text":"import sys\n\ndef build_list():\n result = []\n while True:\n try:\n inputValue = int(input('Please, enter integer to be added or < 0 to quit: '))\n if inputValue >= 0:\n result.append(inputValue)\n continue\n else:\n break\n except ValueError:\n print('Please, type a number')\n except KeyboardInterrupt:\n print('Exiting script...')\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n raise\n return result\n\ndef main():\n list1 = build_list()\n print(list1)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python_practices/list_built_with_input_loop_2.py","file_name":"list_built_with_input_loop_2.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"311130734","text":"import argparse\r\nimport numpy as np\r\nimport os\r\nimport tabulate\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\nimport data\r\nimport models\r\nimport curves\r\nimport utils\r\n\r\nparser = argparse.ArgumentParser(description='DNN curve evaluation')\r\nparser.add_argument('--dir', type=str, default='VGG16_poi_single_target_5_2bad_testset_split', metavar='DIR',\r\n help='training directory (default: /tmp/eval)')\r\n\r\nparser.add_argument('--num_points', type=int, default=61, metavar='N',\r\n help='number of points on the curve (default: 61)')\r\n\r\nparser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',\r\n help='dataset name (default: CIFAR10)')\r\nparser.add_argument('--use_test', action='store_true', default=True,\r\n help='switches between validation and test set (default: validation)')\r\nparser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',\r\n help='transform name (default: VGG)')\r\nparser.add_argument('--data_path', type=str, default='Data', metavar='PATH',\r\n help='path to datasets location (default: None)')\r\n\r\nparser.add_argument('--model', type=str, default='VGG16', metavar='MODEL',\r\n help='model name (default: None)')\r\nparser.add_argument('--curve', type=str, default='Bezier', metavar='CURVE',\r\n help='curve type to use (default: None)')\r\nparser.add_argument('--num_bends', type=int, 
default=3, metavar='N',\r\n help='number of curve bends (default: 3)')\r\n\r\nparser.add_argument('--ckpt', type=str, default='Res_single_true_10_same1/checkpoint-100.pt', metavar='CKPT',\r\n help='checkpoint to eval (default: None)')\r\n\r\nargs = parser.parse_args()\r\n\r\nos.makedirs(args.dir, exist_ok=True)\r\n\r\ntorch.backends.cudnn.benchmark = True\r\n\r\narchitecture = getattr(models, args.model)\r\ncurve = getattr(curves, args.curve)\r\nmodel = curves.CurveNet(\r\n 10,\r\n curve,\r\n architecture.curve,\r\n args.num_bends,\r\n architecture_kwargs=architecture.kwargs,\r\n)\r\n\r\nmodel.cuda()\r\ncheckpoint = torch.load(args.ckpt)\r\nmodel.load_state_dict(checkpoint['model_state'])\r\n\r\n\r\nspmodel = architecture.base(num_classes=10, **architecture.kwargs)\r\n\r\nparameters = list(model.net.parameters())\r\nsppara = list(spmodel.parameters())\r\n#for i in range(0, len(sppara)):\r\n# ttt= i*3\r\n# weights = parameters[ttt:ttt + model.num_bends]\r\n# spweights = sppara[i]\r\n# for j in range(1, model.num_bends - 1):\r\n# alpha = j * 1.0 / (model.num_bends - 1)\r\n# alpha = 0\r\n# spweights.data.copy_(alpha * weights[-1].data + (1.0 - alpha) * weights[0].data)\r\n\r\nts = np.linspace(0.0, 1.0, 11)\r\n\r\nfor kss, t_value in enumerate(ts):\r\n coeffs_t = model.coeff_layer(t_value)\r\n\r\n for i in range(0, len(sppara)):\r\n ttt= i*3\r\n weights = parameters[ttt:ttt + model.num_bends]\r\n spweights = sppara[i]\r\n for j in range(1, model.num_bends - 1):\r\n spweights.data.copy_(coeffs_t[0] * weights[0].data + coeffs_t[1] * weights[1].data + coeffs_t[2] * weights[2].data)\r\n\r\n print('saving model. %.2f' % t_value)\r\n utils.save_checkpoint(\r\n args.dir,\r\n int(t_value*10),\r\n model_state=spmodel.state_dict(),\r\n )\r\n","sub_path":"backdoor/backdoor-cifar/save_model.py","file_name":"save_model.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"458847993","text":"import math\nxUm = 0\nxDois = 0\n\na = int(input(\"Digite o valor de A: \"))\n\nb = int(input(\"Digite o valor de B: \"))\n\nc = int(input(\"Digite o valor de C: \"))\n\ndelta = (b**2) - (4*a*c)\n\nif delta < 0:\n print(\"esta equação não possui raízes reais\")\nelse:\n if delta > 0:\n xUm = (-b + delta) / (2*a)\n xDois = (-b - delta) / (2*a)\n if xUm > xDois:\n print(\"as raizes da equação são\",xDois, \"e\",xUm)\n else :\n print(\"as raizes da equação são\",xUm, \"e\",xDois)\n else :\n if delta == 0:\n print(\"a raiz desta equação é\", xUm)\n\n","sub_path":"Parte1/bhaskara.py","file_name":"bhaskara.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"259056773","text":"#!/usr/bin/env python3\n\nimport os\nimport pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\n\n\ndef all_issues(df, dirname):\n\n # select unique issue numbers to remove duplicates caused by\n # issues having multiple labels\n df_unique = df.drop_duplicates('number')\n\n # group by date\n df_dt = df_unique.groupby(pandas.Grouper(key='created_dt', freq='W')) \\\n .count().cumsum()\n\n # create a figure to summarize technical debt\n fig = plt.figure()\n ax = plt.gca()\n plt.xticks(rotation=45)\n plt.subplots_adjust(bottom=0.15)\n plt.ylabel('issue count')\n plt.xlabel('date')\n plt.title('HydroShare Issue Status')\n\n xdata = df_dt.index\n\n ydata = df_dt.number.values\n plt.plot(xdata, df_dt.number, color='k', 
linestyle='-', label='all')\n\n ydata = df_dt.closed.values\n plt.plot(xdata, ydata, color='b', linestyle='-', label='closed')\n\n ydata = df_dt.open.values\n plt.plot(xdata, df_dt.open, color='r', linestyle='-', label='open')\n plt.legend()\n plt.tight_layout()\n outpath = os.path.join(dirname, 'hs-issue-status.png')\n plt.savefig(outpath)\n\n\ndef open_issues(df, dirname):\n\n # plot a summary of open issues\n df_open = df[df.state == 'open']\n\n # group open bugs by date\n df_open_bug = df_open[df_open.label == 'bug']\n df_open_bug_list = list(df_open_bug.number.values)\n df_open_bug = df_open_bug.groupby(pandas.Grouper(key='created_dt',\n freq='W')) \\\n .count().cumsum()\n\n# # group open enhancements by date\n# df_open_enh = df_open[df_open.label == 'enhancement']\n# df_open_enh_list = list(df_open_enh.number.values)\n# df_open_enh = df_open_enh.groupby(pandas.Grouper(key='created_dt',\n# freq='W')) \\\n# .count().cumsum()\n\n # group all open issues that are not bugs or enhancements by date\n df_open_non = df_open[~df_open.label.isin(['bug', 'enhancement'])]\n df_open_non = df_open_non.drop_duplicates('number')\n\n # remove all issue numbers that exist in enhancements and bugs lists\n bug_enh_tickets = list(df_open_bug_list) #+ list(df_open_enh_list)\n df_open_non = df_open_non[~df_open_non.isin(bug_enh_tickets)]\n\n df_open_non = df_open_non.groupby(pandas.Grouper(key='created_dt',\n freq='W')) \\\n .count().cumsum()\n\n print('Found %d non-bug, '\n 'non-enhancement issues' % (len(df_open_non.number)))\n\n fig = plt.figure()\n plt.xticks(rotation=45)\n plt.subplots_adjust(bottom=0.15)\n plt.ylabel('issue count')\n plt.xlabel('date')\n plt.title('HydroShare Open Issues Summary')\n ax = plt.gca()\n\n xdata = df_open_non.index\n ydata = df_open_non.number.values\n plt.plot(xdata, ydata, color='k', linestyle='-', label='non-bug, '\n 'non-enhancement')\n\n xdata = df_open_bug.index\n ydata = df_open_bug.number.values\n plt.plot(xdata, ydata, color='r', linestyle='-', label='bugs')\n\n# xdata = df_open_enh.index\n# ydata = df_open_enh.number.values\n# plt.plot(xdata, ydata, color='b', linestyle='-', label='enhancements')\n\n plt.legend()\n plt.tight_layout()\n outpath = os.path.join(dirname, 'hs-open-issues-summary.png')\n plt.savefig(outpath)\n\n","sub_path":"scripts/github/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"220432448","text":"\"\"\" Coordinate Tic-Tac-Toe Board\n\nImplements a TTT board as tuples\n\"\"\"\n\nfrom base_board import BaseTTTBoard\n\n\nclass CoordsTTTBoard(BaseTTTBoard):\n \"\"\" Stored as a list of coordinate token three-tuples \"\"\"\n def __init__(self):\n self._shared_init()\n self._board = []\n\n def __repr__(self):\n \"\"\" REPR the class \"\"\"\n return 'CoordsTTTBoard()'\n\n def place_token(self, row, column, player):\n \"\"\" Place token player at (row, column)\n\n Throws ValueError exception if already occupied\n >>> b = CoordsTTTBoard()\n >>> b.place_token(1, 1, 'X')\n >>> b._board[0]\n (1, 1, 'X')\n \"\"\"\n\n for sq_item in self._board:\n if sq_item[0] == row and sq_item[1] == column and sq_item[2] != self._BLANK:\n raise ValueError('Token already placed')\n\n self._board.append((row, column, player))\n\n def _get_square_value(self, square_number):\n \"\"\" Get value of squares (0-8) on board\n\n Square pattern:\n 0 1 2\n 3 4 5\n 6 7 8\n\n :return str: player, else self._BLANK\n\n >>> s = CoordsTTTBoard()\n >>> 
s.place_token(1, 1, 'X')\n >>> s._get_square_value(4)\n 'X'\n >>> s = CoordsTTTBoard()\n >>> s.place_token(1, 1, 'X')\n >>> s._get_square_value(3) == s._BLANK\n True\n \"\"\"\n col = square_number // 3\n row = square_number % 3\n\n square_val = [x[2] for x in self._board if x[0] == row and x[1] == col]\n if len(square_val) > 0:\n player = square_val[0]\n else:\n player = self._BLANK\n\n return player\n","sub_path":"practice/tic-tac-toe/coords_board.py","file_name":"coords_board.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"114343184","text":"\"\"\"\n @Author : liujianhan\n @Date : 20/9/6 19:20\n @Project : leetcode_in_python\n @FileName : 381.O(1)时间插入、删除和获取随机元素-允许重复(H).py\n @Description : 设计一个支持在平均 时间复杂度 O(1) 下, 执行以下操作的数据结构。\n 注意: 允许出现重复元素。\n insert(val):向集合中插入元素 val。\n remove(val):当 val 存在时,从集合中移除一个 val。\n getRandom:从现有集合中随机获取一个元素。每个元素被返回的概率应该与其在集合中的数量呈线性相关。\n 示例:\n\n // 初始化一个空的集合。\n RandomizedCollection collection = new RandomizedCollection();\n\n // 向集合中插入 1 。返回 true 表示集合不包含 1 。\n collection.insert(1);\n\n // 向集合中插入另一个 1 。返回 false 表示集合包含 1 。集合现在包含 [1,1] 。\n collection.insert(1);\n\n // 向集合中插入 2 ,返回 true 。集合现在包含 [1,1,2] 。\n collection.insert(2);\n\n // getRandom 应当有 2/3 的概率返回 1 ,1/3 的概率返回 2 。\n collection.getRandom();\n\n // 从集合中删除 1 ,返回 true 。集合现在包含 [1,2] 。\n collection.remove(1);\n\n // getRandom 应有相同概率返回 1 和 2 。\n collection.getRandom();\n\"\"\"\nimport random\n\n\n# 160ms, 19.2MB\nclass RandomizedCollection:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.data = []\n self.dict = {}\n\n def insert(self, val: int) -> bool:\n \"\"\"\n Inserts a value to the collection. Returns true if the collection did not already contain the specified element.\n \"\"\"\n if val not in self.dict:\n self.dict[val] = {len(self.data)}\n else:\n self.dict[val].add(len(self.data))\n self.data.append(val)\n if len(self.dict[val]) == 1:\n return True\n return False\n\n def remove(self, val: int) -> bool:\n \"\"\"\n Removes a value from the collection. 
Returns true if the collection contained the specified element.\n \"\"\"\n if val in self.dict:\n idx = self.dict[val].pop()\n if idx != len(self.data) - 1:\n last = self.data[-1]\n self.data[idx] = last\n self.dict[last].remove(len(self.data) - 1)\n self.dict[last].add(idx)\n self.data.pop()\n else:\n self.data.pop()\n\n if len(self.dict[val]) == 0:\n self.dict.pop(val)\n\n return True\n return False\n\n def getRandom(self) -> int:\n \"\"\"\n Get a random element from the collection.\n \"\"\"\n return self.data[random.randint(0, len(self.data) - 1)]\n\n\n# Your RandomizedCollection object will be instantiated and called as such:\n# obj = RandomizedCollection()\n# param_1 = obj.insert(val)\n# param_2 = obj.remove(val)\n# param_3 = obj.getRandom()\n","sub_path":"02-算法思想/设计/381.O(1)时间插入、删除和获取随机元素-允许重复(H).py","file_name":"381.O(1)时间插入、删除和获取随机元素-允许重复(H).py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"547013702","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport json\nimport argparse\n\n# Source: http://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy?rq=1\ndef moving_average(a, n):\n \"\"\"Compute the moving average of an array a of numbers using a window length n\"\"\"\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n\ndef main(stats_path, n, xmax):\n f = open(stats_path)\n contents = json.load(f)\n\n averaged_episode_rewards = moving_average(contents['episode_rewards'], n)\n fig = plt.figure()\n plt.plot(range(len(averaged_episode_rewards)), averaged_episode_rewards)\n plt.xlim(xmax=xmax)\n min_aer = min(averaged_episode_rewards)\n max_aer = max(averaged_episode_rewards)\n plt.ylim(ymin=min(0, min_aer), ymax=max(0, max_aer + 0.1 * max_aer))\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Total reward\")\n plt.title(\"Total reward per episode\")\n fig.canvas.set_window_title(\"Total reward per episode\")\n\n fig = plt.figure()\n averaged_episode_lengths = moving_average(contents['episode_lengths'], n)\n plt.plot(range(len(averaged_episode_lengths)), averaged_episode_lengths)\n plt.xlim(xmax=xmax)\n max_ael = max(averaged_episode_lengths)\n plt.ylim(ymin=0, ymax=(max_ael + 0.1 * max_ael))\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Length\")\n plt.title(\"Length per episode\")\n fig.canvas.set_window_title(\"Length per episode\")\n plt.show()\n\ndef ge_1(value):\n \"\"\"Require the value for an argparse argument to be an integer >=1.\"\"\"\n ivalue = int(value)\n if ivalue < 1:\n raise argparse.ArgumentTypeError(\"%s must be an integer of at least 1.\" % value)\n return ivalue\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"stats_path\", metavar=\"stats\", type=str, help=\"Path to the stats JSON file.\")\nparser.add_argument(\"running_mean_length\", metavar=\"rml\", type=ge_1, help=\"Running mean length\")\nparser.add_argument(\"--xmax\", type=ge_1, default=None, help=\"Maximum episode for which to show results.\")\n\nif __name__ == '__main__':\n try:\n args = parser.parse_args()\n except:\n sys.exit()\n main(args.stats_path, args.running_mean_length, args.xmax)\n","sub_path":"plot_statistics.py","file_name":"plot_statistics.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"581187688","text":"from keras.preprocessing.text import Tokenizer\nfrom 
keras.preprocessing.sequence import pad_sequences\nfrom keras.models import load_model\nfrom models import BasicRNN, BidirectionalRNN, EncDecRNN, EmbeddingRNN\nimport numpy as np\nimport pickle as pkl\n\n\nclass Translator:\n \"\"\"\n Translator\n \"\"\"\n \n def __init__(self, Model, name, embed=True):\n \"\"\"\n :param Model: model object to fit and use\n \"\"\"\n self.Model = Model\n self.name = name\n self.embed = embed\n self.model = None\n self.x_tokenizer = None\n self.y_tokenizer = None\n self.max_y_sequence_length = None\n \n def fit(self, x_sentences, y_sentences, epochs=1):\n \n # Preprocess sentences\n preproc_x_sentences, preproc_y_sentences = self._preprocess(x_sentences, y_sentences)\n \n self.max_y_sequence_length = preproc_y_sentences.shape[1]\n x_vocab_size = len(self.x_tokenizer.word_index)\n y_vocab_size = len(self.y_tokenizer.word_index)\n \n # Reshape the input \n tmp_x = self._pad(preproc_x_sentences, self.max_y_sequence_length)\n if not self.embed:\n tmp_x = tmp_x.reshape((-1, preproc_y_sentences.shape[-2], 1))\n \n print(tmp_x.shape)\n self.model = self.Model(tmp_x.shape, self.max_y_sequence_length, x_vocab_size + 1, y_vocab_size + 1)\n self.model.fit(tmp_x, preproc_y_sentences, self.name, epochs=20, batch_size=512)\n \n def translate(self, sentence):\n \n sentence = [self.x_tokenizer.word_index[word] for word in sentence.split()]\n sentence = self._pad([sentence], self.max_y_sequence_length)\n if not self.embed:\n sentence = sentence.reshape(1, -1, 1)\n predictions = self.model.predict(sentence)\n \n index_to_words = {id: word for word, id in self.y_tokenizer.word_index.items()}\n index_to_words[0] = ''\n \n translation = ' '.join([index_to_words[np.argmax(x)] for x in predictions[0]])\n print(translation)\n \n def save(self):\n save_dict = {'embed': self.embed,\n 'x_tokenizer': self.x_tokenizer,\n 'y_tokenizer': self.y_tokenizer,\n 'max_y_sequence_length': self.max_y_sequence_length}\n \n with open('{}.pkl'.format(self.name), 'wb') as handle:\n pkl.dump(save_dict, handle)\n \n def load(self):\n with open('{}.pkl'.format(self.name), 'rb') as handle:\n load_dict = pkl.load(handle)\n \n self.embed = load_dict['embed']\n self.x_tokenizer = load_dict['x_tokenizer']\n self.y_tokenizer = load_dict['y_tokenizer']\n self.max_y_sequence_length = load_dict['max_y_sequence_length']\n self.model = load_model('{}.h5'.format(self.name))\n \n def _preprocess(self, x, y):\n \"\"\"\n Preprocess x and y\n :param x: Feature List of sentences\n :param y: Label List of sentences\n :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)\n \"\"\"\n preprocess_x, self.x_tokenizer = self._tokenize(x)\n preprocess_y, self.y_tokenizer = self._tokenize(y)\n\n preprocess_x = self._pad(preprocess_x)\n preprocess_y = self._pad(preprocess_y)\n\n # Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions\n preprocess_y = preprocess_y.reshape(*preprocess_y.shape, 1)\n\n return preprocess_x, preprocess_y\n \n def _tokenize(self, x):\n \"\"\"\n Tokenize x\n :param x: List of sentences/strings to be tokenized\n :return: Tuple of (tokenized x data, tokenizer used to tokenize x)\n \"\"\"\n keras_tokenizer = Tokenizer()\n keras_tokenizer.fit_on_texts(x)\n return keras_tokenizer.texts_to_sequences(x), keras_tokenizer\n\n def _pad(self, x, length=None):\n \"\"\"\n Pad x\n :param x: List of sequences.\n :param length: Length to pad the sequence to. 
If None, use length of longest sequence in x.\n :return: Padded numpy array of sequences\n \"\"\"\n padded_x = pad_sequences(x, maxlen=length, padding='post')\n return padded_x\n\n","sub_path":"translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"572978783","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2018, pranali and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom bnd.api.data_list import load_data\nimport ast\n\n\nclass ShiftScheduleManual(Document):\n\tdef onload(self):\n\t\tdlist=load_data()\n\t\t#frappe.msgprint(\"hii\")\n\t\tself.get(\"__onload\").data_list = dlist\n\n\n\n\n\n\n\n\n\n@frappe.whitelist()\ndef passing_templatedata_to_python(data):\n\t#frappe.msgprint(\"function is working\")\n\td=ast.literal_eval(data)\n\taction=\"\"\n\tdoc=\"\"\n\t# frappe.msgprint(str(d))\n\t\n\t\n\tfor i in range(0,len(d)):\n\t\tdoc = frappe.get_doc({\n\n \"doctype\": \"Shift Schedule\",\n \"shift_time\": d[i][\"Shift\"],\n \"employee_name\":d[i][\"Employee\"],\n \"store\": d[i][\"Store\"],\n \"attendance_date\" : d[i][\"Day\"],\n \"company\": d[i][\"Company\"],\n \"employee\" : d[i][\"Empid\"],\n \"naming_series\" : \"SHT-\"\n \n\t})\n\n\t\texisting_data=frappe.get_all(\"Shift Schedule\",fields=[\"name\",\"attendance_date\",\"employee_name\",\"shift_time\"],filters= {\"attendance_date\": d[i][\"Day\"],\"employee_name\":d[i][\"Employee\"]})\n\t\tif len(existing_data)!=0:\n\t\t\tstore= d[i][\"Store\"]\n\t\t\tshift =d[i][\"Shift\"]\n\t\t\temp=d[i][\"Employee\"]\n\t\t\tattendance_date=d[i][\"Day\"]\n\t\t\tfrappe.db.sql(\"UPDATE `tabShift Schedule`set store=%s, shift_time=%s where employee_name=%s and attendance_date=%s\",(store,shift,emp,attendance_date))\n\t\t\taction= \"Updated\"\n\t\telse:\n\t\t\tdoc.insert()\n\t\t\tdoc.submit()\n\t\t\taction= \"Inserted\"\n\n\t\n\t\n\t\n\tif action:\n\t\tfrappe.msgprint(\"Record \"+action+\" Sucessfully\")\n\telse:\n\t\tfrappe.msgprint(\"Select from date and to date of schedule for section and set shift schedule to upload.\",raise_exception=1)\n\t\t\n\treturn \"Done\"\n\t\n\t\n\n\n\n\n\n\n\n@frappe.whitelist()\ndef load_existing_data(from_date,to_date,start_date):\n\t#frappe.msgprint(from_date,to_date,start_date)\n\t#fdate = frappe.utils.data.format_datetime (from_date, \"yyyyMMdd\")\n\t#tdate = frappe.utils.data.format_datetime (to_date, \"yyyyMMdd\")\n\t#sdate = frappe.utils.data.format_datetime (start_date, \"yyyyMMdd\")\n\t#frappe.msgprint(\"fdate \"+fdate+\" todate \"+tdate)\n\tmyt_sql=\"CALL getschedule(\"+from_date+\", \"+to_date+\", \"+start_date+\");\"\n\t#frappe.msgprint(str(myt_sql))\n\tlastweekdetails=frappe.db.sql(myt_sql, as_dict=1)\n\t#frappe.msgprint(lastweekdetails)\n\treturn lastweekdetails\n","sub_path":"bnd/bnd/doctype/shift_schedule_manual/shift_schedule_manual.py","file_name":"shift_schedule_manual.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"337884211","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author;鸿\n\nimport urllib\nimport importlib, sys\n\nimportlib.reload(sys)\nimport re\nimport jieba\nimport collections # 词频统计库\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfparser import PDFDocument\nfrom pdfminer.pdfdevice import PDFDevice\nfrom 
pdfminer.pdfinterp import PDFResourceManager\nfrom pdfminer.pdfinterp import PDFPageInterpreter\nfrom pdfminer.converter import PDFPageAggregator\nfrom pdfminer.layout import LTTextBoxHorizontal\nfrom pdfminer.layout import LAParams\nfrom pdfminer.pdfinterp import PDFTextExtractionNotAllowed\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nplt.rcParams['font.sans-serif']=['SimHei']#中文显示\nplt.rcParams['axes.unicode_minus'] = False\ndef parse(DataIO, save_path):\n parser = PDFParser(DataIO)\n doc = PDFDocument()\n parser.set_document(doc)\n doc.set_parser(parser)\n doc.initialize()\n if not doc.is_extractable:\n raise PDFTextExtractionNotAllowed\n else:\n rsrcmagr = PDFResourceManager()\n laparams = LAParams()\n device = PDFPageAggregator(rsrcmagr, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmagr, device)\n\n for page in doc.get_pages():\n interpreter.process_page(page)\n layout = device.get_result()\n for x in layout:\n try:\n if (isinstance(x, LTTextBoxHorizontal)):\n with open('%s' % (save_path), 'a') as f:\n result = x.get_text()\n f.write(result + \"\\n\")\n except:\n print(\"Failed\")\n\n\nif __name__ == '__main__':\n # 解析本地PDF文本,保存到本地TXT\n # with open(r'Java实战入门.pdf', 'rb') as pdf_html:\n # parse(pdf_html, r'Java实战入门.txt')\n with open('Java实战入门.txt','r',encoding='gbk') as f:\n mytext = f.readlines()\n # 文本预处理\n # pattern = re.compile(u'\\t|\\n|\\.|-|:|;|\\)|\\(|\\?|\"') # 定义正则表达式匹配模式\n mytext = re.findall('[\\u4e00-\\u9fa5]', str(mytext)) # 将符合模式的字符去除\n mytext_str = ''.join(mytext)\n # 文本分词\n seg_list_exact = jieba.cut(mytext_str, cut_all=False) # 精确模式分词\n object_list = []\n remove_words = [u'的', u',', u'和', u'是', u'随着', u'对于', u'对', u'等', u'能', u'都', u'。', u' ', u'、', u'中', u'在', u'了',\n u'通常', u'如果', u'我们', u'需要'] # 自定义去除词库\n\n for word in seg_list_exact: # 循环读出每个分词\n if word not in remove_words: # 如果不在去除词库中\n object_list.append(word) # 分词追加到列表\n\n object_list_str = ''.join(object_list)\n # print(object_list_str)\n # 词频统计\n word_counts = collections.Counter(object_list) # 对分词做词频统计\n word_counts_top15 = word_counts.most_common(15) # 获取前15最高频的词\n print(word_counts_top15) # 输出检查\n\n # mycloud = WordCloud(font_path='./fonts/simhei.ttf').generate(str(object_list))\n # plt.imshow(mycloud)\n # plt.axis('off') # 关闭词云图坐标显示\n # plt.savefig('out.jpg', dpi=1000, edgecolor='blue', bbox_inches='tight', quality=95) # 保存词云图(到工作路径下)\n # plt.show()\n\n","sub_path":"阶段二/2+pdf+wordcloud.py","file_name":"2+pdf+wordcloud.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"508061087","text":"#没有理解题意\nucnum = int(input())\nans = list()\nfor i in range(ucnum):\n num = int(input())\n strs = input().split()\n lists = [int(j) for j in strs]\n anslist = list()\n anslist.append(-1)\n for j in range(1,len(lists)):\n temp = lists[0:j]\n# print(\"operate:{}\".format(temp))\n if min(temp)>lists[j]:\n anslist.append(-1)\n continue\n temp.reverse()\n for t in temp:\n if t= len(out): break\n\n offset = 0\n\n for note, duration in notes:\n env = make_adsr(duration)\n sample = sample_scale[note][:len(env)] * env\n\n out[offset:offset + len(sample)] += sample\n offset += len(sample)\n\n return out\n\n\ndef write_mary():\n import aifc\n\n sample = make_mary()\n final_sample = (sample * (2 ** 15 - 1)).astype(dtype='>i2')\n\n with aifc.open('mary.caff', 'wb') as out:\n out.setframerate(44100)\n out.setnchannels(1)\n out.setsampwidth(2)\n\n out.writeframes(final_sample.tobytes())\n\n\ndef 
test_aiff():\n import aifc\n\n with aifc.open('mary.caff', 'rb') as fd:\n print(fd.getparams())\n\n\ndef main():\n import aifc\n\n # test_aiff()\n # return\n\n write_mary()\n return\n\n pa = pyaudio.PyAudio()\n\n # sample = shifted_sample(0.)\n sample = make_mary()\n\n # with aifc.open('test.caff', 'rb') as fd:\n # sample = np.frombuffer(fd.readframes(fd.getnframes()), dtype='>i2')\n # sample = sample.astype(np.float32) / (2 ** 15)\n # print(sample.shape)\n\n\n current_offset = 0\n\n def callback(in_data, frame_count, time_info, status):\n nonlocal current_offset\n start = current_offset\n current_offset = start + frame_count\n\n return (sample[start:current_offset], pyaudio.paContinue)\n\n stream = pa.open(\n format = pyaudio.paFloat32,\n channels = CHANNELS,\n rate = int(RATE / 2),\n stream_callback = callback,\n output = True,\n )\n\n stream.start_stream()\n\n while stream.is_active():\n time.sleep(0.1)\n\n stream.stop_stream()\n stream.close()\n\n pa.terminate()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/005_voice.py","file_name":"005_voice.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"589679316","text":"import py_util\nfrom KBEDebug import *\n#import public_config\nfrom config import public_config\nimport math\nimport random\n\nclass MapDataMgr:\n\tdef __init__(self):\n\t\t#地图表\n\t\tself._map_data = _readXml('/data/xml/map_setting.xml', 'id_i')\n\n\t\tself.randomScene = {}\n\t\tfor k, v in pairs(self._map_data):\n\t\t\tif v['isRandom'] and v['isRandom'] == 1:\n\t\t\t\tINFO_MSG(\"MapDataMgr:initData random scene k=\" % (k))\n\n\t\t\tself.randomScene[k] = true\n\n\t\t# --读取所有的场景配置表\n\t\tself.load_space_data()\n\n\tdef load_space_data(self):\n\t\ttmp={}\n\t\tfn_prefix='/data/spaces/'\n\t\tbm_fn_prefix='/data/blockmap'\n\n\t\tfor map_id,data in self._map_data:\n\t\t\tif data['spaceName'] and data['spaceName']!='':\n\t\t\t\tfn=fn_prefix+data['spaceName']+'.xml'\n\n\t#获取一个map_id的原始map_id(map_id可能是原始map_id+分线数)\n\tdef GetSrcMapId(self,map_id):\n\t\ttmp_mid=str(map_id)\n\n\t\tif tmp_mid:\n\t\t\ttmp=tmp_mid.split('_')\n\t\t\tif tmp[0]:\n\t\t\t\treturn tmp[0]\n\n\t\treturn map_id\n\n\t#获取配置表的地图配置表的数据\n\tdef getMapCfgData(self,map_id):\n\t\treturn self._map_data[map_id]\n\n\tdef getMapData(self):\n\t\treturn self._map_data\n\n\t#获取一个entity在场景配置里的信息\n\tdef GetEntityCfgData(self,map_id,eid):\n\t\ttmp=self._map_data[map_id]\n\t\tif tmp:\n\t\t\ttmp2=tmp['entities']\n\t\t\tif tmp2:\n\t\t\t\treturn emp2[eid]\n\n\t#获取指定一个场景的Entity配置数据\n\tdef GetMapEntityCfgData(self,map_id):\n\t\ttmp=self._map_data[map_id]\n\t\tif tmp:\n\t\t\ttmp2=tmp['entities']\n\t\t\tif tmp2:\n\t\t\t\treturn tmp2\n\n\t#WB: world boss \n\tdef IsWBMap(self,map_id):\n\t\tid=self.GetSrcMapId(map_id)\n\t\tdata=self.getMapCfgData(id)\n\t\tif not data:\n\t\t\tERROR_MSG(\"MapDataMgr:IsWBMap map_id=\" % (map_id))\n\t\t\treturn False\n\t\treturn public_config.MAP_TYPE_WB==data['type']\n\n\t#判断玩家是否处于多人副本场景\n\tdef is_in_mpins(self,map_id):\n\t\tsrc_map_id=self.GetSrcMapId(map_id)\n\t\tdata=self._map_data[src_map_id]\n\t\tif data:\n\t\t\treturn data['type']==public_config.MAP_TYPE_MUTI_PLAYER_NOT_TEAM\n\t\treturn False\n\t#判断玩家是否处于普通场景\n\tdef is_in_normal_map(self,map_id):\n\t\tdata=self._map_data[map_id]\n\t\tif data:\n\t\t\treturn data['type']==public_config.MAP_TYPE_NORMAL\n\n\t\treturn 
False\n\n","sub_path":"scripts/common/data/map_data.py","file_name":"map_data.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"485016118","text":"from __future__ import print_function\n\n# standard library\nimport datetime\nimport json\nimport random\nimport string\nimport tempfile\n\n# 3rd party library\nimport boto3 # pre-installed in AWS Lambda environment\n\nS3_BUCKET_NAME = \"deep-security-logs\"\n\ndef create_s3_key_name(timestamp):\n \"\"\"\n Create an S3 key name based on the specified timestamp\n \"\"\"\n\n # generate a random string to avoid key name conflicts\n # from @mattgemmell at http://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python\n nonce = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(8))\n\n # make sure we have a unique key name, ideally full timestamp + nonce\n key_name = '{}-{}.txt'.format(str(timestamp), nonce)\n if type(timestamp) == type(datetime.datetime.now()):\n key_name = '{}-{}.txt'.format(timestamp.strftime(\"%Y/%m/%d/%H/%Y-%m-%d-%H-%M-%S-%f\"), nonce)\n\n return key_name\n\ndef write_event_to_s3_bucket(event, timestamp, bucket_name):\n \"\"\"\n Write the Deep Security JSON event to the specified S3 bucket\n \"\"\"\n result = None\n\n # get a unique key name based on the event's timestamp\n key_name = create_s3_key_name(timestamp)\n\n # convert the event to a string for storage\n event_str = None\n try:\n event_str = unicode(json.dumps(event)).encode(\"utf-8\")\n except Exception as err:\n print(\"Could not convert event to string for storage. Threw exception: {}\".format(err))\n\n if event_str:\n # create a temporary file in order to upload to S3\n tmp_file = tempfile.NamedTemporaryFile(delete=True)\n try:\n tmp_file.write(event_str)\n tmp_file.seek(0) # make sure the temporary file is readable\n\n s3 = boto3.client('s3')\n s3.upload_file(tmp_file.name, bucket_name, key_name)\n print(\"Wrote event to S3 as {}\".format(key_name))\n\n result = key_name\n except Exception as err:\n print(\"Could not write file to S3. Threw exception: {}\".format(err))\n finally:\n tmp_file.close() # clean up\n\n return result\n\ndef lambda_handler(event, context):\n \"\"\"\n Parse the incoming SNS notification for a Deep Security event\n \"\"\"\n timestamp_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n \n if type(event) == type({}):\n if event.has_key('Records'):\n print(\"Processing {} records\".format(len(event['Records'])))\n for i, record in enumerate(event['Records']):\n print(\"Record {}/{}\".format(i, len(event['Records'])))\n\n if record.has_key('Sns'):\n timestamp = datetime.datetime.now()\n time_received = record['Sns']['Timestamp'] if record['Sns'].has_key('Timestamp') else None\n if time_received: \n try:\n timestamp = datetime.datetime.strptime(time_received, timestamp_format)\n except: pass # we can silently fail and try to catch later\n\n if record['Sns'].has_key('Message'):\n record_docs = json.loads(record['Sns']['Message'])\n\n # some versions of this feature send single events instead of an array\n if type(record_docs) == type({}): record_docs = [record_docs]\n\n for record_doc in record_docs:\n if record_doc.has_key('LogDate'):\n # LogDate is the actually timestamp of the event. We need a timestamp for the\n # event and the order of preference is:\n # 1. LogDate\n # 2. Time received by Amazon SNS\n # 3. 
Time processed by AWS Lambda\n #\n # When both LogDate and time received by Amazon SNS are present, we'll also\n # calculate the delivery delay and record that with the event as 'DeliveryDelay'\n time_generated = record_doc['LogDate']\n try:\n tg = datetime.datetime.strptime(time_generated, timestamp_format)\n timestamp = tg # update the timestamp to the actual event time instead of the time is was received\n tr = datetime.datetime.strptime(time_received, timestamp_format)\n d = tr - tg\n print(\"Event delivery delay: {}\".format(d))\n record_doc['DeliveryDelay'] = '{}'.format(d)\n except Exception as err:\n print(err)\n\n save_result = write_event_to_s3_bucket(event=record_doc, timestamp=timestamp, bucket_name=S3_BUCKET_NAME)\n if save_result:\n print(\"Wrote event to S3: {}\".format(save_result))\n else:\n print(\"Could not write event to S3\")\n else:\n # in case of failure, simply output the log to CloudWatch Logs\n print(\"Received event: \" + json.dumps(event, indent=2))\n \n return True","sub_path":"lambda-save-ds-event-to-s3.py","file_name":"lambda-save-ds-event-to-s3.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"645194307","text":"# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.html import format_html\n\nfrom apps.clients.models import Clientes\nfrom apps.products.models import Productos\nfrom apps.sedes.models import Sedes\n\n\nclass Compras(models.Model):\n id_cliente = models.ForeignKey(\n Clientes,\n verbose_name=(_(u\"Cliente\")),\n db_column='id_cliente',\n blank=True,\n null=True\n )\n id_producto = models.ForeignKey(\n Productos,\n verbose_name=(_(u\"Producto\")),\n db_column='id_producto',\n blank=True,\n null=True\n )\n id_sede = models.ForeignKey(\n Sedes,\n verbose_name=(_(u\"Sede\")),\n db_column='id_sede',\n blank=True,\n null=True\n )\n precio = models.IntegerField(\n verbose_name=(_(u\"Precio del producto\")),\n blank=True,\n null=True\n )\n descripcion = models.TextField(\n verbose_name=(_(u\"Descripción\")),\n blank=True,\n null=True\n )\n fecha = models.DateTimeField(\n verbose_name=(_(u\"Fecha de compra\")),\n blank=True,\n null=True\n )\n\n def _clientdetails(self):\n\n if self.id_cliente:\n text = '%s' % (settings.WEBPAGE, self.id_cliente.id, self.id_cliente.nombres)\n else:\n text = \"\"\n return format_html(text)\n\n _clientdetails.short_description = (_(u\"Cliente\"))\n _clientdetails.allow_tags = True\n clientdetails = property(_clientdetails)\n\n\n def _costproduct(self):\n cost = self.id_producto.precio if not self.precio and self.id_producto else self.precio\n return cost\n\n _costproduct.short_description = (_(u\"Precio del producto\"))\n costproduct = property(_costproduct)\n\n def _productdetails(self):\n\n if self.id_producto:\n text = '%s' % (settings.WEBPAGE, self.id_producto.id, self.id_producto.producto)\n else:\n text = \"\"\n return format_html(text)\n\n _productdetails.short_description = (_(u\"Producto comprado\"))\n _productdetails.allow_tags = True\n productdetails = property(_productdetails)\n\n def _sedesdetails(self):\n if self.id_sede:\n text = '%s' % (settings.WEBPAGE, self.id_sede.id, self.id_sede.sede)\n else:\n text = \"\"\n return format_html(text)\n\n _sedesdetails.short_description = (_(u\"Cliente\"))\n _sedesdetails.allow_tags = True\n sedesdetails = property(_sedesdetails)\n\n class Meta:\n verbose_name = 
(_(\"Compra\"))\n verbose_name_plural = (_(\"Compras\"))\n db_table = 'compras'\n\n","sub_path":"wsgi/apps/compras/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"266189930","text":"import clr\nimport re\nfrom System.IO import File, Directory\n\nclr.AddReference('RevitAPI')\nfrom Autodesk.Revit.DB import *\n\nclr.AddReference('RevitServices')\nimport RevitServices\nfrom RevitServices.Persistence import DocumentManager\nfrom RevitServices.Transactions import TransactionManager\n\nclr.AddReference('RevitNodes')\nimport Revit\nclr.ImportExtensions(Revit.Elements)\n\ndoc = DocumentManager.Instance.CurrentDBDocument\napp = doc.Application\n\ndocPath = doc.PathName\ndocPath = re.findall(r\"^.*\\\\\", docPath)[0]\n\ncategories = IN[0]\nif not isinstance(categories, list):\n categories = [categories]\nsaveBool = IN[1]\noutList = []\n\ncollector = FilteredElementCollector(doc).OfClass(Family)\nfor category in categories:\n elements = []\n for family in collector:\n if family.FamilyCategoryId.ToString() == category.Id.ToString():\n elements.append(family)\n\n famDirectory = docPath + category.Name + \"\\\\\"\n if saveBool:\n if not Directory.Exists(famDirectory):\n Directory.CreateDirectory(famDirectory)\n\n for element in elements:\n eName = element.Name\n famPath = famDirectory + eName + \".rfa\"\n famPathBackup = famDirectory + eName + \".0001.rfa\"\n if saveBool:\n try:\n if not Directory.Exists(famDirectory):\n Directory.CreateDirectory(famDirectory)\n except:\n outList.append(\"Could not create directory\")\n try:\n if File.Exists(famPath):\n File.Delete(famPath)\n famDoc = doc.EditFamily(element)\n famDoc.SaveAs(famPath)\n famDoc.Close(False)\n outList.append(famPath)\n if File.Exists(famPathBackup):\n File.Delete(famPathBackup)\n except:\n outList.append(\"Export Error: \" + eName)\n else:\n outList.append(\"Test: \" + famPath)\n\nOUT = outList\n","sub_path":"SaveFamiliesOfCategory.py","file_name":"SaveFamiliesOfCategory.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"646762592","text":"# The Global Secondary Index of OnHand table is wrong\n# its set to `by_supplier_id_and_product_id`\n# it has to be only `by_supplier_id` as `product_id` attribute was removed from the model\n# this script creates the new GSI and deletes the only one, non-destructively\n\nimport boto3\nimport sys\nimport os\nimport shlex\nfrom subprocess import Popen, PIPE\nimport time\nfrom botocore.exceptions import ClientError\n\n# Helper functions #\n\ndef get_exitcode_stdout_stderr(cmd):\n \"\"\"\n Execute the external command and get its exitcode, stdout and stderr.\n \"\"\"\n args = shlex.split(cmd)\n\n proc = Popen(args, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n exitcode = proc.returncode\n\n return exitcode, out, err\n\n# -------------------- #\n\n\ndef create_dist_table(dynamo_client, table_name):\n resp = dynamo_client.create_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'EntityID',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'Version',\n 'AttributeType': 'S'\n }\n ],\n TableName=table_name,\n KeySchema=[\n {\n 'AttributeName': 'EntityID',\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': 'Version',\n 'KeyType': 'RANGE'\n },\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 2,\n 'WriteCapacityUnits': 2\n }\n )\n return resp\n\n\ndef 
wait_till_creation(dynamo_client, table_name):\n for _ in range(90):\n try:\n resp = dynamo_client.describe_table(\n TableName=table_name,\n )\n if resp['Table']['TableStatus'] == 'ACTIVE':\n break\n except ClientError as ex:\n if ex.response['Error']['Code'] != 'ResourceNotFoundException':\n raise ex\n\n time.sleep(10)\n\n\nargs = sys.argv\nif len(args) >= 3:\n region = args[1]\n stage = args[2]\n os.environ['STAGE'] = stage\n\n dynamodb = boto3.client('dynamodb', region_name=region)\n\n print('Create brewoptix-distributors table')\n create_dist_table(dynamodb, 'brewoptix-distributors')\n wait_till_creation(dynamodb, 'brewoptix-distributors')\n\n print(\"creating GSI: by_distributor_id_and_order_date\")\n dynamodb.update_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'distributor_id',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'order_date',\n 'AttributeType': 'N'\n }\n ],\n TableName='brewoptix-purchase-orders',\n GlobalSecondaryIndexUpdates=[\n {\n 'Create': {\n 'IndexName': 'by_distributor_id_and_order_date',\n 'KeySchema': [\n {\n 'AttributeName': 'distributor_id',\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': 'order_date',\n 'KeyType': 'RANGE'\n }\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 2,\n 'WriteCapacityUnits': 2\n }\n }\n }\n ]\n )\n time.sleep(300)\n print(\"creating GSI: by_distributor_id_and_pack_date\")\n dynamodb.update_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'distributor_id',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'pack_date',\n 'AttributeType': 'N'\n }\n ],\n TableName='brewoptix-purchase-orders',\n GlobalSecondaryIndexUpdates=[\n {\n 'Create': {\n 'IndexName': 'by_distributor_id_and_pack_date',\n 'KeySchema': [\n {\n 'AttributeName': 'distributor_id',\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': 'pack_date',\n 'KeyType': 'RANGE'\n }\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 2,\n 'WriteCapacityUnits': 2\n }\n }\n }\n ]\n )\n time.sleep(300)\n print(\"creating GSI: by_distributor_id_and_ship_date\")\n dynamodb.update_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'distributor_id',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'ship_date',\n 'AttributeType': 'N'\n }\n ],\n TableName='brewoptix-purchase-orders',\n GlobalSecondaryIndexUpdates=[\n {\n 'Create': {\n 'IndexName': 'by_distributor_id_and_ship_date',\n 'KeySchema': [\n {\n 'AttributeName': 'distributor_id',\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': 'ship_date',\n 'KeyType': 'RANGE'\n }\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 2,\n 'WriteCapacityUnits': 2\n }\n }\n }\n ]\n )\n\n","sub_path":"deployment_scripts/9_BREWAPI_102/create_multiple_gsi.py","file_name":"create_multiple_gsi.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"62709072","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#Copyright (c) 2011, MARIMORE Inc Tokyo, Japan.\n#Contributed by \n# Mohd Kamal Bin Mustafa \n#All rights reserved.\n#\n#Redistribution and use in source and binary forms, with or without modification, \n#are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright 
notice, \n# this list of conditions and the following disclaimer in the documentation \n# and/or other materials provided with the distribution.\n# * Neither the name of the MARIMORE Inc nor the names of its contributors \n# may be used to endorse or promote products derived from this software \n# without specific prior written permission.\n#\n#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \n#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE \n#ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \n#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \n#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; \n#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON \n#ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \n#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \n#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nThis module provide partial implementation of the Flickr API using Oauth to\nauthenticate and authorize the API call.\n\nInitial reference from:-\n\n * http://mkelsey.com/2011/07/03/Flickr-oAuth-Python-Example.html\n\"\"\"\n\nimport time\nimport sys\nimport mimetools\nimport os\nimport json\n\nfrom xml.etree import ElementTree as ET\nfrom urlparse import parse_qs, parse_qsl\n\nimport oauth2 as oauth\nimport requests\n\nAPI_URL = \"http://api.flickr.com/services/rest\"\nUPLOAD_URL = \"http://api.flickr.com/services/upload/\"\nREQUEST_TOKEN_URL = \"http://www.flickr.com/services/oauth/request_token\"\nUSER_AUTHORIZATION_URL = \"http://www.flickr.com/services/oauth/authorize\"\nACCESS_TOKEN_URL = \"http://www.flickr.com/services/oauth/access_token\"\n\nclass FlickrError(Exception):\n pass\n\nclass PhotosetNotFound(Exception):\n pass\n\nclass FlickrAPI(object):\n def __init__(self, api_key, api_secret, token_key=None, token_secret=None):\n self.api_key = api_key\n self.api_secret = api_secret\n self.token_key = token_key\n self.token_secret = token_secret\n\n def sign_request(self, url, parameters, method='GET', token=None):\n consumer = oauth.Consumer(key=self.api_key, secret=self.api_secret)\n\n defaults = {\n 'oauth_timestamp': str(int(time.time())),\n 'oauth_nonce': oauth.generate_nonce(),\n 'oauth_consumer_key': consumer.key,\n }\n\n if self.token_key and self.token_secret and not token:\n token = oauth.Token(self.token_key, self.token_secret)\n defaults['oauth_token'] = token.key\n\n if token:\n defaults['oauth_token'] = token.key\n\n defaults.update(parameters)\n\n req = oauth.Request(method=method, url=url, parameters=defaults)\n signature_method = oauth.SignatureMethod_HMAC_SHA1()\n req.sign_request(signature_method, consumer, token)\n\n return req\n\n def get_request_token(self, callback_url=None):\n parameters = {\n 'oauth_callback': callback_url,\n }\n\n req = self.sign_request(REQUEST_TOKEN_URL, parameters)\n resp = requests.get(req.to_url())\n\n if resp.status_code == 200:\n resp_params = parse_qs(resp.content)\n oauth_token = resp_params['oauth_token'][0]\n oauth_token_secret = resp_params['oauth_token_secret'][0]\n return oauth_token, oauth_token_secret\n\n raise FlickrError(resp.content)\n\n def get_access_token(self, request_token, request_token_secret, oauth_verifier):\n \"\"\"Exchange `request_token` for an access token.\n\n `request_token` and 
`request_token_secret` should be the value\n returned by `get_request_token` method.\n \"\"\"\n parameters = {\n 'oauth_token': request_token,\n 'oauth_verifier': oauth_verifier,\n }\n token = oauth.Token(request_token, request_token_secret)\n token.set_verifier(oauth_verifier)\n\n req = self.sign_request(ACCESS_TOKEN_URL, parameters, token=token)\n resp = requests.get(req.to_url())\n\n if resp.status_code == 200:\n resp_params = dict(parse_qsl(resp.content))\n oauth_token = resp_params['oauth_token']\n oauth_token_secret = resp_params['oauth_token_secret']\n return oauth_token, oauth_token_secret\n\n raise FlickrError(resp.content)\n\n def get_sizes(self, photo_id, size='Medium'):\n params = {\n 'method':'flickr.photos.getSizes',\n 'photo_id': photo_id,\n }\n\n req = self.sign_request(API_URL, params)\n resp = requests.get(req.to_url())\n\n xml = ET.XML(resp.content)\n for element in xml[0]:\n if element.attrib['label'] == size:\n photo_url = element.attrib['source']\n return photo_url\n\n return None\n\n def upload_photo(self, inputfile, **kwargs):\n parameters = kwargs\n req = self.sign_request(UPLOAD_URL, parameters, method='POST')\n headers = req.to_header()\n\n files = {\n 'photo': open(inputfile, 'rb'),\n }\n resp = requests.post(UPLOAD_URL, data=kwargs, files=files, headers=headers)\n\n xml = ET.XML(resp.content)\n if xml.attrib['stat'] == 'ok':\n class Photo(object): pass\n photo = Photo()\n photo.id = xml[0].text\n return photo\n\n return None\n\n def delete_photo(self, photo_id, **kwargs):\n params = {\n 'method':'flickr.photos.delete',\n 'photo_id': photo_id,\n 'format': 'json',\n 'nojsoncallback': '1',\n }\n\n req = self.sign_request(API_URL, params, method='POST')\n headers = req.to_header()\n resp = requests.post(API_URL, data=params, headers=headers)\n\n json_obj = json.loads(resp.content)\n if json_obj:\n stat = json_obj.get('stat', None)\n code = json_obj.get('code', None)\n if stat == 'ok':\n return True\n else:\n raise FlickrError(\"stat:%s, code: %s\" % (stat, code))\n\n raise FlickrError(resp.content)\n\n \n def add_to_photoset(self, photoset_id, photo_id):\n params = {\n 'method': 'flickr.photosets.addPhoto',\n 'photoset_id': photoset_id,\n 'photo_id': photo_id,\n 'format': 'json',\n 'nojsoncallback': '1',\n }\n \n req = self.sign_request(API_URL, params, method='POST')\n headers = req.to_header()\n resp = requests.post(API_URL, data=params, headers=headers)\n \n json_obj = json.loads(resp.content)\n if json_obj:\n stat = json_obj.get('stat', None)\n code = json_obj.get('code', None)\n message = json_obj.get('message', None)\n \n # Check for Photoset Not Found\n if code == 1:\n raise PhotosetNotFound(stat)\n\n if stat == 'ok':\n return True\n else:\n raise FlickrError(\"stat: %s, code: %s, message: %s\" % (stat, code, message))\n \n raise FlickrError(resp.content)\n\n def get_photoset_info(self, photoset_id):\n params = {\n 'method': 'flickr.photosets.getInfo',\n 'photoset_id': photoset_id,\n 'format': 'json',\n 'nojsoncallback': '1',\n }\n\n req = self.sign_request(API_URL, params, method='POST')\n headers = req.to_header()\n resp = requests.post(API_URL, data=params, headers=headers)\n \n json_obj = json.loads(resp.content)\n if json_obj:\n stat = json_obj.get('stat', None)\n code = json_obj.get('code', None)\n message = json_obj.get('message', None)\n if stat == 'ok':\n return True\n return False\n\n def create_photoset(self, title, description, primary_photo_id):\n params = {\n 'method': 'flickr.photosets.create',\n 'title': title,\n 'description': description,\n 
 'primary_photo_id': primary_photo_id,\n            'format': 'json',\n            'nojsoncallback': '1',\n        }\n\n        req = self.sign_request(API_URL, params, method='POST')\n        headers = req.to_header()\n        resp = requests.post(API_URL, data=params, headers=headers)\n        json_obj = json.loads(resp.content)\n        if json_obj:\n            stat = json_obj.get('stat', None)\n            code = json_obj.get('code', None)\n            message = json_obj.get('message', None)\n            if stat == 'ok':\n                photoset = json_obj.get('photoset', None)\n                return photoset.get('id')\n            else:\n                raise FlickrError(\"stat: %s, code: %s, message: %s\" % (stat, code, message))\n        raise FlickrError(resp.content)\n\n    def list_photoset(self, page=None, per_page=None):\n        params = {\n            'method': 'flickr.photosets.getList',\n            'format': 'json',\n            'nojsoncallback': '1',\n        }\n\n        req = self.sign_request(API_URL, params, method='GET')\n        resp = requests.get(req.to_url())\n        \n        json_obj = json.loads(resp.content)\n        if json_obj:\n            stat = json_obj.get('stat', None)\n            code = json_obj.get('code', None)\n            message = json_obj.get('message', None)\n            if stat == 'ok':\n                return json_obj\n        return None\n","sub_path":"pyflickr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"129296210","text":"# -*- coding: utf-8 -*-\n\nL = [\n    ['Apple', 'Google', 'Microsoft'],\n    ['Java', 'Python', 'Ruby', 'PHP'],\n    ['Adam', 'Bart', 'Lisa']\n]\n\n# Print Apple:\nprint(L[0][0])\n# Print Python:\nprint(L[1][1])\n# Print Lisa:\nprint(L[2][2])","sub_path":"Basic/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"625815569","text":"import pandas as pd\n\n# number of nodes (max: 50)\nn = 50\n\n# read network data\ndf_US_airport_ranking = pd.read_csv(\n    '../data/US_Airport_Ranking.csv').head(n)\ndf_US_airport_iata = pd.read_csv('./data/US_Airport_IATA.csv')\ndf_route = pd.read_csv('../data/route.csv')\n\n# link Airport Name and IATA in df_US_airport_ranking\nfor index, row in df_US_airport_ranking.iterrows():\n    df_US_airport_ranking.at[index, 'IATA'] = df_US_airport_iata[df_US_airport_iata['Airport Name']\n                                                                 == row['Airport Name']]['IATA'].values[0]\n\n# extract route data according to Source IATA\ndf_route_source = pd.DataFrame()\nfor index, row in df_US_airport_ranking.iterrows():\n    df_route_source = pd.concat(\n        [df_route_source, df_route[df_route[\"Source airport\"] == str(row[\"IATA\"])]])\n\n# extract route data according to Destination IATA\ndf_route_source_dest = pd.DataFrame()\nfor index, row in df_US_airport_ranking.iterrows():\n    df_route_source_dest = pd.concat(\n        [df_route_source_dest, df_route_source[df_route_source[\"Destination airport\"] == str(row[\"IATA\"])]])\n\n# create adjacency matrix according to flights\ndf_ad_matrix = pd.DataFrame(\n    index=df_US_airport_ranking['IATA'].values, columns=df_US_airport_ranking['IATA'].values)\ndf_ad_matrix = df_ad_matrix.fillna(0)\nfor index, row in df_route_source_dest.iterrows():\n    df_ad_matrix.at[row['Source airport'], row['Destination airport']] += 1\n\n# create weighted adjacency matrix according to passengers\nfor index, row in df_ad_matrix.iterrows():\n    df_ad_matrix[index] = df_ad_matrix[index].values * \\\n        df_US_airport_ranking[df_US_airport_ranking['IATA']\n                              == index]['Passenger'].values[0]\n\n# normalization of adjacency matrix\ndf_ad_matrix = df_ad_matrix.T / 
df_ad_matrix.values.max()\ndf_ad_matrix.to_csv('./data/US_Airport_Ad_Matrix.csv')\n","sub_path":"create-sis-network/create_sis_network.py","file_name":"create_sis_network.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"532437223","text":"import database.connect \r\nimport os\r\nimport autoqm.utils\r\n\r\ndef select_push_target(registration_table,\r\n\t\t\t\t\t\tresults_table,\r\n\t\t\t\t\t\tsuccess_data_path):\r\n\t\"\"\"\r\n\tThis method is to inform job pusher which targets \r\n\tto push, which need to meet three requirements:\r\n\t1. status is job_success\r\n\t2. job files (.log and .inp) located as expected\r\n\t3. results table doesn't have this job at\r\n\t   that level of theory\r\n\tReturns a list of targets with necessary meta data\r\n\t\"\"\"\r\n\treg_query = {\"status\":\"job_success\"}\r\n\ttargets = list(registration_table.find(reg_query))\r\n\r\n\tselected_targets = []\r\n\tfor target in targets:\r\n\t\taug_inchi = str(target['aug_inchi'])\r\n\t\tspec_name = aug_inchi.replace('/', '_slash_')\r\n\t\tspec_path = os.path.join(success_data_path, spec_name)\r\n\t\tlog_path = os.path.join(spec_path, 'input.log')\r\n\t\tinp_path = os.path.join(spec_path, 'input.inp')\r\n\t\tif os.path.exists(log_path) and os.path.exists(inp_path):\r\n\t\t\tlevel_of_theory = autoqm.utils.get_level_of_theory(inp_path)\r\n\r\n\t\t\t# query results table\r\n\t\t\tres_query = {\"aug_inchi\":aug_inchi, \r\n\t\t\t\t\t\t\"level_of_theory\":level_of_theory}\r\n\t\t\tres_entries = list(results_table.find(res_query))\r\n\t\t\tif len(res_entries) == 0:\r\n\t\t\t\t# means no records of this target\r\n\t\t\t\t# in results table\r\n\t\t\t\tselected_targets.append(target)\r\n\treturn selected_targets","sub_path":"database/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"253302944","text":"from vbp.views import DISPLAY_LENGTH\nfrom django.shortcuts import render, HttpResponse\nfrom .models import Company, Drug, Sales, CURRENT_YEAR\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Sum, Q\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.core import serializers\nimport json\n\n@login_required\ndef index(request):\n    TOP_N = 10\n    companies = Company.objects.all()\n    try:\n        companies_ranked = sorted(\n            companies, key=lambda x: x.latest_annual_netsales, reverse=True\n        )[:TOP_N] # sort by latest-year sales, descending\n    except:\n        companies_ranked = None\n    sales_ranked = Sales.objects.filter(year=CURRENT_YEAR).order_by(\"-netsales_value\")[:TOP_N] \n    # drugs_ranked = sorted(drugs, key=lambda x: x.annual_netsales, reverse=True) # sort by latest-year sales, descending\n    context = {\n        \"companies_ranked\": companies_ranked,\n        \"sales_ranked\": sales_ranked,\n        \"CURRENT_YEAR\": CURRENT_YEAR,\n    }\n    return render(request, \"rdpac/index.html\", context)\n\n\n@login_required\ndef drug_detail(request, drug_id):\n    drug = Drug.objects.get(pk=drug_id)\n    sales = drug.sales.all()\n    context = {\n        \"drug\": drug,\n        \"sales\": sales,\n    }\n    return render(request, \"rdpac/drug_detail.html\", context)\n\n\n@login_required\ndef company(request):\n    pass\n\n\n@login_required\ndef drug(request):\n    pass\n\n\n@login_required\ndef company_detail(request, company_id):\n    company = Company.objects.get(pk=company_id)\n    \n    context = {\n        \"company\": company,\n    }\n    return render(request, \"rdpac/company_detail.html\", 
context)\n\n\n@login_required\ndef search(request, kw):\n    print(kw)\n    # kw = request.POST.get(\"kw\")\n    company_result = Company.objects.filter(\n        Q(name_en__icontains=kw) # search by company English name\n        | Q(name_cn__icontains=kw) # search by company Chinese name\n        | Q(abbr__icontains=kw) # search by company abbreviation\n    ).distinct()\n\n    # the next two lines work around duplicate objects returned by the MSSQL database and Django pagination for distinct()/order_by() querysets\n    sr_ids = [company.id for company in company_result]\n    company_result2 = Company.objects.filter(id__in=sr_ids)\n\n    drug_result = Drug.objects.filter(\n        Q(molecule_en__icontains=kw) # search by drug English generic name\n        | Q(molecule_cn__icontains=kw) # search by drug Chinese generic name\n        | Q(product_name_en__icontains=kw) # search by drug English product name\n        | Q(product_name_cn__icontains=kw) # search by drug Chinese product name\n    )\n\n    # the next two lines work around duplicate objects returned by the MSSQL database and Django pagination for distinct()/order_by() querysets\n    sr_ids = [drug.id for drug in drug_result]\n    drug_result2 = Drug.objects.filter(id__in=sr_ids)\n\n    objs = list(company_result2) + list(drug_result2)\n    try:\n        data = serializers.serialize(\"json\", objs, ensure_ascii=False)\n        res = {\n            \"data\": data,\n            \"code\": 200,\n        }\n        print(objs)\n    except Exception as e:\n        res = {\n            \"errMsg\": e,\n            \"code\": 0,\n        }\n    return HttpResponse(\n        json.dumps(res, ensure_ascii=False),\n        content_type=\"application/json charset=utf-8\",\n    )\n    \n","sub_path":"rdpac/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"478394526","text":"import numpy as np\r\nimport agent as a\r\nimport time\r\n\r\ngen_cnt = 0\r\ngen_avg = []\r\nstart = 0\r\n\r\ndef return_stime():\r\n    global start\r\n    return start\r\n\r\ndef set_stime():\r\n    global start\r\n    start = time.time()\r\n\r\ndef set_gencnt(val):\r\n    global gen_cnt\r\n    gen_cnt = val\r\n\r\ndef gen_cnt_val():\r\n    global gen_cnt\r\n    return gen_cnt\r\n\r\ndef gen_avg_val():\r\n    global gen_avg\r\n    return gen_avg\r\n\r\ndef add_gen_avg(val):\r\n    global gen_avg\r\n    gen_avg.append(val)\r\n\r\ndef readp_size():\r\n    with open(\"SETTINGS.txt\") as f:\r\n        for i in range(3):\r\n            f.readline()\r\n        popstr = f.readline()\r\n    return int(popstr[16:])\r\n\r\ndef readm_rate():\r\n    with open(\"SETTINGS.txt\") as f:\r\n        for i in range(2):\r\n            f.readline()\r\n        mutstr = f.readline()\r\n    return float(mutstr[14:]) / 100\r\n\r\ndef write_result(trialnum, found):\r\n    with open(\"RESULT.txt\", 'a') as f:\r\n        f.write(\"TRIAL \" + str(trialnum) + '\\n')\r\n        f.write(\"GENS: \" + str(gen_cnt + 1) + '\\n')\r\n        f.write(\"FOUND: \")\r\n        if found:\r\n            f.write(\"True\\n\")\r\n        else:\r\n            f.write(\"False\\n\")\r\n        end = time.time()\r\n        f.write(\"EX_TIME: \" + str(end - return_stime()) + 's' + '\\n')\r\n        for i in range(gen_cnt):\r\n            f.write(\"gen\" + str(i + 1) + \" avg: \" + str(gen_avg[i]) + '\\n')\r\n        if gen_cnt == 0:\r\n            f.write(\"gen\" + '1' + \" avg: \" + str(gen_avg[0]) + '\\n')\r\n        f.write('\\n')\r\n    set_stime()\r\n\r\npopulation_size = readp_size()\r\nmutation_rate = readm_rate()\r\n\r\ndef sigmoid(x):\r\n    return 1 / (1 + np.exp(-x))\r\n\r\nclass NeuralNetwork:\r\n\r\n    def __init__(self, L1 = np.array([]), L2 = np.array([])):\r\n        self.W1 = np.array([])\r\n        self.W2 = np.array([])\r\n        if L1.size == 0 or L2.size == 0:\r\n            self.W1 = np.random.uniform(-5, 5, (4, 4))\r\n            self.W2 = np.random.uniform(-5, 5, 4)\r\n        else:\r\n            self.W1 = L1\r\n            self.W2 = L2\r\n    def calc(self, inputs):\r\n        Z = np.array(inputs)\r\n        Z = Z.dot(self.W1)\r\n        Z = sigmoid(Z)\r\n        Z = Z.dot(self.W2)\r\n        return Z\r\n\r\ndef next_gen(agents):\r\n    \r\n    global gen_cnt\r\n    global gen_avg\r\n    gen_cnt += 
1\r\n\r\n new_gen = []\r\n fitness_sum = 0\r\n max_index = 0\r\n avg_score = 0\r\n for i in range(len(agents)):\r\n if agents[i].fitness > agents[max_index].fitness:\r\n max_index = i\r\n avg_score += agents[i].fitness\r\n agents[i].fitness = agents[i].fitness ** 2\r\n fitness_sum += agents[i].fitness\r\n\r\n avg_score /= len(agents)\r\n gen_avg.append(avg_score)\r\n\r\n for agent in agents:\r\n agent.fitness /= fitness_sum\r\n new_gen.append(agents[max_index])\r\n print(\"max fitness: \", new_gen[0].fitness, \" gen: \", gen_cnt, end='')\r\n print('\\n')\r\n\r\n for i in range(len(agents) - 1):\r\n parent = pick_one(agents)\r\n new_agent = a.Bird(parent.nn.W1, parent.nn.W2)\r\n new_agent.color = parent.color\r\n mutate(new_agent)\r\n new_gen.append(new_agent)\r\n return new_gen\r\n\r\ndef mutate(agent):\r\n\r\n for val in agent.color:\r\n num = np.random.uniform(0, 1)\r\n if num <= mutation_rate:\r\n val = np.random.uniform(0, 255)\r\n\r\n\r\n for weight_row in agent.nn.W1:\r\n for weight in weight_row:\r\n num = np.random.uniform(0, 1)\r\n if num <= mutation_rate:\r\n weight += np.random.uniform(-0.1, 0.1)\r\n\r\n for weight in agent.nn.W2:\r\n num = np.random.uniform(0, 1)\r\n if num <= mutation_rate:\r\n weight += np.random.uniform(-0.1, 0.1)\r\n\r\ndef pick_one(agents):\r\n num = np.random.uniform(0, 1)\r\n index = 0\r\n while num > 0:\r\n num -= agents[index].fitness\r\n index += 1\r\n index -= 1\r\n return agents[index]\r\n\r\ndef random_agents():\r\n agents = []\r\n for i in range(population_size):\r\n new_agent = a.Bird()\r\n agents.append(new_agent)\r\n return agents\r\n","sub_path":"AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"341288638","text":"from django.conf.urls import url\nfrom . import views # views.py .. 
def() in Current {DIR}\n\nurlpatterns = [\n url(r'^$', views.indexBlog, name='indexBlog'),\n url(r'^index/$', views.indexBlog, name='indexBlog'),\n url(r'^format/$', views.format, name='templates'),\n\n url(r'^post/$', views.post_list, name='post_list'),\n url(r'^post/(?P[0-9]+)/$', views.post_detail, name='post_detail'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n url(r'^post/(?P[0-9]+)/edit/$', views.post_edit, name='post_edit'),\n\n url(r'^test/$', views.test, name='test'),\n url(r'^eggs/$', views.eggs, name='eggs'),\n url(r'^bracket/$', views.bracket, name='bracket'),\n\n\n]\n\n# url(regEX' ', (views.py - def name), name='alt')\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"61948404","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\n Copyright 2021 Recurve Analytics, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\nimport os\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\nfrom .settings import ACC_COMPONENTS_ELECTRICITY, ACC_COMPONENTS_GAS, database_location\n\n__all__ = (\n \"get_db_connection\",\n \"get_deer_load_shape\",\n \"get_filtered_acc_elec\",\n \"get_filtered_acc_gas\",\n)\n\n\ndef get_db_connection(database_year=\"2020\"):\n \"\"\"Get the db connection for a given year\n\n Parameters\n ----------\n database_year: str\n The year corresponding to the database that contains the avoided costs data.\n Requires that year's database to have already been downloaded\n using the `flexvalue downloaded-avoided-costs-data-db --year 20XX` command.\n\n Returns\n -------\n sqlalchemy.engine.Engine\n \"\"\"\n full_db_path = os.path.join(database_location(), f\"{database_year}.db\")\n if not os.path.exists(full_db_path):\n raise ValueError(f\"Can not find SQLite file at this path: {full_db_path}\")\n database_url = f\"sqlite:///{full_db_path}\"\n return create_engine(database_url)\n\n\ndef execute_query(database_year, query):\n \"\"\"Execute arbitrary query on the avoided costs db\n\n Parameters\n ----------\n database_year: str\n The year corresponding to the database that contains the avoided costs data.\n Requires that year's database to have already been downloaded\n using the `flexvalue downloaded-avoided-costs-data-db --year 20XX` command.\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n con = get_db_connection(database_year=database_year)\n return pd.read_sql(query, con=con).drop(\"local_pkid_\", axis=1)\n\n\ndef get_deer_load_shape(database_year):\n \"\"\"Returns all of the deer load shape 8760 load profiles\n\n Parameters\n ----------\n database_year: str\n The year corresponding to the database that contains the avoided costs data.\n Requires that year's database to have already been downloaded\n using the `flexvalue downloaded-avoided-costs-data-db --year 20XX` command.\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n con = get_db_connection(database_year=database_year)\n return 
pd.read_sql_table(\"deer_load_shapes\", con=con).set_index(\"hour_of_year\")\n\n\ndef get_filtered_acc_elec(database_year, utility, climate_zone, start_year, end_year):\n \"\"\"Returns the electricity avoided costs data\n\n Parameters\n ----------\n database_year: str\n The year corresponding to the database that contains the avoided costs data.\n Requires that year's database to have already been downloaded\n using the `flexvalue downloaded-avoided-costs-data-db --year 20XX` command.\n utility: str\n Which uility to filter by when loading avoided costs data\n climate_zone: str\n Which climate zone to filter by when loading avoided costs data\n start_year: int\n Which year to start the filter of avoided costs data\n end_year: int\n Which year to end the filter of avoided costs data\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n columns = [\n \"year\",\n \"month\",\n \"hour_of_day\",\n \"hour_of_year\",\n *ACC_COMPONENTS_ELECTRICITY,\n \"marginal_ghg\",\n ]\n columns_str = \", \".join(columns)\n climate_zone = (\n climate_zone if climate_zone.startswith(\"CZ\") else f\"CZ{climate_zone}\"\n )\n sql_str = f\"\"\" \n SELECT * \n FROM acc_electricity\n WHERE utility = '{utility}'\n AND climate_zone = '{climate_zone}'\n AND year >= {start_year}\n AND year <= {end_year}\n \"\"\"\n con = get_db_connection(database_year=database_year)\n df = pd.read_sql(sql_str, con=con)\n if df.empty:\n raise ValueError(\n \"Can not find avoided costs for\\n:\"\n f\"utility:{utility}\\nclimate_zone:{climate_zone}\\nstart_year:{start_year}\\n\"\n f\"end_year:{end_year}\"\n )\n return df\n\n\ndef get_filtered_acc_gas(database_year, start_year, end_year):\n \"\"\"Returns gas avoided costs data\n\n Parameters\n ----------\n database_year: str\n The year corresponding to the database that contains the avoided costs data.\n Requires that year's database to have already been downloaded\n using the `flexvalue downloaded-avoided-costs-data-db --year 20XX` command.\n start_year: int\n Which year to start the filter of avoided costs data\n end_year: int\n Which year to end the filter of avoided costs data\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n columns = [\n \"year\",\n \"month\",\n *ACC_COMPONENTS_GAS,\n ]\n columns_str = \", \".join(columns)\n sql_str = f\"\"\" \n SELECT * \n FROM acc_gas\n WHERE year >= {start_year}\n AND year <= {end_year}\n \"\"\"\n con = get_db_connection(database_year=database_year)\n return pd.read_sql(sql_str, con=con)\n\n\ndef get_all_valid_utility_climate_zone_combinations(database_year, utility=None):\n \"\"\"Returns all utility-climate zone combinations\n\n Parameters\n ----------\n database_year: str\n The year corresponding to the database that contains the avoided costs data.\n Requires that year's database to have already been downloaded\n using the `flexvalue downloaded-avoided-costs-data-db --year 20XX` command.\n utility: str\n (optional) Which uility to filter by when loading avoided costs data\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n where_str = f\"WHERE utility = '{utility}'\" if utility else \"\"\n query = f\"\"\"\n SELECT * \n FROM acc_electricity_utilities_climate_zones\n {where_str}\n \"\"\"\n return execute_query(database_year, query)\n\n\ndef get_all_valid_deer_load_shapes(database_year):\n \"\"\"Returns all valid DEER load shapes\n\n Parameters\n ----------\n database_year: str\n The year corresponding to the database that contains the avoided costs data.\n Requires that year's database to have already been downloaded\n using the `flexvalue 
downloaded-avoided-costs-data-db --year 20XX` command.\n\n Returns\n -------\n list\n \"\"\"\n query = \"\"\"\n SELECT *\n FROM deer_load_shapes\n limit 1\n \"\"\"\n valid_deer_load_shapes = execute_query(database_year, query)\n all_columns_w_utilities = list(\n valid_deer_load_shapes.drop(\"hour_of_year\", axis=1).columns\n )\n # TODO (ssuffian): Reshape db so it is a query by utility column\n return list(\n set(\n [\n c.replace(\"PGE_\", \"\")\n .replace(\"SDGE_\", \"\")\n .replace(\"SCG_\", \"\")\n .replace(\"SCE_\", \"\")\n for c in all_columns_w_utilities\n ]\n )\n )\n","sub_path":"flexvalue/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":7278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"84632913","text":"import copy\r\n\r\n\r\ndef load_data():\r\n with open('data.txt', 'r') as f:\r\n temp = f.read().splitlines()\r\n data = []\r\n for row in temp:\r\n data.append([x for x in row])\r\n return data\r\n\r\n\r\ndef get_state(seat):\r\n if seat == 'L':\r\n return 'empty'\r\n if seat == '#':\r\n return 'occupied'\r\n if seat == '.':\r\n return 'floor'\r\n\r\n\r\ndef get_neighbours(x, y, data):\r\n height = len(data)\r\n width = len(data[0])\r\n neighbours = []\r\n for a in (-1,0,1):\r\n for b in (-1,0,1):\r\n if a == 0 and a == b:\r\n continue\r\n neighbour = None\r\n for i in range(1, height):\r\n if y + (a * i) < 0:\r\n continue \r\n if x + (b * i) < 0:\r\n continue\r\n if y + (a * i) >= height:\r\n continue\r\n if x + (b * i) >= width:\r\n continue\r\n\r\n neighbour = get_cell(y+(a*i), x+(b*i), data)\r\n if neighbour != '.':\r\n break\r\n neighbours.append(neighbour)\r\n return neighbours\r\n\r\n\r\ndef get_cell(y, x, data):\r\n return data[y][x]\r\n\r\n\r\ndef get_next_state(current_state, neighbours):\r\n if current_state == 'empty':\r\n if neighbours.count('#') == 0:\r\n return '#'\r\n if current_state == 'occupied':\r\n if neighbours.count('#') >= 5:\r\n return 'L'\r\n return current_state\r\n\r\n\r\ndef main():\r\n data = load_data()\r\n i = 0\r\n count_changes = -1\r\n while count_changes != 0:\r\n count_changes = 0\r\n next_stage = copy.deepcopy(data)\r\n for y, row in enumerate(data):\r\n for x, seat in enumerate(row):\r\n current_state = get_state(seat)\r\n neighbours = get_neighbours(x, y, data)\r\n next_state = get_next_state(current_state, neighbours)\r\n if next_state != current_state:\r\n count_changes += 1\r\n next_stage[y][x] = next_state\r\n\r\n print(count_changes)\r\n data = copy.deepcopy(next_stage)\r\n i += 1\r\n \r\n count_occupied = 0\r\n for row in data:\r\n for cell in row:\r\n if cell == '#':\r\n count_occupied += 1\r\n print(count_occupied)\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"11/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"586212432","text":"# Advent of code\n# Author = Rob Lorimer\nYEAR=2020; DAY=3; PART=2\n\nfrom typing import NamedTuple\n# add project directory to path\nimport sys, os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom utils import aoc\n\nclass Point(NamedTuple):\n x: int\n y: int\n \n# global vars for input size\nwidth = 0; height = 0\n\n# recursively count trees encountered for a given slope\ndef tree_counter(tree_locations, location=Point(0,0), tree_count=0, slope=None):\n global width; global height\n tree_count = tree_count + 1 if location in tree_locations else 
tree_count # count a tree if location is found in tree_locations\n new_location = Point((location.x + slope.x) % width, location.y + slope.y) # use modulus to account for repeating horizontal pattern\n return tree_counter(tree_locations, new_location, tree_count, slope) if new_location.y < height else tree_count # continue recursion until the bottom is reached\n\n@aoc.puzzle_timer()\ndef solve(input): \n global width; global height\n lines = list(input.splitlines())\n width = len(lines[0]); height = len(lines)\n tree_locations = [Point(x, y) for y in range(height) for x in range(width) if lines[y][x] == '#'] # co-ordinates of all trees\n return tree_counter(tree_locations, slope=Point(1,1)) \\\n * tree_counter(tree_locations, slope=Point(3,1)) \\\n * tree_counter(tree_locations, slope=Point(5,1)) \\\n * tree_counter(tree_locations, slope=Point(7,1)) \\\n * tree_counter(tree_locations, slope=Point(1,2))\n\nif __name__ == '__main__':\n puzzle_input = aoc.puzzle_input(YEAR, DAY)\n print(f'Part {PART}: ', solve(puzzle_input))","sub_path":"original_solutions/day03_part2.py","file_name":"day03_part2.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"440343191","text":"import os\n\n# set working directory to TRS directory\nwd = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # gets the path of *this file* and then gets the dir it's in and then gets the parent dir\n\n# file paths\ncounties = os.path.join(wd, r\"data\\geodata.gdb\\CA_Counties\")\nPLSS_sections = os.path.join(wd, r\"data\\geodata.gdb\\PLSSFirstDivision\")\nPLSS_township = os.path.join(wd, r\"data\\geodata.gdb\\PLLSTownship\")\n\n# Field Name for unique ID\nTRS_ID_fieldname = 'FRSTDIVID'\n\n# temp_output\ntemp = os.path.join(wd, r\"data\\temp.gdb\")","sub_path":"scripts/local_vars.py","file_name":"local_vars.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"204583714","text":"import sqlite3 \r\nimport datetime\r\nfrom datetime import datetime,timedelta\r\n#\r\ndatabase = \"017_jan08_2020_db_no_sort\"\r\nreceiving_database = \"summary_database\"\r\n#\r\nconn = sqlite3.connect(database) # one table only 'first_table'\r\nc = conn.cursor()\r\n#\r\nconn_2 = sqlite3.connect(receiving_database)\r\nc_2 = conn_2.cursor()\r\n#\r\nc_2.execute(\"CREATE TABLE IF NOT EXISTS summary_for_real (column_one TEXT,column_two REAL,column_three REAL, column_four REAL, column_five text,column_six REAL )\") \r\nprint (\"++++++++++++++++++\")\r\nprint (database)\r\n#\r\ndef dynamic_data_entry_averages():\r\n conn_2.execute(\"INSERT INTO summary(column_one,column_two,column_three,column_four, column_five,column_six) VALUES (?,?,?,?,?,?)\",( date_var, col_two_avg , col_thr_avg , col_for_avg ,database,number_for_db ) )\r\n conn_2.commit()\r\n#\r\ndate_var = '2020-01-08'# \r\nsearch_number = 150\r\n\r\nprint(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%% tell them apart %%%%%%%%%%%%%%%%\")\r\nvar_one = 34 # need something here\r\nprint()\r\nnumber = 0\r\ntotal_count = 0\r\nnum_added_db = 0\r\nprint (\"++++++++++++++++++++++++++\")\r\nwhile (var_one ):\r\n \r\n var_one = None #this needs to null .. 
will it be reset?\r\n c.execute (\"SELECT column_one,column_five FROM first_table WHERE column_one > '%s'\" %date_var)\r\n \r\n number =0 \r\n \r\n col_two_sum = 0 \r\n col_two_count = 0\r\n \r\n col_thr_sum = 0\r\n col_thr_count = 0\r\n col_for_sum = 0\r\n col_for_count = 0\r\n \r\n for row in c.fetchmany(search_number):\r\n## print (row[0])\r\n## print (row[1])\r\n five_var = (row[1])\r\n # print (type(row[1]))\r\n #five_var = int(five_var)\r\n # print (type(five_var))\r\n var_one = row[0] \r\n if five_var!= None:\r\n if ((five_var)>1800000 and (five_var)< 1818500):\r\n col_two_sum = col_two_sum + five_var\r\n col_two_count= col_two_count +1\r\n if ((five_var)>1818500 and (five_var)< 1824000):\r\n col_thr_sum = col_thr_sum + five_var\r\n col_thr_count= col_thr_count +1\r\n if ((five_var)>1824000 and (five_var)< 1895000):\r\n col_for_sum = col_for_sum + five_var\r\n col_for_count= col_for_count +1\r\n \r\n number = number +1\r\n \r\n num_added_db = num_added_db + 1\r\n total_count = total_count + number\r\n number_for_db = number\r\n number = 0 # this needs to be reset\r\n \r\n date = datetime.strptime(date_var, \"%Y-%m-%d\") # this sets up the variable for altering\r\n new_date = date + timedelta(days = 1) # new_date is created here to accept the next day?\r\n #datetime.strftime(new_date, \"%Y-%m-%d\") # the object is turned back into a string no??\r\n date_var = str(new_date)\r\n date_var = date_var[0:10]\r\n \r\n if (col_two_sum > 1800000 and col_two_sum != None):\r\n \r\n col_two_avg = int(col_two_sum/(col_two_count ))\r\n else:\r\n col_two_avg = None\r\n \r\n if (col_thr_sum > 1800000 and col_thr_sum != None):\r\n ###### this is not calculating correctly\r\n col_thr_avg = int(col_thr_sum/(col_thr_count ))\r\n else:\r\n col_thr_avg = None\r\n # print (\" the average for col_thr for this day is \", col_thr_avg)\r\n if (col_for_sum > 1800000 and col_for_sum !=None): # is null less than 1500000?\r\n # this is not causing problems, but not helping\r\n \r\n col_for_avg = int(col_for_sum/(col_for_count ))\r\n else:\r\n col_for_avg = None\r\n # print(\"date \",date_var,\" \",col_two_avg,\" \", col_thr_avg,\" \", col_for_avg)\r\n # print (\"type date_var \",(type(date_var)))\r\n dynamic_data_entry_averages() \r\n \r\n \r\nprint (\"total rows pulled from db is \", total_count)\r\nprint (\"total rows added to database is \", num_added_db)\r\nprint (\" end end end end end \")\r\n\r\n\r\n \r\n","sub_path":"python_sqlite3_collect_data.py","file_name":"python_sqlite3_collect_data.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"582797927","text":"\"\"\"This file implements Deep CFR, as described in Brown et al. 
- Deep Counterfactual Regret Minimization\n(2019).\n\"\"\"\n\nimport collections\nimport os\nimport time\nimport typing\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nimport rlpoker.cfr_util\nfrom rlpoker import cfr\nfrom rlpoker.cfr_game import (\n get_available_actions, sample_chance_action, is_terminal, payoffs, which_player)\nfrom rlpoker.util import sample_action\nfrom rlpoker import buffer\nfrom rlpoker import extensive_game\nfrom rlpoker import best_response\nfrom rlpoker import neural_game\nfrom rlpoker import util\n\n\nStrategyMemoryElement = collections.namedtuple('StrategyMemoryElement', [\n 'info_set_id', 't', 'info_set_strategy'\n])\n\nAdvantageMemoryElement = collections.namedtuple('AdvantageMemoryElement', [\n 'info_set_id', 't', 'info_set_advantages'\n])\n\n\nclass RegretPredictor:\n\n \"\"\"\n A RegretPredictor can be used inside cfr_traverse.\n \"\"\"\n\n def predict_advantages(self, info_set_vector, action_indexer: neural_game.ActionIndexer) -> \\\n extensive_game.ActionFloat:\n \"\"\"\n Predicts advantages for each action available in the information set.\n\n Args:\n info_set_vector: ndarray.\n action_indexer: ActionIndexer. The mapping from actions to indices.\n\n Returns:\n ActionFloat. The predicted regret for each action in the information set.\n \"\"\"\n raise NotImplementedError(\"Not implemented in the base class.\")\n\n def compute_action_probs(self, info_set_vector, action_indexer: neural_game.ActionIndexer):\n \"\"\"\n Compute action probabilities in this information set.\n\n Args:\n info_set_vector: ndarray.\n action_indexer: ActionIndexer.\n\n Returns:\n ActionFloat. The action probabilities in this information set.\n \"\"\"\n action_advantages = self.predict_advantages(info_set_vector, action_indexer)\n return rlpoker.cfr_util.compute_regret_matching(action_advantages, highest_regret=True)\n\n def train(self, batch: typing.List[AdvantageMemoryElement],\n action_indexer: neural_game.ActionIndexer,\n info_set_vectoriser: neural_game.InfoSetVectoriser,\n current_time: int):\n \"\"\"Train on one batch of AdvantageMemoryElements.\n\n Args:\n batch: list of AdvantageMemoryElement objects.\n action_indexer: ActionIndexer. Turns actions into indices.\n info_set_vectoriser: InfoSetVectoriser. Turns info set ids into vectors.\n current_time: int. The current iteration we are training on.\n\n Returns:\n loss: float.\n \"\"\"\n pass\n\n\nclass DeepRegretNetwork(RegretPredictor):\n \n def __init__(self, state_shape: typing.Tuple[int], action_indexer: neural_game.ActionIndexer, player: int):\n \"\"\"\n A DeepRegretNetwork uses a neural network to predict advantages for actions in information sets.\n\n Args:\n state_dim: int. The dimension of the state vector.\n action_indexer: ActionIndexer.\n player: int. The player number it represents. 
Used for scoping.\n \"\"\"\n self.state_shape = state_shape\n self.action_indexer = action_indexer\n self.player = player\n\n self.sess = None\n\n self.scope = 'regret_network_{}'.format(self.player)\n self.tensors, self.init_op = self.build(self.state_shape, self.action_indexer.action_dim, self.scope)\n\n self.global_step = 0\n\n def initialise(self):\n \"\"\"\n Initialise the weights of the network, using the tensorflow session self.sess.\n \"\"\"\n self.sess.run(self.init_op)\n\n @staticmethod\n def build(state_shape, action_dim, scope, hidden_dim=64):\n with tf.variable_scope(scope):\n input_layer = tf.placeholder(tf.float32, shape=(None,) + state_shape, name='state')\n\n # For now, we flatten so that we can accept any state shape.\n hidden = tf.layers.flatten(input_layer, name='flatten')\n hidden = tf.layers.dense(hidden, hidden_dim, activation=tf.nn.relu)\n hidden = tf.layers.dense(hidden, hidden_dim, activation=tf.nn.relu)\n\n advantages = tf.layers.dense(hidden, action_dim)\n\n info_set_advantages = tf.placeholder(tf.float32, shape=(None, action_dim), name='info_set_advantages')\n times = tf.placeholder(tf.float32, shape=(None, 1), name='times')\n current_time = tf.placeholder(tf.float32, shape=(), name='current_time')\n\n regrets = tf.reduce_sum((info_set_advantages - advantages)**2, axis=1, name='regrets')\n\n loss = tf.reduce_mean(times * regrets) / current_time\n\n summary = tf.summary.scalar('loss', loss)\n\n train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)\n\n tensors = {\n 'input_layer': input_layer,\n 'advantages': advantages,\n 'train_op': train_op,\n 'loss': loss,\n 'times': times,\n 'current_time': current_time,\n 'info_set_advantages': info_set_advantages,\n 'summary': summary\n }\n\n init_op = tf.variables_initializer(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope))\n\n return tensors, init_op\n\n def set_sess(self, sess: tf.Session):\n self.sess = sess\n\n def predict_advantages(self, info_set_vector, action_indexer: neural_game.ActionIndexer) -> \\\n extensive_game.ActionFloat:\n advantages = self.sess.run(self.tensors['advantages'], feed_dict={\n self.tensors['input_layer']: [info_set_vector]\n })\n\n return extensive_game.ActionFloat({\n action: advantages[0, self.action_indexer.get_index(action)] for action in self.action_indexer.actions\n })\n\n def train(self, batch: typing.List[AdvantageMemoryElement],\n action_indexer: neural_game.ActionIndexer,\n info_set_vectoriser: neural_game.InfoSetVectoriser,\n current_time: int):\n \"\"\"Train on one batch of AdvantageMemoryElements.\n\n Args:\n batch: list of AdvantageMemoryElement objects.\n action_indexer: ActionIndexer. Turns actions into indices.\n info_set_vectoriser: InfoSetVectoriser. Turns info set ids into vectors.\n current_time: int. 
The current iteration we are training on.\n\n Returns:\n loss: float.\n summary: tensorflow summary.\n \"\"\"\n # Each batch is an AdvantageMemoryElement.\n info_set_vectors = [info_set_vectoriser.get_vector(element.info_set_id) for element in batch]\n times = [element.t for element in batch]\n info_set_advantages = [info_set_advantages_to_vector(action_indexer, element.info_set_advantages)\n for element in batch]\n\n _, computed_loss, summary = self.sess.run(\n [self.tensors['train_op'], self.tensors['loss'], self.tensors['summary']],\n feed_dict={\n self.tensors['input_layer']: np.array(info_set_vectors),\n self.tensors['times']: np.array(times).reshape(-1, 1),\n self.tensors['current_time']: current_time,\n self.tensors['info_set_advantages']: np.array(info_set_advantages).reshape(-1, action_indexer.action_dim)\n })\n\n self.global_step += 1\n\n return computed_loss, summary\n\n\ndef info_set_advantages_to_vector(action_indexer: neural_game.ActionIndexer,\n info_set_advantages: typing.Dict[typing.Any, float]):\n \"\"\"\n\n Args:\n action_indexer:\n info_set_advantages: dict mapping actions to advantages.\n\n Returns:\n vector with the advantage for each action in the correct index.\n \"\"\"\n advantages = np.zeros(action_indexer.action_dim)\n for action, advantage in info_set_advantages.items():\n advantages[action_indexer.get_index(action)] = advantage\n\n return advantages\n\n\ndef early_stopping(losses: typing.List[float], consecutive_increases: int=2):\n \"\"\"Returns True if and only if losses[-consecutive_increases-1:] is monotonically increasing.\n\n Args:\n losses: list of floats. The losses.\n consecutive_increases: int. The number of consecutive increases to see before early stopping.\n\n Returns:\n early_stop: bool. True if and only if we should early stop.\n \"\"\"\n # Can't early stop before we see enough losses.\n if len(losses) <= consecutive_increases:\n return False\n\n relevant_losses = losses[-consecutive_increases-1:]\n return sorted(relevant_losses) == relevant_losses\n\n\ndef early_stopping_water_mark(losses: typing.List[float], num_attempts: int=5):\n \"\"\"Returns True if and only if the loss has failed to beat the low water mark in num_attempts \n attempts.\n\n Args:\n losses: list of floats. The losses.\n num_attempts: int. The number of attempts to beat the low water mark.\n\n Returns:\n early_stop: bool. True if and only if we should early stop.\n \"\"\"\n # Can't early stop before we see enough losses.\n if len(losses) <= num_attempts:\n return False\n\n return min(losses[-num_attempts:]) > min(losses)\n\n\ndef train_network(network: DeepRegretNetwork, advantage_memory: buffer.Reservoir,\n action_indexer: neural_game.ActionIndexer,\n info_set_vectoriser: neural_game.InfoSetVectoriser,\n current_time: int,\n writer: tf.summary.FileWriter,\n batch_size=1024, num_sgd_updates=4000):\n \"\"\"Trains the given network from scratch\n\n Args:\n network: DeepRegretNetwork. The network to train.\n advantage_memory: Reservoir. Each entry should be an AdvantageMemoryElement.\n action_indexer: ActionIndexer. Turns actions into indices.\n info_set_vectoriser: InfoSetVectoriser. Turns information set ids into vectors.\n current_time: int. The current time.\n writer: tf.summary.FileWriter.\n batch_size: int. The size to use for each batch.\n num_sgd_updates: int. The number of sgd updates to do.\n\n Returns:\n mean_loss: float. 
The mean loss over the period.\n \"\"\"\n # First reset the network.\n network.initialise()\n\n losses = []\n\n print(\"Training.\")\n indices = list(range(len(advantage_memory)))\n for i in tqdm(range(num_sgd_updates)):\n # Shuffle the advantage memory.\n batch_indices = np.random.choice(indices, batch_size, replace=True)\n\n batch = advantage_memory.get_elements(batch_indices)\n\n loss, summary = network.train(batch, action_indexer, info_set_vectoriser, current_time=current_time)\n writer.add_summary(summary, network.global_step)\n losses.append(loss)\n\n # Early stopping.\n if early_stopping_water_mark(losses, num_attempts=20):\n print(\"Losses: {}\".format(losses))\n print(\"Early stopping.\")\n break\n\n print(\"Losses % through the data: {}\".format(\n [losses[int(frac / 100 * len(losses))] for frac in [0.0, 25.0, 50.0, 75.0, 99.99]]\n ))\n\n return np.min(losses)\n\n\ndef compute_mean_strategy(strategy_memory: buffer.Reservoir):\n \"\"\"Returns the mean strategy for each information set, weighted by time.\n\n Args:\n strategy_memory: Reservoir consisting of StrategyMemoryElement objects.\n\n Returns:\n Strategy.\n \"\"\"\n strategies = collections.defaultdict(list)\n for info_set_id, t, info_set_strategy in strategy_memory.buffer:\n strategies[info_set_id].append((t, info_set_strategy))\n\n return extensive_game.compute_weighted_strategy(strategies)\n\n\ndef deep_cfr(n_game: neural_game.NeuralGame,\n num_iters: int=100, num_traversals: int=10000,\n advantage_maxlen: int=1000000, strategy_maxlen: int=1000000,\n batch_size: int=1024, num_sgd_updates: int=100):\n \"\"\"\n Args:\n n_game: NeuralGame.\n num_iters: int. The number of iterations to run deep CFR for.\n num_traversals: int. The number of traversals per CFR iteration.\n advantage_maxlen: int. The maximum length of the advantage memories.\n strategy_maxlen: int. The maximum length of the strategy memory.\n batch_size: int. The batch size to use in training.\n num_sgd_updates: int. 
The number of sgd updates per training.\n\n Returns:\n strategy, exploitability.\n \"\"\"\n game, action_indexer, info_set_vectoriser = n_game\n\n advantage_memory1 = buffer.Reservoir(maxlen=advantage_maxlen)\n advantage_memory2 = buffer.Reservoir(maxlen=advantage_maxlen)\n strategy_memory = buffer.Reservoir(maxlen=strategy_maxlen)\n\n # Create summary tensors\n valid_summariser = util.TBSummariser(['exploitability'])\n\n time_str = time.strftime(\"%Y-%m-%d-%H:%M:%S\", time.gmtime())\n save_path = os.path.join('experiments', time_str)\n\n if not os.path.exists(save_path):\n print(\"Path doesn't exist, so creating: {}\".format(save_path))\n os.makedirs(save_path)\n\n log_file = os.path.join(save_path, 'nfsp.log')\n print(\"Log file {}\".format(log_file))\n\n print(\"To run tensorboard: tensorboard --logdir {}\".format(os.path.join(os.getcwd(), save_path)))\n\n with tf.Session() as sess:\n network1 = DeepRegretNetwork(info_set_vectoriser.state_shape, action_indexer, 1)\n network1.set_sess(sess)\n network2 = DeepRegretNetwork(info_set_vectoriser.state_shape, action_indexer, 2)\n network2.set_sess(sess)\n\n network1.initialise()\n network2.initialise()\n\n tf_train_writer = tf.summary.FileWriter(os.path.join(save_path, 'train'), graph=sess.graph)\n\n # Iterate over players and do cfr traversals.\n for t in range(1, num_iters + 1):\n print(\"Iteration t = {}\".format(t))\n for player in [1, 2]:\n print(\"Player: {}\".format(player))\n print(\"Traversing\")\n for i in tqdm(range(num_traversals)):\n cfr_traverse(game, action_indexer, info_set_vectoriser,\n game.root, player, network1, network2,\n advantage_memory1, advantage_memory2,\n strategy_memory, t)\n\n # Train the traversing player's network on the cfr traversals.\n network = network1 if player == 1 else network2\n network.initialise()\n advantage_memory = advantage_memory1 if player == 1 else advantage_memory2\n mean_loss = train_network(\n network, advantage_memory, action_indexer, info_set_vectoriser, t,\n tf_train_writer, batch_size, num_sgd_updates)\n\n print(\"Mean loss: {}\".format(mean_loss))\n tf_train_writer.flush()\n\n # print(\"################\")\n #\n # print(\"----------------\")\n # print(\"Advantage memory 1:\")\n # print(advantage_memory1.buffer)\n # print(\"----------------\")\n # print(\"Advantage memory 2:\")\n # print(advantage_memory2.buffer)\n # print(\"----------------\")\n #\n # print(\"################\")\n #\n\n # print(\"----------------\")\n # print(\"Predicted advantages:\")\n # for info_set_id in set(game.info_set_ids.values()):\n # print(\"{}: {}\".format(\n # info_set_id,\n # network.predict_advantages(info_set_vectoriser.get_vector(info_set_id), action_indexer))\n # )\n # print(\"----------------\")\n #\n\n print(\"Advantage memory 1 length: {}\".format(len(advantage_memory1)))\n print(\"Advantage memory 2 length: {}\".format(len(advantage_memory2)))\n print(\"Strategy memory length: {}\".format(len(strategy_memory)))\n\n mean_strategy = compute_mean_strategy(strategy_memory)\n # print(\"Strategy summary\")\n # print(mean_strategy)\n if game.is_strategy_complete(mean_strategy):\n exploitability = best_response.compute_exploitability(game, mean_strategy)\n else:\n print(\"Strategy not complete, filling uniformly.\")\n exploitability = best_response.compute_exploitability(\n game,\n mean_strategy,\n )\n print(\"Exploitability: {} mbb/h\".format(exploitability * 1000))\n\n valid_summary = valid_summariser.summarise(sess, {'exploitability': exploitability})\n tf_train_writer.add_summary(valid_summary, 
global_step=t)\n\n # TODO(chrisn). Train the network on the strategy memory.\n return mean_strategy, exploitability\n\n\ndef cfr_traverse(game: extensive_game.ExtensiveGame, action_indexer: neural_game.ActionIndexer,\n info_set_vectoriser: neural_game.InfoSetVectoriser,\n node: extensive_game.ExtensiveGameNode, player: int,\n network1: RegretPredictor, network2: RegretPredictor,\n advantage_memory1: buffer.Reservoir, advantage_memory2: buffer.Reservoir,\n strategy_memory: buffer.Reservoir, t: int):\n \"\"\"\n\n Args:\n game: ExtensiveGame.\n action_indexer: ActionIndexer. This maps actions to indices, so that we can use neural networks.\n info_set_vectoriser: InfoSetVectoriser. This maps information sets to vectors, so we can use neural networks.\n node: ExtensiveGameNode. The current node.\n player: int. The traversing player. Either 1 or 2.\n network1: RegretPredictor. The network for player 1.\n network2: RegretPredictor. The network for player 2.\n advantage_memory1: Reservoir. The advantage memory for player 1.\n advantage_memory2: Reservoir. The advantage memory for player 2.\n strategy_memory: Reservoir. The strategy memory (for both players).\n t: int. The current iteration of deep cfr.\n\n Returns:\n\n \"\"\"\n if is_terminal(node):\n return payoffs(node)[player]\n elif which_player(node) == 0:\n # Chance player\n a = sample_chance_action(node)\n return cfr_traverse(game, action_indexer, info_set_vectoriser, node.children[a], player,\n network1, network2,\n advantage_memory1, advantage_memory2, strategy_memory, t)\n elif which_player(node) == player:\n # It's the traversing player's turn.\n state_vector = info_set_vectoriser.get_vector(game.get_info_set_id(node))\n values = dict()\n for action in get_available_actions(node):\n child = node.children[action]\n values[action] = cfr_traverse(game, action_indexer, info_set_vectoriser, child, player,\n network1, network2,\n advantage_memory1, advantage_memory2, strategy_memory, t)\n assert values[action] is not None, print(\"Shouldn't be None! 
node was: {}\".format(node))\n info_set_regrets = dict()\n\n # Compute the player's strategy\n network = network1 if player == 1 else network2\n if t == 1:\n # This is the equivalent of initialising the network so it starts with all zeroes.\n info_set_strategy = extensive_game.ActionFloat.initialise_uniform(action_indexer.actions)\n else:\n info_set_strategy = network.compute_action_probs(state_vector, action_indexer)\n\n sampled_counterfactual_value = sum([info_set_strategy[action] * values[action] for action in\n get_available_actions(\n node)])\n for action in get_available_actions(node):\n info_set_regrets[action] = values[action] - sampled_counterfactual_value\n\n info_set_id = game.info_set_ids[node]\n advantage_memory = advantage_memory1 if player == 1 else advantage_memory2\n advantage_memory.append(AdvantageMemoryElement(info_set_id, t, info_set_regrets))\n\n # In traverser infosets, the value passed back up is the weighted average of all action values,\n # where action a’s weight is info_set_strategy[a]\n return sampled_counterfactual_value\n else:\n # It's the other player's turn.\n state_vector = info_set_vectoriser.get_vector(game.get_info_set_id(node))\n\n # Compute the other player's strategy\n other_player = 1 if player == 2 else 2\n network = network1 if other_player == 1 else network2\n if t == 1:\n # This is the equivalent of initialising the network so it starts with all zeroes.\n info_set_strategy = extensive_game.ActionFloat.initialise_uniform(action_indexer.actions)\n else:\n info_set_strategy = network.compute_action_probs(state_vector, action_indexer)\n\n info_set_id = game.info_set_ids[node]\n strategy_memory.append(StrategyMemoryElement(info_set_id, t, info_set_strategy))\n\n action = sample_action(info_set_strategy, available_actions=get_available_actions(node))\n return cfr_traverse(game, action_indexer, info_set_vectoriser, node.children[action], player,\n network1, network2, advantage_memory1, advantage_memory2, strategy_memory, t)\n","sub_path":"rlpoker/deep_cfr.py","file_name":"deep_cfr.py","file_ext":"py","file_size_in_byte":21370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"507286415","text":"from faker import Faker\nfrom faker.providers import BaseProvider\n\nfake = Faker('pt_BR')\n\n\nclass ConsolidadoProvider(BaseProvider):\n def consolidado(self):\n return {\n \"data\": \"2020-08-06 00:00\", \"dir\": \"C\", \"evap_piche\": 4.45,\n \"evap_piche_ar\": 13.8, \"piche\": 4.45, \"piche_ar\": 13.8,\n \"pressao\": 704.2, \"pressao_hpa\": 802.7, \"qtda\": 0, \"qtdb\": 0,\n \"qtdm\": 0, \"t10cm\": 18.6, \"t20cm\": 20.7, \"t30cm\": 20.7,\n \"t40cm\": 20.5, \"t5cm\": 16.7, \"temp_bar\": 17.0, \"tipoa\": \"\",\n \"tipob\": \"\", \"tipom\": \"\", \"tmax\": 14.0, \"tmin\": 11.5,\n \"tseco\": 12.0, \"tsfc\": 11.9, \"tumido\": 11.4, \"vento\": 0.0, \"vis\": 6\n }\n\n\ndef consolidado_factory(lenght=1):\n data = list()\n fake.add_provider(ConsolidadoProvider)\n for _ in range(lenght):\n data.append(fake.consolidado())\n return data\n","sub_path":"estacao/core/tests/factories/consolidado.py","file_name":"consolidado.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"61184485","text":"import datetime\nimport calendar\nimport pytz\nfrom tzlocal import get_localzone\n\nUTC_TZ = pytz.timezone('utc')\nLOCAL_TZ = get_localzone()\n\n\ndef utc_to_china_time(utc_dt):\n return convert_timezone(utc_dt, 'Asia/Shanghai')\n\n\ndef 
datetime_to_timestamp(dt):\n tz = dt.tzinfo\n if not tz:\n # dt MUST have timezone info\n raise ValueError\n return calendar.timegm(dt.utctimetuple())\n\n\ndef timestamp_to_datetime(ts, tz='utc'):\n dt = datetime.datetime.utcfromtimestamp(ts)\n utc_dt = UTC_TZ.localize(dt)\n target_tz = pytz.timezone(tz)\n return target_tz.normalize(utc_dt.astimezone(target_tz))\n\n\ndef localized_datetime(dt):\n \"\"\"Convert Local Datetime to UCT Datetime.\"\"\"\n _dt = dt\n if not dt.tzinfo:\n _dt = pytz.timezone(LOCAL_TZ.zone).localize(_dt)\n return convert_timezone(_dt, 'utc')\n\n\ndef convert_timezone(dt, target_tz):\n tz = dt.tzinfo\n if not tz:\n # dt MUST have timezone info\n raise ValueError\n tz = pytz.timezone(target_tz)\n return tz.normalize(dt.astimezone(tz))\n","sub_path":"jedi/jedi/libs/utils/dt.py","file_name":"dt.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"407923100","text":"# /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Vojtech Burian\n@summary: Common library for functions executing actions over Ripple Emulator UI (Mobile OS web emulator)\n\"\"\"\nimport time\n\nfrom selenium.webdriver.common.by import By\n\n\nclass TestRipple():\n\n def __init__(self, driver):\n self.driver = driver\n self.left_section_locator = (By.CSS_SELECTOR, '.left.sortable.main.ui-sortable')\n self.right_section_locator = (By.CSS_SELECTOR, '.right.sortable.main.ui-sortable')\n self.left_arrow_locator = (By.CSS_SELECTOR, '#ui > section.left-panel-collapse.ui-state-default.'\n 'ui-corner-all.ui-state-hover > span')\n self.right_arrow_locator = (By.CSS_SELECTOR, '#ui > section.right-panel-collapse.ui-state-default.'\n 'ui-corner-all.ui-state-hover > span')\n self.gps_header_locator = (By.CSS_SELECTOR, '#gps-container > section.h2.info-header > section.collapse-handle')\n self.gps_container_locator = (By.CSS_SELECTOR, '#gps-container > section.info.ui-widget-content.ui-corner-all')\n self.geo_latitude_locator = (By.ID, 'geo-latitude')\n self.geo_longitude_locator = (By.ID, 'geo-longitude')\n\n def prepare_for_testing(self):\n \"\"\" prepares Ripple Simulator for App testing \"\"\"\n self.expand_left_section(False)\n self.expand_right_section(False)\n self.switch_to_ripple_app()\n\n def expand_left_section(self, expand=True):\n \"\"\" expands or hides left section of Ripple UI controls \"\"\"\n left_section = self.driver.find_element(*self.left_section_locator)\n left_arrow = self.driver.find_element(*self.left_arrow_locator)\n if expand:\n if left_section.get_attribute('style') not in ('left: 0px; opacity: 1;', ''):\n left_arrow.click()\n time.sleep(2)\n else:\n if left_section.get_attribute('style') in ('left: 0px; opacity: 1;', ''):\n left_arrow.click()\n time.sleep(2)\n\n def expand_right_section(self, expand=True):\n \"\"\" expands or hides right section of Ripple UI controls \"\"\"\n right_section = self.driver.find_element(*self.right_section_locator)\n right_arrow = self.driver.find_element(*self.right_arrow_locator)\n if expand:\n if right_section.get_attribute('style') not in ('right: 0px; opacity: 1;', ''):\n right_arrow.click()\n time.sleep(2)\n else:\n if right_section.get_attribute('style') in ('right: 0px; opacity: 1;', ''):\n right_arrow.click()\n time.sleep(2)\n\n def switch_to_ripple_app(self):\n \"\"\" switches to PhoneGap HMTL app iframe (so selenium can target elements inside it) \"\"\"\n app_frame = self.driver.find_elements_by_tag_name('iframe')[0]\n 
self.driver.switch_to_frame(app_frame)\n\n def switch_from_ripple_app(self):\n \"\"\" switches into the default Ripple Emulator UI DOM (so selenium can target Ripple control elements) \"\"\"\n self.driver.switch_to_default_content()\n\n def set_geo_location(self, lat, long):\n \"\"\" sets location to given coordinates \"\"\"\n self.switch_from_ripple_app()\n self.expand_right_section(True)\n if not self.driver.find_element(*self.gps_container_locator).is_displayed():\n self.driver.find_element(*self.gps_header_locator).click()\n time.sleep(1)\n latitude = self.driver.find_element(*self.geo_latitude_locator)\n longitude = self.driver.find_element(*self.geo_longitude_locator)\n latitude.clear()\n longitude.clear()\n latitude.send_keys(str(lat))\n longitude.send_keys(str(long))\n self.expand_right_section(False)\n self.driver.refresh()\n self.switch_to_ripple_app()","sub_path":"salsa_webqa/library/support/ripple.py","file_name":"ripple.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"517152830","text":"import discord\nfrom discord.ext import commands\nimport bs4 as bs\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nimport json\nimport io\nimport safygiphy\nfrom ext import embedtobox\n\nclass Nsfw:\n \"\"\" Nsfw commands \"\"\"\n def __init__(self, bot):\n self.bot = bot\n\n async def __local_check(self, ctx):\n if not ctx.channel.is_nsfw():\n return False\n git = self.bot.get_cog('Git')\n if not await git.starred('kyb3r/selfbot.py'):\n return False\n return True\n\n @commands.group(invoke_without_command=True)\n async def nsfw(self, ctx):\n \"\"\" Get random lewds from the web \"\"\"\n pass\n\n @nsfw.command()\n async def xbooru(self, ctx):\n \"\"\" Random image from Xbooru \"\"\"\n try:\n try:\n await ctx.message.delete()\n except discord.Forbidden:\n pass\n await ctx.channel.trigger_typing()\n query = urllib.request.urlopen(\"http://xbooru.com/index.php?page=post&s=random\").read()\n soup = bs.BeautifulSoup(query, 'html.parser')\n image = soup.find(id=\"image\").get(\"src\")\n last = str(image.split('?')[-2]).replace('//', '/').replace(':/', '://')\n em = discord.Embed(colour=discord.Colour(0xed791d))\n em.description = f'[Full Size Link*]({last})'\n em.set_image(url=last)\n em.set_footer(text='* click link at your own risk!')\n try:\n await ctx.send(embed=em)\n except discord.HTTPException:\n await ctx.send('Unable to send embeds here!')\n try:\n async with ctx.session.get(image) as resp:\n image = await resp.read()\n with io.BytesIO(image) as file:\n await ctx.send(file=discord.File(file, 'xbooru.png'))\n except discord.HTTPException:\n await ctx.send(image)\n\n except Exception as e:\n await ctx.send(f'```{e}```')\n\n @commands.command(aliases=['gelbooru'])\n async def gel(self, ctx):\n \"\"\" Random image from Gelbooru \"\"\"\n try:\n try:\n await ctx.message.delete()\n except discord.Forbidden:\n pass\n\n await ctx.channel.trigger_typing()\n query = urllib.request.urlopen(\"http://www.gelbooru.com/index.php?page=post&s=random\").read()\n soup = bs.BeautifulSoup(query, 'html.parser')\n sans = soup.find_all('div', {'class': 'highres-show'})\n partial = soup.find(id=\"image\").get(\"src\")\n image = partial.replace('//', '/').replace(':/', '://')\n\n em = discord.Embed(colour=discord.Colour(0xed791d))\n em.description = f'[Full Size Link*]({image})'\n em.set_image(url=image)\n em.set_footer(text='* click link at your own risk!')\n try:\n 
await ctx.send(embed=em)\n except discord.HTTPException:\n # em_list = await embedtobox.etb(em)\n # for page in em_list:\n # await ctx.send(page)\n await ctx.send('Unable to send embeds here!')\n try:\n async with ctx.session.get(image) as resp:\n image = await resp.read()\n with io.BytesIO(image) as file:\n await ctx.send(file=discord.File(file, 'gelbooru.png'))\n except discord.HTTPException:\n await ctx.send(image)\n\n except Exception as e:\n await ctx.send(f'```{e}```')\n\n @nsfw.command()\n async def gif(self, ctx, *, tag):\n \"\"\" Get a random lewd gif\n Usage: gif \n Available tags: rule34, nsfw, hentai, tits... \"\"\"\n try:\n await ctx.message.delete()\n except discord.Forbidden:\n pass\n g = safygiphy.Giphy()\n gif = g.random(tag=tag)\n color = await ctx.get_dominant_color(ctx.author.avatar_url)\n em = discord.Embed(color=color)\n em.set_image(url=str(gif.get('data', {}).get('image_original_url')))\n try:\n await ctx.send(embed=em)\n except discord.HTTPException:\n em_list = await embedtobox.etb(em)\n for page in em_list:\n await ctx.send(page)\n\n\ndef setup(bot):\n bot.add_cog(Nsfw(bot))\n","sub_path":"dataset/py/nsfw.py","file_name":"nsfw.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"279422362","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 21 10:02:43 2016\n@author: zche\n\nGiven model_?.vector, reorder the embeddings for features by mapping_8627.npy and save the array to word2vec_?.npy\n\"\"\"\n\n\nimport numpy as np\n\nmapping = np.load('mapping_8627.npy').item()\n\nfor n_dim in [50, 200, 500, 800]:\n input_file = 'model_{}.vector'.format(n_dim)\n output_file = 'word2vec_{}.npy'.format(n_dim)\n with open(input_file, 'r') as f:\n lines = f.readlines()\n output = np.zeros([len(lines), n_dim], dtype=float)\n for line in lines[1:]:\n word_id = int(line.split(' ')[0])\n word_embedding = line.split(' ')[1:]\n line_id = mapping[word_id]\n output[line_id] = word_embedding\n np.save(output_file, output)\n ","sub_path":"CNN_EHR/data/word2vec/model_vector_to_word2vec_npy.py","file_name":"model_vector_to_word2vec_npy.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"296227573","text":"\r\n#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\nimport sys\r\nsys.path.insert(0, '../')\r\n\r\n# No GPU because working locally\r\nimport os\r\n#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"\"\r\n\r\nimport time\r\nimport tensorflow as tf\r\nfrom gantools import utils\r\nfrom gantools.gansystem import GANsystem\r\nfrom gantools import blocks\r\nfrom audioinpainting.load_generator import Dataset_maestro\r\nfrom audioinpainting.model_extend import InpaintingGAN\r\n\r\n# # Parameters\r\n\r\ndownscale = 2\r\n\r\n# # Data handling\r\n# Load the data\r\n\r\nstart = time.time()\r\ndataset = Dataset_maestro(scaling=downscale, spix=1024*52, augmentation=True, maxsize=2, type='maestro', path='../data', fs_rate=48000, files=8, preprocessing=False)\r\n\r\nprint('Number of samples: {}'.format(dataset.N))\r\n\r\n\r\n#%%\r\n# # Define parameters for the WGAN\r\ntime_str = 'extend_maestro'\r\nglobal_path = '/saved_results'\r\n\r\nname = 'WGAN' + '_' + time_str\r\n\r\n#%%\r\n# ## Parameters\r\n\r\nbn = False\r\nsignal_length = 1024*52\r\nsignal_split = [1024*18, 1024*6, 1024*4, 1024*6, 1024*18]\r\nmd = 64\r\n\r\nparams_discriminator = dict()\r\nparams_discriminator['stride'] = 
[4,4,4,4,4]\r\nparams_discriminator['nfilter'] = [md, 2*md, 4*md, 8*md, 16*md]\r\nparams_discriminator['shape'] = [[25], [25], [25], [25], [25]]\r\nparams_discriminator['batch_norm'] = [bn, bn, bn, bn, bn]\r\nparams_discriminator['full'] = [md*4]\r\nparams_discriminator['minibatch_reg'] = False\r\nparams_discriminator['summary'] = True\r\nparams_discriminator['data_size'] = 1\r\nparams_discriminator['apply_phaseshuffle'] = True \r\nparams_discriminator['spectral_norm'] = True\r\nparams_discriminator['activation'] = blocks.lrelu\r\n\r\n\r\nparams_generator = dict()\r\nparams_generator['stride'] = [4, 4, 4, 4, 4]\r\nparams_generator['latent_dim'] = 100\r\nparams_generator['nfilter'] = [8*md, 4*md, 2*md, md, 1]\r\nparams_generator['shape'] = [[25], [25], [25], [25], [25]]\r\nparams_generator['batch_norm'] = [bn, bn, bn, bn]\r\nparams_generator['full'] = [64*md]\r\nparams_generator['summary'] = True\r\nparams_generator['non_lin'] = tf.nn.tanh\r\nparams_generator['activation'] = tf.nn.relu\r\nparams_generator['data_size'] = 1\r\nparams_generator['spectral_norm'] = True \r\nparams_generator['in_conv_shape'] =[4]\r\n\r\nparams_generator['borders'] = dict()\r\nparams_generator['borders']['nfilter'] = [md, 2*md, 4*md, 8*md, 2*md]\r\nparams_generator['borders']['batch_norm'] = [bn, bn, bn, bn, bn]\r\nparams_generator['borders']['shape'] = [[25], [25], [25], [25], [25]]\r\nparams_generator['borders']['stride'] = [4, 4, 4, 4, 4]\r\nparams_generator['borders']['data_size'] = 1\r\nparams_generator['borders']['width_full'] = 128\r\nparams_generator['borders']['activation'] = tf.nn.relu\r\n\r\n\r\nparams_optimization = dict()\r\nparams_optimization['batch_size'] = 64\r\nparams_optimization['epoch'] = 10000\r\nparams_optimization['n_critic'] = 5\r\nparams_optimization['generator'] = dict()\r\nparams_optimization['generator']['optimizer'] = 'adam'\r\nparams_optimization['generator']['kwargs'] = {'beta1':0.5, 'beta2':0.9}\r\nparams_optimization['generator']['learning_rate'] = 1e-4\r\nparams_optimization['discriminator'] = dict()\r\nparams_optimization['discriminator']['optimizer'] = 'adam'\r\nparams_optimization['discriminator']['kwargs'] = {'beta1':0.5, 'beta2':0.9}\r\nparams_optimization['discriminator']['learning_rate'] = 1e-4\r\n\r\n\r\n# all parameters\r\nparams = dict()\r\nparams['net'] = dict() # All the parameters for the model\r\nparams['net']['generator'] = params_generator\r\nparams['net']['discriminator'] = params_discriminator\r\nparams['net']['prior_distribution'] = 'gaussian'\r\nparams['net']['shape'] = [signal_length, 1] # Shape of the image\r\nparams['net']['inpainting'] = dict()\r\nparams['net']['inpainting']['split'] = signal_split\r\nparams['net']['gamma_gp'] = 10 # Gradient penalty\r\nparams['net']['fs'] = 16000//downscale\r\nparams['net']['loss_type'] ='wasserstein'\r\n\r\nparams['optimization'] = params_optimization\r\nparams['summary_every'] = 100 # Tensorboard summaries every ** iterations\r\nparams['print_every'] = 50 # Console summaries every ** iterations\r\nparams['save_every'] = 1000 # Save the model every ** iterations\r\nparams['summary_dir'] = os.path.join(global_path, name +'_summary/')\r\nparams['save_dir'] = os.path.join(global_path, name + '_checkpoints/')\r\nparams['Nstats'] = 0\r\n\r\n\r\nresume, params = utils.test_resume(False, params)\r\n\r\n#%%\r\n# # Build the model\r\nwgan = GANsystem(InpaintingGAN, params)\r\n\r\n# # Train the model\r\nwgan.train(dataset, resume=resume)\r\n\r\nend = time.time()\r\nprint('Elapse time: {} minutes'.format((end - 
start)/60))\r\n","sub_path":"code/experiments/myexperiments-extend-maestro.py","file_name":"myexperiments-extend-maestro.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"360121548","text":"# coding: mbcs \r\n\r\nimport wx\r\nimport MainFrame\r\nfrom todpython import * \r\n\r\n#-------------------------------------------------------------------------------\r\ndef initialize(parent, obj):\r\n frame = RpBaseEditor(MainFrame.MainFrame.instance())\r\n frame.initialize(obj)\r\n MainFrame.MainFrame.instance().auimgr.AddPane(frame, wx.aui.AuiPaneInfo().MinSize(wx.Size(200, 100)).\r\n Caption('RenderPath - Base Editor').Dockable(True).Right().CloseButton(True).MinimizeButton(True).DestroyOnClose(True).Float())\r\n MainFrame.MainFrame.instance().auimgr.Update()\r\n\r\n\r\n#-------------------------------------------------------------------------------\r\nclass RpBaseEditor(wx.Panel):\r\n def __init__(self, parent):\r\n wx.Panel.__init__(self, parent, style=wx.WANTS_CHARS)\r\n \r\n def initialize(self, obj):\r\n self.obj = obj\r\n\r\n self.mainSizer = wx.BoxSizer(wx.VERTICAL)\r\n\r\n for i in range(0, self.obj.getShaderParamNum()):\r\n name, semantic, type, value = self.obj.getShaderParamDesc(i)\r\n\r\n bsizer = wx.BoxSizer(wx.HORIZONTAL)\r\n static = wx.StaticText(id=wx.NewId(), label=name, parent=self, style=wx.ALIGN_RIGHT)\r\n static.SetMinSize(wx.Size(150, 14))\r\n bsizer.Add(static)\r\n textctrl = wx.TextCtrl(id=wx.NewId(), name=name + \":\" + type, value=value, parent=self)\r\n self.Bind(wx.EVT_TEXT_ENTER, self.OnTextEnter, id=textctrl.GetId())\r\n bsizer.Add(textctrl, 1, wx.ALL | wx.EXPAND)\r\n\r\n self.mainSizer.Add(bsizer, 0, wx.ALL | wx.EXPAND) \r\n\r\n self.SetSizer(self.mainSizer)\r\n self.Fit()\r\n\r\n def OnTextEnter(self, event):\r\n t = event.GetEventObject()\r\n n = t.GetName().split(':')\r\n param_name = str(n[0])\r\n param_type = str(n[1])\r\n\r\n if param_type == 'TYPE_FLOAT':\r\n self.obj.setFloat(param_name, float(t.GetValue()))\r\n elif param_type == 'TYPE_TEXTURE':\r\n self.obj.setTexture(param_name, str(t.GetValue()))\r\n \r\n","sub_path":"bin/win32/debug/tool/plugins/RpBase/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"600624544","text":"import os\nimport json\n\nwith open('E:\\桌面\\笔记\\Django\\DrPro\\static\\json\\index.json', 'r', encoding=\"utf-8\") as f:\n obj = json.load(f)\n list1 = []\n # print(obj, type(obj))\n for i in obj:\n # print('insert into lunbo(\"img\",\"id\",) values(\"%s\",\"%s\");' % (i.get(\"img\"), i.get(\"id\")))\n # print(i[-1])\n print(obj.index(i))\n for j in i:\n list1.append(j)\n # print(i.index(j))\n # print(j)\n # print('insert into dr_goodlist(index_id,title,src,price,discript,sale,com,material1,material2,price1,price2,is_delete) values(%s,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",0);' % (j.get(\"index\"),j.get(\"title\"),j.get(\"src\"),j.get(\"price\"),j.get(\"discript\"),j.get(\"sale\"),j.get(\"com\"),j.get(\"material1\"),j.get(\"material2\"),j.get(\"price1\"),j.get(\"price2\")))\n # for k in (list1[:2] + list1[14:18]): # 导航栏\n # for k in list1[18:22]: # 主页大图\n # for k in list1[22:26]: # 店铺图\n # for k in list1[2:12]: # 产品图\n for k in list1:\n print(k, list1.index(k))\n # print('insert into dr_bar(title,src) values(\"%s\",\"/static/%s\");' % (k.get(\"title\"), k.get(\"src\"))) # 
导航栏\n # print('insert into dr_storeimg(title,src) values(\"%s\",\"/static/%s\");' % (k.get(\"title\"), k.get(\"src\"))) # 店铺图\n # print('insert into dr_productimg(title,src,href) values(\"%s\",\"/static/%s\",\"%s\");' % (k.get(\"title\"), k.get(\"src\"), k.get(\"href\"))) # 产品图\n # for m in k:\n # print('insert into dr_indeximg(src) values(\"/static/%s\");' % m.get(\"src\")) # 主页大图\n","sub_path":"DrPro/App/addjson.py","file_name":"addjson.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"113948163","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport webapp2\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.api import users\nimport wsgiref.handlers\nfrom google.appengine.ext.webapp import template\n\n\nimport atom.url\n\nimport gdata.service\nimport gdata.alt.appengine\nimport gdata.photos.service\nimport gdata.media\nimport gdata.geo\nimport gdata.youtube\nimport gdata.youtube.service\n\nfrom getdata import *\nfrom pygooglechart import Chart\nfrom pygooglechart import SimpleLineChart\nfrom pygooglechart import Axis\n\n\nclass IndexHandler(webapp2.RequestHandler):\n def get(self):\n username = 'aayushahuja'\n gd_client = gdata.photos.service.PhotosService()\n \n albums = gd_client.GetUserFeed(user=username)\n for album in albums.entry:\n if album.title.text == 'test':\n photos = gd_client.GetFeed(\n '/data/feed/api/user/%s/albumid/%s?kind=photo' % (\n username, album.gphoto_id.text))\n title = 'about'\n subtitle = getdata(title,'NGOData').custom['subtitle'].text\n content = getdata(title,'NGOData').custom['content'].text\n self.response.content_type='text/html'\n self.response.out.write(template.render('template/index.html',{\n 'photolist':photos.entry,\n 'page_title':title.title(),\n 'page_subtitle':subtitle,\n 'page_content':content,\n }))\nclass CharitiesHandler(webapp2.RequestHandler):\n def get(self):\n \n title = 'charities'\n subtitle = getdata(title,'NGOData').custom['subtitle'].text\n content = getdata(title,'NGOData').custom['content'].text\n self.response.content_type='text/html'\n self.response.out.write(template.render('template/standard.html',{\n \n 'page_title':title.title(),\n 'page_subtitle':subtitle,\n 'page_content':content,\n }))\n\nclass ProgramsHandler(webapp2.RequestHandler):\n def get(self):\n title = 'programs'\n subtitle = getdata(title,'NGOData').custom['subtitle'].text\n content = getdata(title,'NGOData').custom['content'].text\n self.response.content_type='text/html'\n self.response.out.write(template.render('template/standard.html',{\n \n 'page_title':title.title(),\n 'page_subtitle':subtitle,\n 'page_content':content,\n }))\nclass ActivitiesHandler(webapp2.RequestHandler):\n def get(self):\n \n title = 'activities'\n subtitle = getdata(title,'NGOData').custom['subtitle'].text\n content = getdata(title,'NGOData').custom['content'].text\n 
self.response.content_type='text/html'\n self.response.out.write(template.render('template/standard.html',{\n \n 'page_title':title.title(),\n 'page_subtitle':subtitle,\n 'page_content':content,\n }))\n\n\nclass YoutubeHandler(webapp2.RequestHandler):\n def PrintVideoFeed(feed):\n for entry in feed.entry:\n PrintEntryDetails(entry)\n def get(self):\n username = 'tseries' # to be changed to NGO's username\n yt_service = gdata.youtube.service.YouTubeService()\n uri = 'http://gdata.youtube.com/feeds/api/users/%s/uploads' % username\n feed = yt_service.GetYouTubeVideoFeed(uri)\n list = []\n for entry in feed.entry:\n list.append(entry.GetSwfUrl())\n list = list[0:4]\n self.response.out.write(template.render('template/videos.html',{\n 'list':list,\n }))\n \n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n self.redirect('/index') \n \nclass ContactsHandler(webapp2.RequestHandler):\n def get(self):\n title = 'contacts'\n subtitle = getdata(title,'NGOData').custom['subtitle'].text\n content = getdata(title,'NGOData').custom['content'].text\n self.response.content_type='text/html'\n self.response.out.write(template.render('template/contacts.html',{\n \n 'page_title':title.title(),\n 'page_subtitle':subtitle,\n 'page_content':content, }))\n \nclass DonationHandler(webapp2.RequestHandler):\n def get(self):\n list = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep']\n list2 = []\n for title in list:\n list2.append( int(getdata(title,'funds').custom['amount'].text))\n max_y = 10000\n chart = SimpleLineChart(200, 125, y_range=[0, max_y])\n \n chart.add_data([2000,3000,5000,1200,5000,4000,1000,3000,5900])\n \n # Set the line colour to blue\n chart.set_colours(['0000FF'])\n \n # Set the vertical stripes\n chart.fill_linear_stripes(Chart.CHART, 0, 'CCCCCC', 0.2, 'FFFFFF', 0.2)\n \n # Set the horizontal dotted lines\n chart.set_grid(0, 25, 5, 5)\n \n # The Y axis labels contains 0 to 100 skipping every 25, but remove the\n # first number because it's obvious and gets in the way of the first X\n # label.\n left_axis = range(0, max_y + 1, 25)\n left_axis[0] = ''\n chart.set_axis_labels(Axis.LEFT, left_axis)\n \n # X axis labels\n chart.set_axis_labels(Axis.BOTTOM, list)\n \n url2 = chart.get_url()\n self.response.out.write(template.render('template/donate.html',{\n \n 'url2' :url2, }))\n \n\n \n\n \napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n ('/index', IndexHandler),\n ('/activities', ActivitiesHandler),\n ('/charities', CharitiesHandler),\n ('/programs', ProgramsHandler),\n ('/contacts', ContactsHandler),\n ('/donate', DonationHandler),\n ('/videos', YoutubeHandler),\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"295351381","text":"import re\nimport logging\nimport traceback\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout as django_logout\nfrom django.template import RequestContext\nfrom django.conf import settings\nfrom django.views.decorators.cache import cache_control\nfrom userservice.user import UserService\nfrom myuw.dao.term import get_current_quarter\nfrom myuw.dao.pws import is_student\nfrom myuw.dao.affiliation import get_all_affiliations, is_oldmyuw_user\nfrom myuw.dao.emaillink import get_service_url_for_address\nfrom myuw.dao.exceptions 
import EmailServiceUrlException\nfrom myuw.logger.timer import Timer\nfrom myuw.logger.logback import log_exception\nfrom myuw.logger.logresp import log_invalid_netid_response\nfrom myuw.logger.logresp import log_success_response_with_affiliation\nfrom myuw.logger.session_log import log_session\nfrom myuw.views.rest_dispatch import invalid_session\nfrom myuw.dao.uwemail import get_email_forwarding_for_current_user\nfrom myuw.dao.card_display_dates import get_card_visibilty_date_values\n\n\nlogger = logging.getLogger(__name__)\nLOGOUT_URL = \"/user_logout\"\n\n\n@login_required\n@cache_control(max_age=0, no_cache=True, no_store=True, must_revalidate=True)\ndef index(request,\n year=None,\n quarter=None,\n summer_term=None):\n\n timer = Timer()\n netid = UserService().get_user()\n if not netid:\n log_invalid_netid_response(logger, timer)\n return invalid_session()\n\n if _is_mobile(request):\n # On mobile devices, all students get the current myuw. Non-students\n # are sent to the legacy site.\n try:\n if not is_student():\n logger.info(\"%s not a student, redirect to legacy!\" % netid)\n return redirect_to_legacy_site()\n except Exception:\n log_exception(logger,\n '%s is_student' % netid,\n traceback.format_exc())\n logger.info(\"%s, redirected to legacy!\" % netid)\n return redirect_to_legacy_site()\n\n else:\n if is_oldmyuw_user():\n return redirect_to_legacy_site()\n\n context = {\n \"year\": year,\n \"quarter\": quarter,\n \"summer_term\": summer_term,\n \"home_url\": \"/\",\n \"err\": None,\n \"user\": {\n \"netid\": None,\n \"affiliations\": get_all_affiliations(request)\n },\n \"card_display_dates\": get_card_visibilty_date_values(request),\n }\n\n context[\"user\"][\"session_key\"] = request.session.session_key\n log_session(netid, request.session.session_key, request)\n try:\n my_uwemail_forwarding = get_email_forwarding_for_current_user()\n if my_uwemail_forwarding.is_active():\n c_user = context[\"user\"]\n try:\n (c_user['email_forward_url'],\n c_user['email_forward_title'],\n c_user['email_forward_icon']) = get_service_url_for_address(\n my_uwemail_forwarding.fwd)\n except EmailServiceUrlException:\n c_user['login_url'] = None\n c_user['title'] = None\n c_user['icon'] = None\n logger.info('No Mail Url: %s' % (\n my_uwemail_forwarding.fwd))\n\n except Exception:\n log_exception(logger,\n 'get_email_forwarding_for_current_user',\n traceback.format_exc())\n pass\n\n context[\"user\"][\"netid\"] = netid\n if year is None or quarter is None:\n cur_term = get_current_quarter(request)\n if cur_term is None:\n context[\"err\"] = \"No current quarter data!\"\n else:\n context[\"year\"] = cur_term.year\n context[\"quarter\"] = cur_term.quarter\n else:\n pass\n log_success_response_with_affiliation(logger, timer, request)\n return render(request, \"index.html\", context)\n\n\ndef _is_mobile(request):\n user_agent = request.META.get(\"HTTP_USER_AGENT\")\n\n if not user_agent:\n return False\n\n # This is the check we were doing in our apache config...\n if re.match('.*iPhone.*', user_agent):\n return True\n\n if re.match('.*Android.*Mobile.*', user_agent):\n return True\n return False\n\n\ndef redirect_to_legacy_site():\n legacy_url = getattr(settings,\n \"MYUW_USER_SERVLET_URL\",\n \"https://myuw.washington.edu/servlet/user\")\n return HttpResponseRedirect(legacy_url)\n\n\ndef logout(request):\n # Expires current myuw session\n django_logout(request)\n\n # Redirects to weblogin logout page\n return 
HttpResponseRedirect(LOGOUT_URL)\n","sub_path":"myuw/views/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"240731722","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nbridge_readings={'Distance_mm':[50012,50015,50005,502412,50007,50014]}\ndf=pd.DataFrame(bridge_readings)\n#df.plot()\n#plt.show()\nstats=df.describe()\nprint(stats)\nprint(stats.Distance_mm['std'])\ndf=df[(df['std']<50)]","sub_path":"outliers.py","file_name":"outliers.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"146863775","text":"import struct\nfrom enum import Enum\n\nfrom twisted.internet import defer\nfrom twisted.internet.protocol import Factory, Protocol\n\nfrom qtm.packet import QRTPacketType, QRTPacket, QRTEvent\n\n\nRTheader = struct.Struct(\"= size:\n self.parse_received(data[h_size:size], type_)\n data = data[size:]\n\n if len(data) < h_size:\n break\n\n size, type_ = RTheader.unpack_from(data, 0)\n\n self.received_data = data\n\n def set_on_packet(self, on_packet):\n self.on_packet = on_packet\n\n def parse_received(self, data, type_):\n type_ = QRTPacketType(type_)\n # print data, type_\n\n # never any callbacks\n if type_ == QRTPacketType.PacketEvent:\n event, = RTEvent.unpack(data)\n event = QRTEvent(ord(event))\n\n if self.logger:\n self.logger(event.name, QRTLoggerInfo.Event)\n\n if self.on_event:\n self.on_event(event)\n return\n\n # Get a deferred to return result\n d = self.request_queue.pop(0) if len(self.request_queue) > 0 else None\n\n if type_ == QRTPacketType.PacketError:\n response = data[:-1]\n if self.logger:\n self.logger(response, QRTLoggerInfo.Error)\n\n if d:\n d.errback(QRTCommandException(response))\n\n elif type_ == QRTPacketType.PacketXML:\n response = data[:-1]\n\n if d:\n d.callback(response)\n\n elif type_ == QRTPacketType.PacketCommand:\n response = data[:-1]\n\n if not self.version_checked:\n self.get_version()\n\n if self.logger:\n self.logger(response, QRTLoggerInfo.Received)\n\n if d:\n d.callback(response)\n\n elif type_ == QRTPacketType.PacketData:\n if d:\n d.callback('Ok')\n\n packet = QRTPacket(data)\n if self.on_packet:\n self.on_packet(packet)\n\n\n\nclass QRTFactory(Factory):\n protocol = QRTProtocol\n\n def __init__(self, version, on_disconnect=None, on_event=None, logger=None):\n self.version = version\n self.logger = logger\n\n self.on_disconnect = on_disconnect\n self.on_event = on_event\n\n def buildProtocol(self, addr):\n p = self.protocol(self.version, self.on_disconnect, self.on_event, self.logger)\n p.factory = self\n return p\n","sub_path":"qtm/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"596165614","text":"trade_zones = [\"india\",\"east_north_america\",\"west_north_america\",\"caribbean\",\"west_south_america\",\"east_south_america\",\"south_east_asia\",\"indo_china\",\"yellow_sea\",\"southern_africa\",\"west_africa\",\"east_africa\",\"middle_east\",\"western_steppe\",\"eastern_steppe\",\"upper_yangtzi\",\"atlantic_seaboard\",\"central_europe\",\"west_mediterranean\",\"baltic\",\"east_europe\",\"east_mediterranean\"]\nindia_regions = 
[\"Ceylon\",\"South_India\",\"Central_India\",\"East_India\",\"Indo-Gangetic_Plain\",\"West_India\",\"Punjab\",\"Nepal\",\"Burma\",\"Eastern_Himalayas\",\"Bay_of_Bengal\",\"Maldives\",\"Kashmir\",\"Lan_Na\"]\neast_north_america_regions = [\"Mid-Atlantic_South\",\"Appalachia\",\"Great_Forests\",\"Great_Lakes\",\"Mid-Atlantic\",\"New_England\",\"Ontario\",\"Quebec\",\"New_Brunswick\",\"Nova_Scotia\",\"Atlantic_Region\",\"Greenland\"]\nwest_north_america_regions = [\"Pacific_Mexico\",\"Northern_Mexico\",\"American_Southwest\",\"Great_Plains\",\"Mountain_West\",\"California\",\"Cascadia\",\"Praire_Provinces\",\"British_Columbia\",\"Alaska\",\"Northern_Territories\",\"Hawaii\"]\ncaribbean_regions = [\"Central_America\",\"Deep_South\",\"Cuba\",\"Antilles\",\"Lucayan_Archipelago\",\"Bermuda\",\"Haiti\",\"Eastern_Mexico\"]\nwest_south_america_regions = [\"Colombia\",\"Peru\",\"Ecuador\",\"Lower_Peru\",\"Chile\"]\neast_south_america_regions = [\"Venezuela\",\"Guyana\",\"North_Brazil\",\"Northeast_Brazil\",\"Center-West_Brazil\",\"Southeast_Brazil\",\"Paraguay\",\"South_Brazil\",\"Uruguay\",\"Argentina\",\"South_Atlantic_Islands\",\"Patagonia\"]\nsouth_east_asia_regions = [\"South_Island\",\"North_Island\",\"Fiji\",\"Nouvelle-Caledonie\",\"Vanuatu\",\"Salomon_Islands\",\"Tuvalu\",\"Nauru\",\"Gilbert_Islands\",\"Caroline_Islands\",\"Marshall_Islands\",\"Guam\",\"Mariana_Islands\",\"Wake\",\"Marcus_Island\",\"Northern_Territory\",\"South_Australia\",\"Nusa_Tenggara\",\"Tasmania\",\"Western_Australia\",\"Queensland\",\"Johore\",\"Borneo\",\"Sulawesi\",\"Sumatra\",\"Christmas_Island\",\"Palau\",\"Bougainville_Island\",\"New_Britain\",\"New_Guinea\",\"Maluku\",\"Java\",\"Cocos_Islands\",\"Chagos\"]\nindo_china_regions = [\"Isan\",\"South_Siam\",\"Tenasserim\",\"Siam\",\"Cambodia\",\"Vietnam\",\"Laos\",\"Guangxi\",\"Sichuan_Kham\",\"Guangdong\"]\nyellow_sea_regions = [\"Jiangxi\",\"Fujian\",\"Zhejiang\",\"Anhui\",\"Jiangsu\",\"Henan\",\"Shandong\",\"Korea\",\"Kyushu\",\"Ezo\",\"Honshu\",\"Shikoku\",\"Taiwan\",\"Okinawa\",\"Iwo_Jima\",\"Mindanao\",\"Visayas\",\"Luzon\"]\nsouthern_africa_regions = [\"South_Africa\",\"Kalahari\",\"Angola\",\"Zimbabwe\"]\nwest_africa_regions = [\"Congo_Basin\",\"Gulf_of_Guinea\",\"Coastal_West_Africa\",\"Sahel\",\"Western_Sahara\",\"Macaronesia\",\"Comoro_Islands\"]\neast_africa_regions = [\"Mozambique\",\"Madagascar\",\"Lake_Victoria\",\"Horn_of_Africa\",\"Sudan\",\"Seychelles\",\"Arabian_Sea\",\"Arabia\"]\nmiddle_east_regions = [\"Eastern_Arabia\",\"Southern_Iran\",\"Arab_Iraq\",\"Azerbaijan\",\"Persian_Iraq\",\"Caspian_Iran\",\"Khurasan\",\"Kerman\",\"Balochistan\",\"Pashtunistan\",\"Tokharistan\",\"Armenia\",\"Bahrain\"]\nwestern_steppe_regions = [\"Fergana\",\"Bukhara\",\"Turkestan\",\"Khwarezm\",\"Siberia\",\"Kazan\",\"Moscow\"]\neastern_steppe_regions = [\"Tannu_Tuva\",\"Mongolia\",\"Gansu\",\"Shaanxi\",\"Zhili\",\"Liaoning\",\"Far_East\"]\nupper_yangtzi_regions = [\"Tibet\",\"Shanxi\",\"Qinghai\",\"Yunnan\",\"Guizhou\",\"Hunan\",\"Hubei\"]\natlantic_seaboard_regions = [\"Portugal\",\"Andalusia\",\"La_Mancha\",\"Leon-Castille\",\"Navarre\",\"Atlantic_France\",\"Grand_Est\",\"Northern_France\",\"Southern_England\",\"Wales_Mercia\",\"Northern_England\",\"Ireland\",\"Scotland\",\"Iceland\"]\ncentral_europe_regions = [\"Helvetia\",\"Austria\",\"Bavaria\",\"Baden-Wurttemberg\",\"Bohemia\",\"Saxony\",\"Low_Saxony\",\"Brandenburg\",\"Pomerania\",\"Hessen\",\"Westfalen\",\"Low_Countries\",\"Jutland\"]\nwest_mediterranean_regions = 
[\"Cisalpine_Italy\",\"Provence_Liguria\",\"Central_Italy\",\"Corsica_and_Sardinia\",\"Auvergne-Rhone-Alpes\",\"Occitanie\",\"Catalonia-Aragon\",\"Valencia\",\"Morocco\",\"Algeria\",\"Tunisia\"]\nbaltic_regions = [\"Denmark\",\"Sweden\",\"Norway\",\"Saapmi\",\"Finland\",\"Sankt-Petersburg\",\"Baltic_states\",\"Prussia\",\"Aland_Islands\",\"Bornholm\",\"Poland\"]\neast_europe_regions = [\"Galicia\",\"Minsk\",\"Kiev\",\"Subcarpathia\",\"Pannonia\",\"Bulgaria\",\"Odessa\",\"Black_Sea\",\"Caucasus\",\"Voiska_Donskova\"]\neast_mediterranean_regions = [\"Illyria\",\"Rumelia\",\"Southern_Italy\",\"Sicily\",\"Libya\",\"Crete\",\"Egypt\",\"Levant\",\"Syria\",\"Anatolia\",\"Cyprus\",\"Cilicia\",\"Aegean\",\"Marmara\",\"Venetia\",\"Malta\"]\ntrade_zones_dict = {'india':[india_regions],\n\t\t \t\t'east_north_america':[east_north_america_regions],\n\t\t \t\t'west_north_america':[west_north_america_regions],\n\t\t \t\t'caribbean':[caribbean_regions],\n\t\t \t\t'west_south_america':[west_south_america_regions],\n\t\t \t\t'east_south_america':[east_south_america_regions],\n\t\t \t\t'south_east_asia':[south_east_asia_regions],\n\t\t \t\t'indo_china':[indo_china_regions],\n\t\t \t\t'yellow_sea':[yellow_sea_regions],\n\t\t \t\t'southern_africa':[southern_africa_regions],\n\t\t \t\t'west_africa':[west_africa_regions],\n\t\t \t\t'east_africa':[east_africa_regions],\n\t\t \t\t'middle_east':[middle_east_regions],\n\t\t \t\t'western_steppe':[western_steppe_regions],\n\t\t \t\t'eastern_steppe':[eastern_steppe_regions],\n\t\t \t\t'upper_yangtzi':[upper_yangtzi_regions],\n\t\t \t\t'atlantic_seaboard':[atlantic_seaboard_regions],\n\t\t \t\t'central_europe':[central_europe_regions],\n\t\t \t\t'west_mediterranean':[west_mediterranean_regions],\n\t\t \t\t'baltic':[baltic_regions],\n\t\t \t\t'east_europe':[east_europe_regions],\n\t\t \t\t'east_mediterranean':[east_mediterranean_regions]}\n\ndef print_scripted_triggers(trade_zone):\n\tprint(\"{trade_zone}_tradezone = {{\\n trigger_if = {{\\n limit = {{\\n $PROVINCE$ = yes\\n }}\\n OR = {{\\n\".format(trade_zone=trade_zone))\n\tfor x in trade_zones_dict.get(trade_zone):\n\t\tfor i in x:\n\t\t\tprint(\" is_in_region = {i}\".format(i=i))\n\tprint(\" }\")\n\tprint(\" }\")\n\tprint(\" trigger_else = {\\n limit = {\\n $PROVINCE$ = no\\n }\\n OR = {\")\n\tfor x in trade_zones_dict.get(trade_zone):\n\t\tfor i in x:\n\t\t\tprint(\" this = region:{i}\".format(i=i))\n\tprint(\" }\")\n\tprint(\" }\")\n\tprint(\"}\")\n\ndef print_goods_produced_values(trade_zone):\n\tprint(\"{trade_zone}_total_goods = {{\\n value = 0\\n\".format(trade_zone=trade_zone))\n\tfor x in trade_zones_dict.get(trade_zone):\n\t\tfor i in x:\n\t\t\tprint(\" region:{i} = {{\\n every_region_province = {{\\n add = num_goods_produced\\n }}\\n }}\".format(i=i))\n\tprint(\"}\")\n\ndef print_population_values(trade_zone):\n\tprint(\"{trade_zone}_total_population = {{\\n value = 0\\n\".format(trade_zone=trade_zone))\n\tfor x in trade_zones_dict.get(trade_zone):\n\t\tfor i in x:\n\t\t\tprint(\" region:{i} = {{\\n every_region_province = {{\\n add = total_population\\n }}\\n }}\".format(i=i))\n\tprint(\"}\")\n\ndef print_custom_loc(trade_zone):\n\tx = 0\n\tprint(\"state_trade_zone_loc = {\\n type = province\\n\")\n\tfor i in trade_zones:\n\t\tx = x + 1\n\t\tprint(\" text = {{\\n localization_key = state_trade_zone_loc_{x}\\n trigger = {{\\n {i}_tradezone = {{ PROVINCE = yes }}\\n }}\\n }}\".format(trade_zone=trade_zone,x=x,i=i))\n\tprint(\" text = {\\n localization_key = state_trade_zone_loc_fallback\\n trigger = {\\n 
always = yes\\n }\\n }\")\n\tprint(\"}\")\n\tx = 0\n\tprint(\"state_trade_zone_value_loc = {\\n type = province\\n\")\n\tfor i in trade_zones:\n\t\tx = x + 1\n\t\tprint(\" text = {{\\n localization_key = state_trade_zone_value_loc_{x}\\n trigger = {{\\n {i}_tradezone = {{ PROVINCE = yes }}\\n }}\\n }}\".format(trade_zone=trade_zone,x=x,i=i))\n\tprint(\" text = {\\n localization_key = state_trade_zone_value_loc_fallback\\n trigger = {\\n always = yes\\n }\\n }\")\n\tprint(\"}\")\n\ndef print_tradezone_localization(trade_zones):\n\tx = 0\n\tfor i in trade_zones:\n\t\tcaps_name = i.replace(\"_\", \" \").title()\n\t\tx = x + 1\n\t\tprint(\"state_trade_zone_loc_{x}:0 \\\"{caps_name}\\\"\".format(caps_name=caps_name,x=x))\n\tprint(\"state_trade_zone_loc_fallback:0 \\\"0\\\"\\n\")\n\tx = 0\n\tfor i in trade_zones:\n\t\tx = x + 1\n\t\tprint(\"state_trade_zone_value_loc_{x}:0 \\\"[GuiScope.SetRoot( Player.MakeScope ).ScriptValue('{i}_total_goods')|0]\\\"\".format(i=i,x=x))\n\tprint(\"state_trade_zone_value_loc_fallback:0 \\\"0\\\"\")\n\n\n#FUNCTION 1: Purpose - Print trade zone scripted triggers\n#for trade_zone in trade_zones_dict:\n# print_scripted_triggers(trade_zone)\n\n#FUNCTION 2: Purpose - Print trade zone goods produced script values\n#for trade_zone in trade_zones_dict:\n# print_goods_produced_values(trade_zone)\n\n#FUNCTION 3: Purpose - Print trade zone population script values\n#for trade_zone in trade_zones_dict:\n# print_population_values(trade_zone)\n\n#FUNCTION 4: Purpose - Print trade zone custom localization\n#print_custom_loc(trade_zones)\n\n#FUNCTION 5: Purpose - Print trade zone localization\n#print_tradezone_localization(trade_zones)\n","sub_path":"zz_TradeZoneScript/TradeZones.py","file_name":"TradeZones.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"186038460","text":"import random\nimport re\nimport requests\nimport sys\nimport time\n\nimport bottlenose\nfrom bs4 import BeautifulSoup\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom camera.models import CameraBrand, Camera\nfrom camera_lens.models import CameraLens\n\n\nAWS_ACCESS_KEY_ID=''\nAWS_SECRET_ACCESS_KEY=''\nAWS_ASSOCIATE_TAG=''\n\namazon = bottlenose.Amazon(AWS_ACCESS_KEY_ID,\n AWS_SECRET_ACCESS_KEY, AWS_ASSOCIATE_TAG)\n\n\n# I don't know the URL to look these up, but these are what amazon has in the finder as of 1/08/2014\nCAMERA_BRANDS = [\"Canon\",\"Fujifilm\",\"Leica\",\"Nikon\",\"Olympus\",\"Panasonic\",\"Pentax\",\"Samsung\",\"Sony\"]\n\nCANON_BRAND_SERIES_LIST = [\"EOS\", \"EOS Rebel\"]\nFUJI_BRAND_SERIES_LIST = [\"X series\"]\nLEICA_BRAND_SERIES_LIST = [\"M series\", \"T series\", ]\nNIKON_BRAND_SERIES_LIST = [\"CX (Nikon 1)\", \"DX\", \"FX\"]\nOLYMPUS_BRAND_SERIES_LIST = [\"E series\", \"O-MD\", \"PEN\"]\nPANASONIC_BRAND_SERIES_LIST = [\"Lumix G\"]\nPENTAX_BRAND_SERIES_LIST = [\"K series\", \"Q series\"]\nSAMSUNG_BRAND_SERIES_LIST = [\"NX\"]\nSONY_BRAND_SERIES_LIST = [\"A-mount\", \"E-mount\", \"FE-mount\"]\n\nCANON_EOS_CAMERAS = [\"10D\",\"1D\",\"1D C\",\"1D Mark II\",\"1D Mark II N\",\"1D Mark III\",\"1D Mark IV\",\"1D X\",\"1Ds\",\"1Ds Mark II\",\"1Ds Mark III\",\"20D\",\"20Da\",\"300D\",\"30D\",\"40D\",\"50D\",\"5D\",\"5D Mark II\",\"5D Mark III\",\"60D\",\"60Da\",\"6D\",\"70D\",\"7D\",\"7D Mark II\",\"D30\",\"D60\",\"M\"]\nCANON_EOS_REBEL_CAMERAS = [\"SL1\",\"T1i\",\"T2i\",\"T3\",\"T3i\",\"T4i\",\"T5\",\"T5i\",\"XS\",\"XSi\",\"XT\",\"XTi\"]\n\nFUJI_X_SERIES_CAMERAS = 
[\"X-A1\",\"X-E1\",\"X-E2\",\"X-M1\",\"X-Pro1\",\"X-T1\"]\n\nLEICA_M_SERIES_CAMERAS = [\"M Typ 240\",\"M-E Typ 220\",\"M-Monochrom\",\"M8\",\"M8.2\",\"M9\",\"M9-P\"]\nLEICA_T_SERIES_CAMERAS = [\"T (Typ 701)\"]\n\nNIKON_CX_CAMERAS = [\"AW1\",\"J1\",\"J2\",\"J3\",\"J4\",\"S1\",\"S2\",\"V1\",\"V2\",\"V3\"]\nNIKON_DX_CAMERAS = [\"D100\",\"D1H\",\"D1X\",\"D200\",\"D2H\",\"D2Hs\",\"D2X\",\"D2Xs\",\"D300\",\"D3000\",\"D300S\",\"D3100\",\"D3200\",\"D3300\",\"D40\",\"D40X\",\"D50\",\"D5000\",\"D5100\",\"D5200\",\"D5300\",\"D60\",\"D70\",\"D7000\",\"D70s\",\"D7100\",\"D80\",\"D90\"]\nNIKON_FX_CAMERAS = [\"D3\", \"D3S\", \"D3X\", \"D4\", \"D4s\", \"D600\", \"D610\", \"D700\", \"D750\", \"D800\", \"D800E\", \"D810\", \"Df\"]\n\nOLYMPUS_E_SERIES_CAMERAS = [\"E-1\",\"E-3\",\"E-30\",\"E-300\",\"E-330\",\"E-410\",\"E-420\",\"E-450\",\"E-5\",\"E-500\",\"E-510\",\"E-520\",\"E-600\",\"E-620\"]\nOLYMPUS_OMD_CAMERAS = [\"E-M1\",\"E-M10\",\"E-M5\"]\nOLYMPUS_PEN_CAMERAS = [\"E-P1\",\"E-P2\",\"E-P3\",\"E-P5\",\"E-PL1\",\"E-PL1s\",\"E-PL2\",\"E-PL3\",\"E-PL5\",\"E-PL6\",\"E-PL7\",\"E-PM1\",\"E-PM2\"]\n\nPANASONIC_LUMIX_G_CAMERAS = [\"G1\",\"G10\",\"G2\",\"G3\",\"G5\",\"G6\",\"GF1\",\"GF2\",\"GF3\",\"GF5\",\"GF6\",\"GH1\",\"GH2\",\"GH3\",\"GH4\",\"GM1\",\"GM5\",\"GX1\",\"GX7\"]\n\nPENTAX_K_SERIES_CAMERAS = [\"K-01\",\"K-3\",\"K-30\",\"K-5\",\"K-5 II\",\"K-5 IIs\",\"K-50\",\"K-500\",\"K-7\",\"K-m\",\"K-r\",\"K-S1\",\"K-x\",\"K100D\",\"K100D Super\",\"K10D\",\"K110D\",\"K200D\",\"K20D\"]\nPENTAX_Q_SERIES_CAMERAS = [\"Q\",\"Q-S1\",\"Q10\",\"Q7\"]\n\nSAMSUNG_NX_CAMERAS = [\"NX mini\",\"NX1\",\"NX10\",\"NX100\",\"NX1000\",\"NX1100\",\"NX20\",\"NX200\",\"NX2000\",\"NX210\",\"NX30\",\"NX300\",\"NX3000\",\"NX300M\"]\n\nSONY_A_MOUNT_CAMERAS = [\"Alpha DSLR-A100\",\"Alpha DSLR-A200\",\"Alpha DSLR-A230\",\"Alpha DSLR-A290\",\"Alpha DSLR-A300\",\"Alpha DSLR-A330\",\"Alpha DSLR-A350\",\"Alpha DSLR-A390\",\"Alpha DSLR-A500\",\"Alpha DSLR-A550\",\"Alpha DSLR-A560\",\"Alpha DSLR-A580\",\"Alpha DSLR-A700\",\"Alpha DSLR-A850\",\"Alpha DSLR-A900\",\"Alpha SLT-A33\",\"Alpha SLT-A35\",\"Alpha SLT-A37\",\"Alpha SLT-A55\",\"Alpha SLT-A57\",\"Alpha SLT-A58\",\"Alpha SLT-A65\",\"Alpha SLT-A77\",\"Alpha SLT-A77 II\",\"Alpha SLT-A99\"]\nSONY_E_MOUNT_CAMERAS = [\"3\",\"3N\",\"5\",\"5N\",\"5R\",\"5T\",\"6\",\"7\",\"Alpha 3000\",\"Alpha 5000\",\"Alpha 5100\",\"Alpha 6000\",\"C3\",\"F3\"]\nSONY_FE_MOUNT_CAMERAS = [\"Alpha 7\",\"Alpha 7 II\",\"Alpha 7R\",\"Alpha 7S\"]\n\n\n# set the headers to something other than python user agent.\nr_headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0' }\n\n\ndef lookup_lens_asins_for_cam(brand, brand_series, model_name):\n url = \"http://www.amazon.com/gp/finders/ajax/finderajax.html?action=getSupplies&finderId=lens&nodeId=\" + brand + \"%5E\" + brand_series.replace(\" \", \"+\") + \"%5E\" + model_name.replace(\" \", \"+\") + \"&supplyPage=1&pageSize=999&refinements=&supplyBrand=&redir=pf_rd_p%3D1561025342%26pf_rd_s%3Dcenter-5%26pf_rd_t%3D101%26pf_rd_i%3D6207565011%26pf_rd_m%3DATVPDKIKX0DER%26pf_rd_r%3D0B8F6QAY4MYB29EFZS0H&PowerBar=0\"\n try:\n r = requests.get(url, headers=r_headers)\n soup = BeautifulSoup(r.text)\n asins = re.findall(r'\"ASIN\":\"(\\w+)\"', str(soup.prettify()))\n return asins\n except:\n print(\"Couldnt return asins for \" + brand + \" \" + model_name)\n\n\ndef get_all_brand_series(brand):\n if brand == 'Canon':\n # can re-work this later incorporating url to get all the series but since its not updated often and the urls are tricky 
hard-coding per brand for now\n return CANON_BRAND_SERIES_LIST\n elif brand == 'Fujifilm':\n return FUJI_BRAND_SERIES_LIST\n elif brand == 'Leica':\n return LEICA_BRAND_SERIES_LIST\n elif brand == 'Nikon':\n return NIKON_BRAND_SERIES_LIST\n elif brand == 'Olympus':\n return OLYMPUS_BRAND_SERIES_LIST\n elif brand == 'Panasonic':\n return PANASONIC_BRAND_SERIES_LIST\n elif brand == 'Pentax':\n return PENTAX_BRAND_SERIES_LIST\n elif brand == 'Samsung':\n return SAMSUNG_BRAND_SERIES_LIST\n elif brand == \"Sony\":\n return SONY_BRAND_SERIES_LIST\n else:\n print(\"Sorry no matches for this, this should never happen, going to exit now with error.\")\n sys.exit(1)\n\n\ndef get_series_models(brand, brand_series):\n if brand == 'Canon':\n if brand_series == 'EOS':\n return CANON_EOS_CAMERAS\n elif brand_series == 'EOS Rebel':\n return CANON_EOS_REBEL_CAMERAS\n else:\n print(\"No match, this shouldnt ever happen. Exiting with error...\")\n sys.exit(1)\n\n elif brand == 'Fujifilm':\n if brand_series == 'X series':\n return FUJI_X_SERIES_CAMERAS\n else:\n print(\"No match, this shouldnt ever happen. Exiting with error...\")\n sys.exit(1)\n\n elif brand == 'Leica':\n if brand_series == 'M series':\n return LEICA_M_SERIES_CAMERAS\n elif brand_series == 'T series':\n return LEICA_T_SERIES_CAMERAS\n else:\n print(\"No match, this shouldnt ever happen. Exiting with error...\")\n sys.exit(1)\n\n elif brand == 'Nikon':\n if brand_series == 'CX (Nikon 1)':\n return NIKON_CX_CAMERAS\n elif brand_series == 'DX':\n return NIKON_DX_CAMERAS\n elif brand_series == 'FX':\n return NIKON_FX_CAMERAS\n else:\n print(\"No match, this shouldnt ever happen. Exiting with error...\")\n sys.exit(1)\n\n elif brand == 'Olympus':\n if brand_series == 'E series':\n return OLYMPUS_E_SERIES_CAMERAS\n elif brand_series == 'O-MD':\n return OLYMPUS_OMD_CAMERAS\n elif brand_series == 'PEN':\n return OLYMPUS_PEN_CAMERAS\n else:\n print(\"No match, this shouldnt ever happen. Exiting with error...\")\n sys.exit(1)\n\n elif brand == 'Panasonic':\n if brand_series == 'Lumix G':\n return PANASONIC_LUMIX_G_CAMERAS\n else:\n print(\"No match, this shouldnt ever happen. Exiting with error...\")\n sys.exit(1)\n\n elif brand == 'Pentax':\n if brand_series == 'K series':\n return PENTAX_K_SERIES_CAMERAS\n elif brand_series == 'Q series':\n return PENTAX_Q_SERIES_CAMERAS\n else:\n print(\"No match, this shouldnt ever happen. Exiting with error...\")\n sys.exit(1)\n\n elif brand == 'Samsung':\n if brand_series == 'NX':\n return SAMSUNG_NX_CAMERAS\n else:\n print(\"No match, this shouldnt ever happen. Exiting with error...\")\n sys.exit(1)\n\n elif brand == 'Sony':\n if brand_series == 'A-mount':\n return SONY_A_MOUNT_CAMERAS\n elif brand_series == 'E-mount':\n return SONY_E_MOUNT_CAMERAS\n elif brand_series == 'FE-mount':\n return SONY_FE_MOUNT_CAMERAS\n else:\n print(\"No match, this shouldnt ever happen. Exiting with error...\")\n sys.exit(1)\n\n else:\n print(\"Sorry no matches for this, this should never happen, going to exit now with error.\")\n sys.exit(1)\n\n\ndef amazon_query_asin(asin):\n try:\n response = amazon.ItemLookup(ItemId=asin)\n except:\n print(\"error performing amazon lookup of \" + asin)\n sys.exit(1)\n try:\n soup = BeautifulSoup(response)\n except:\n print(\"error creating soup from response.\")\n return soup\n\n\n\"\"\"\n response = amazon.ItemLookup(ItemId=asin)\n soup = BeautifulSoup(response)\n return unicode(soup)\n\"\"\"\n\n\n\"\"\"\ndef re_lens_title(title_long):\n try:\n title = re.search(r\"(^.*?) 
Lens for\", title_long).group(1)\n print(\"regex match - title set to \" + title)\n except:\n title = str(title_long)\n print(\"no regex match - title set to \" + title)\n return title\n\"\"\"\n\n\nclass Command(BaseCommand):\n def handle(self, **options):\n for brand in CAMERA_BRANDS:\n for brand_series in get_all_brand_series(brand=brand):\n for model_name in get_series_models(brand=brand, brand_series=brand_series):\n cam_title = brand + \" \" + model_name\n print(cam_title)\n try:\n c = Camera.objects.get(title=cam_title)\n print(\"Matched on \" + cam_title)\n except:\n print(\"No match for \" + cam_title + \" creating...\")\n c = Camera.objects.create(title=cam_title, brand=CameraBrand.objects.get(title=brand))\n\n\n asins = lookup_lens_asins_for_cam(brand=brand, brand_series=brand_series, model_name=model_name)\n for asin in asins:\n print(\"Checking if ASIN already exists... \" + asin)\n try:\n l = CameraLens.objects.get(amazon_asin=asin)\n except:\n print(\"Lens with ASIN \" + asin + \" doesnt already exist, querying amazon and creating.\")\n\n soup = amazon_query_asin(asin=asin)\n try:\n title_long = soup.find(\"title\").text\n title = unicode(title_long)\n #title = re_lens_title(title_long)\n except:\n print(\"errored the fucking souping title_long text...\")\n print(soup)\n\n try:\n amazon_detail_page_url = unicode(soup.find(\"detailpageurl\").text)\n except:\n print(\"error getting amazon detail page, setting it to blank\")\n amazon_detail_page_url = \"\"\n\n try:\n l = CameraLens.objects.get_or_create(title=title, amazon_asin=asin,amazon_detail_page_url=amazon_detail_page_url)\n except:\n print(\"Error: Failed to get or create CameraLens object...\")\n #sys.exit(1)\n try:\n c.lenses.add(l)\n except:\n print(\"Something fucked up adding lens to camera\")\n #sys.exit(1)\n\n print(\"looks like we made it through, sleeping for a bit then we go again...\")\n\n sleep_time = random.randint(5,6)\n time.sleep(sleep_time)\n","sub_path":"camera_lens/management/commands/populate_lenses_from_amazon.py","file_name":"populate_lenses_from_amazon.py","file_ext":"py","file_size_in_byte":11756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"14467703","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport re\n\nid_list = []\nvideo_list = []\ncount = 0\nfor i in ['1', '2', '3', '4']:\n html = urllib.request.urlopen('http://www.marschen.com/forum.php?mod=forumdisplay&fid=2&page=' + i).read().decode('gbk')\n soup = BeautifulSoup(html, 'html.parser')\n tids = soup.select('tbody')\n for tid in tids:\n id = tid.get('id')\n if '_' in id:\n id_list.append(id.split('_')[1])\nprint(id_list)\nprint(len(id_list))\n\nfor id in id_list:\n try:\n html = urllib.request.urlopen('http://www.marschen.com/forum.php?mod=viewthread&tid=' + id).read().decode('gbk')\n soup = BeautifulSoup(html, 'html.parser')\n if soup.select('td.t_f font'):\n video_title = soup.select('span#thread_subject')[0].get_text()\n video_name = soup.select('td.t_f font')[0].get_text()\n video_js = soup.select('td.t_f script')[-1].get_text()\n if re.findall(r'http.*swf', video_js):\n count += 1\n video_url = re.findall(r'http.*swf', video_js)[0]\n # video_data = {\n # 'title': video_title,\n # 'name': video_name,\n # 'url': video_url\n # }\n print(str(count), video_title, video_name, video_url)\n # video_list.append(video_data)\n except:\n pass\n# print(video_list)\n# 
print(len(video_list))","sub_path":"MarsAndroid/get_VideoList.py","file_name":"get_VideoList.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"197920848","text":"from icevision.imports import *\nfrom icevision.backbones import resnet_fpn\nfrom icevision.models.rcnn.utils import *\n\nfrom torchvision.models.detection.faster_rcnn import (\n fasterrcnn_resnet50_fpn,\n FasterRCNN,\n FastRCNNPredictor,\n)\n\n\ndef model(\n num_classes: int,\n backbone: Optional[nn.Module] = None,\n remove_internal_transforms: bool = True,\n **faster_rcnn_kwargs\n) -> nn.Module:\n \"\"\"FasterRCNN model implemented by torchvision.\n\n # Arguments\n num_classes: Number of classes.\n backbone: Backbone model to use. Defaults to a resnet50_fpn model.\n remove_internal_transforms: The torchvision model internally applies transforms\n like resizing and normalization, but we already do this at the `Dataset` level,\n so it's safe to remove those internal transforms.\n **faster_rcnn_kwargs: Keyword arguments that internally are going to be passed to\n `torchvision.models.detection.faster_rcnn.FastRCNN`.\n\n # Returns\n A Pytorch `nn.Module`.\n \"\"\"\n if backbone is None:\n model = fasterrcnn_resnet50_fpn(pretrained=True, **faster_rcnn_kwargs)\n\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\n backbone_param_groups = resnet_fpn.param_groups(model.backbone)\n else:\n model = FasterRCNN(backbone, num_classes=num_classes, **faster_rcnn_kwargs)\n backbone_param_groups = backbone.param_groups()\n\n patch_param_groups(model=model, backbone_param_groups=backbone_param_groups)\n\n if remove_internal_transforms:\n remove_internal_model_transforms(model)\n\n return model\n","sub_path":"icevision/models/rcnn/faster_rcnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"619805441","text":"\ndef bijiao(fname1,fname2):\n f1 = open(fname1,'r')\n f2 = open(fname2,'r')\n count = 0 #统计行数\n differ = [] #统计不一样的行数\n for line1 in f1:\n line2 = f2.readline() #记住这里的编程变换方法\n count += 1\n if line1 != line2:\n differ.append(count)\n f1.close()\n f2.close()\n return differ\n \n\nfname1 = input('请输入第一个文件名:')\nfname2 = input('请输入第二个文件名:')\ndiffer = bijiao(fname1,fname2)\nif len(differ) == 0:\n print('两个文件完全一样')\nelse:\n print('两个文件共有【%d】处不同'%len(differ))\n for each in differ:\n print('第%d行不一样'%each)\n","sub_path":"comparing_documents.py","file_name":"comparing_documents.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"653604543","text":"#import statement needed to change pathing \nimport sys\nsys.path.append(\"./../\")\n\nfrom pymatgen import MPRester\nimport pymatgen as pg\nimport numpy as np \nimport cPickle as pickle\nimport csv\nimport os\nimport re\nfrom collections import defaultdict\nimport copy\n\nimport mpdata as mp\nimport gatherLPNorm as gm\n\nPARTIAL_PATH = \"Norm_mapping\"\nFILEPATH = \"Computed CSV/NonLithiated_Atoms.csv\"\nOUTPUT_FILE = \"Num_Compound.csv\"\n\nclass getLiRatio:\n\tgetAtomNum = re.compile(r'[^\\d]')\n\tgetAtomType = re.compile(r'[\\d]')\n\n\tdef __init__(self, li_atomList, nonLi_atomList, LiMapping):\n\t\t#define some new constants here\n\t\tself.li_atomList = li_atomList\n\t\tself.nonLi_atomList = 
nonLi_atomList\n\t\tself.LiMapping = LiMapping\n\t\tself.DIR = os.path.dirname(__file__)\n\n\t\tself.gatherFeatures()\n\n\tdef gatherFeatures(self):\n\t\t#variable initialization\n\t\tself.LiRatioDict = {}\n\t\tself.nonLiRatioDict = {}\n\n\t\tself.gatherRatios(self.LiRatioDict, self.li_atomList)\n\t\tself.gatherRatios(self.nonLiRatioDict, self.nonLi_atomList)\n\t\n\t\tself.getCompoundNum()\n\n\t\tself.exportRatio()\n\n\tdef exportRatio(self):\n\t\tdef writeRatio(compound, writer):\n\t\t\toutputDict = {\"Li_atom\": compound, \"nonLi_atom\": self.LiMapping[compound]}\n\t\t\t# outputDict[\"liAtomTotal\"] = self.LiRatioDict[compound]\n\t\t\toutputDict[\"compoundTotal\"] = self.compoundNum[compound]\n\t\t\twriter.writerow(outputDict)\n\n\t\toutputFile = os.path.join(self.DIR, OUTPUT_FILE)\n\t\tcsvfile = open(outputFile, 'w')\n\t\tfieldnames = [\"Li_atom\", \"nonLi_atom\", \"compoundTotal\"]#\"liAtomTotal\", \"nonLiAtomTotal\"]\n\t\twriter = csv.DictWriter(csvfile, fieldnames = fieldnames)\n\t\twriter.writeheader()\n\t\tfor compound in self.li_atomList:\n\t\t\twriteRatio(compound, writer)\n\n\tdef getCompoundNum(self):\n\t\tdef getFormulaList(compound):\n\t\t\treturn pg.Composition(compound).formula.split()\n\n\t\tdef getNum(formulaList, Li_compound, nonLi_compound):\n\t\t\tfor atom in formulaList:\n\t\t\t\tif getLiRatio.getAtomType.sub('', str(atom)) == \"Li\":\n\t\t\t\t\tLiNum = getLiRatio.getAtomNum.sub('', str(atom))\n\t\t\t\t\tbreak\n\t\t\treturn (self.LiRatioDict[Li_compound] - int(LiNum))/self.nonLiRatioDict[nonLi_compound]\n\n\n\n\n\t\tself.compoundNum = {}\n\t\tfor i in range(len(self.li_atomList)):\n\t\t\tLi_compound = self.li_atomList[i]\n\t\t\tnonLi_compound = self.nonLi_atomList[i]\n\n\t\t\tformulaList = getFormulaList(Li_compound)\n\n\t\t\tself.compoundNum[Li_compound] = getNum(formulaList, Li_compound, nonLi_compound)\n\n\tdef gatherRatios(self, ratioDict, currAtomList):\n\t\tdef getRatio(formulaList):\n\t\t\ttotal = 0\n\t\t\tLiNum = 0\n\t\t\tfor atom in formulaList:\n\t\t\t\ttotal += int(getLiRatio.getAtomNum.sub('', str(atom)))\n\t\t\t\tif getLiRatio.getAtomType.sub('', str(atom)) == \"Li\":\n\t\t\t\t\tLiNum = getLiRatio.getAtomNum.sub('', str(atom))\n\t\t\treturn total\n\n\n\t\tfor compound in currAtomList:\n\t\t\tcomp = pg.Composition(compound)\n\t\t\tformulaList = comp.formula.split()\n\n\t\t\tratioDict[compound] = getRatio(formulaList)\n\n\n\n\n\n\ndef preInit():\n\t# FILEPATH = \"../Computed CSV/Energy Computed CSV/reduced_coalesced_computed_File.csv\"\n\tnormList = [2, 3, 5, 7]\n\tgather = gm.extractNorm(FILEPATH, normList, PARTIAL_PATH + \"_LI.csv\", PARTIAL_PATH + \"_nonLi.csv\")\n\tgather.getAtomList()\n\treturn gather.Li_atomList, gather.nonLi_atomList, gather.LiMapping\n\ndef main():\n\tli_atomList, nonLi_atomList, LiMapping = preInit()\n\tgetLiRatio(li_atomList, nonLi_atomList, LiMapping)\n\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"Gather Features/getLiRatio.py","file_name":"getLiRatio.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"433052393","text":"import random\nimport numpy as np\nimport torch \n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage.filters import uniform_filter1d\n\nfrom .ManagerFeatures import state_to_features\nfrom .ManagerRewards import *\n \nACTIONS_IDX = {'LEFT':0, 'RIGHT':1, 'UP':2, 'DOWN':3, 'WAIT':4, 'BOMB':5}\n\ndef generate_eps_greedy_policy(network):\n return 
np.linspace(network.epsilon_begin, network.epsilon_end, network.training_episodes)\n\ndef add_experience(self, old_game_state, self_action, new_game_state, events):\n old_features = state_to_features(old_game_state)\n if old_features is not None:\n if new_game_state is None:\n new_features = old_features\n else:\n new_features = state_to_features(new_game_state)\n reward = reward_from_events(self, events)\n reward += rewards_from_own_events(self, old_game_state, self_action, new_game_state, events)\n\n action_idx = ACTIONS_IDX[self_action]\n action = torch.zeros(6)\n action[action_idx] = 1\n\n self.experience_buffer.append((old_features, action, reward, new_features))\n number_of_elements_in_buffer = len(self.experience_buffer)\n if number_of_elements_in_buffer > self.network.buffer_size:\n self.experience_buffer.popleft()\n\ndef update_network(self):\n '''\n network: the network that gets updated\n experience_buffer: the collected experiences, list of game_episodes\n '''\n network = self.network \n experience_buffer = self.experience_buffer\n\n #randomly choose batch out of the experience buffer\n number_of_elements_in_buffer = len(experience_buffer)\n batch_size = min(number_of_elements_in_buffer, network.batch_size)\n\n random_i = [ random.randrange(number_of_elements_in_buffer) for _ in range(batch_size)]\n\n #compute for each experience in the batch \n # - the Ys using n-step TD Q-learning\n # - the current guess for the Q function\n sub_batch = []\n Y = []\n for i in random_i:\n random_experience = experience_buffer[i]\n sub_batch.append(random_experience)\n \n for b in sub_batch:\n old_state = b[0]\n action = b[1]\n reward = b[2]\n new_state = b[3]\n\n y = reward\n if new_state is not None:\n y += network.gamma * torch.max(network(new_state))\n\n Y.append(y)\n\n Y = torch.tensor(Y)\n\n #Qs\n states = torch.cat(tuple(b[0] for b in sub_batch)) #put all states of the sub_batch in one batch\n q_values = network(states)\n actions = torch.cat([b[1].unsqueeze(0) for b in sub_batch])\n Q = torch.sum(q_values*actions, dim=1)\n \n loss = network.loss_function(Q, Y)\n network.optimizer.zero_grad()\n loss.backward()\n network.optimizer.step()\n\n\ndef save_parameters(self, string):\n torch.save(self.network.state_dict(), f\"network_parameters/{string}.pt\")\n\n #plot scores\n y = self.game_score_arr\n y = uniform_filter1d(y, 10, mode=\"nearest\", output=\"float\")\n x = range(len(y))\n fig, ax = plt.subplots()\n ax.set_title('score')\n ax.set_xlabel('episode')\n ax.set_ylabel('total points')\n ax.plot(x,y, marker='o', markersize=3, linewidth=1)\n plt.savefig('network_parameters/training_progress.png')\n\n\n\ndef get_score(events):\n true_game_rewards = {\n e.COIN_COLLECTED: 1,\n e.KILLED_OPPONENT: 5,\n }\n score = 0\n for event in events:\n if event in true_game_rewards:\n score += true_game_rewards[event]\n return score\n\ndef track_game_score(self):\n self.game_score_arr.append(self.game_score)\n self.game_score = 0\n\n","sub_path":"agent_code_old_versions/Task1NNV2/ManagerTraining.py","file_name":"ManagerTraining.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"613774760","text":"import argparse\n\n\ndef read_file():\n # initialize\n parser = argparse.ArgumentParser(description='This script is ...')\n parser.add_argument(\"string\", type=str)\n args = parser.parse_args()\n filename = args.string\n print(filename + \" is reading...\")\n path = \"dataset/\" + filename\n return path\n\n\ndef 
read_files():\n # initialize\n parser = argparse.ArgumentParser(description='This script is ...')\n parser.add_argument(\"string\", type=str)\n args = parser.parse_args()\n filenames = args.string\n filename_arr = filenames.split(\",\")\n print(filename_arr)\n print(\"these files are reading...\")\n path_arr = []\n for name in filename_arr:\n path_arr.append(\"dataset/\" + name)\n return path_arr\n","sub_path":"python/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"516475835","text":"from app import db\nfrom auth import authorize\nfrom auth.AuthConstants import DATA_EDITOR_ROLE, EC_LEADERSHIP_ROLE\nfrom util import RequestBody\nfrom schemas import TallySheetVersionCE201Schema, TallySheetVersionSchema\nfrom orm.entities.Submission import TallySheet\nfrom orm.entities.SubmissionVersion.TallySheetVersion import TallySheetVersionCE201\nfrom exception import NotFoundException\n\n\n@authorize(required_roles=[DATA_EDITOR_ROLE, EC_LEADERSHIP_ROLE])\ndef get_by_id(tallySheetId, tallySheetVersionId):\n result = TallySheetVersionCE201.get_by_id(\n tallySheetId=tallySheetId,\n tallySheetVersionId=tallySheetVersionId\n )\n\n return TallySheetVersionCE201Schema().dump(result).data\n\n\n@authorize(required_roles=[DATA_EDITOR_ROLE, EC_LEADERSHIP_ROLE])\ndef get_all(tallySheetId):\n tallySheet = TallySheet.get_by_id(tallySheetId=tallySheetId)\n if tallySheet is None:\n raise NotFoundException(\"Tally sheet not found. (tallySheetId=%d)\" % tallySheetId)\n\n result = TallySheetVersionCE201.get_all(\n tallySheetId=tallySheetId\n )\n\n return TallySheetVersionCE201Schema(many=True).dump(result).data\n\n\n@authorize(required_roles=[DATA_EDITOR_ROLE])\ndef create(tallySheetId, body):\n request_body = RequestBody(body)\n tallySheetVersion = TallySheetVersionCE201.create(\n tallySheetId=tallySheetId\n )\n\n tally_sheet_content = request_body.get(\"content\")\n if tally_sheet_content is not None:\n for party_count_body in tally_sheet_content:\n party_count_body = RequestBody(party_count_body)\n tallySheetVersionRow = tallySheetVersion.add_row(\n areaId=party_count_body.get(\"areaId\"),\n ballotsIssued=party_count_body.get(\"ballotsIssued\"),\n ballotsReceived=party_count_body.get(\"ballotsReceived\"),\n ballotsSpoilt=party_count_body.get(\"ballotsSpoilt\"),\n ballotsUnused=party_count_body.get(\"ballotsUnused\"),\n ordinaryBallotCountFromBoxCount=party_count_body.get(\"ordinaryBallotCountFromBoxCount\"),\n tenderedBallotCountFromBoxCount=party_count_body.get(\"tenderedBallotCountFromBoxCount\"),\n ordinaryBallotCountFromBallotPaperAccount=party_count_body.get(\n \"ordinaryBallotCountFromBallotPaperAccount\"),\n tenderedBallotCountFromBallotPaperAccount=party_count_body.get(\n \"tenderedBallotCountFromBallotPaperAccount\")\n )\n\n for issued_ballot_box_id in party_count_body.get(\"ballotBoxesIssued\"):\n tallySheetVersionRow.add_issued_ballot_box(issued_ballot_box_id)\n\n for received_ballot_box_id in party_count_body.get(\"ballotBoxesReceived\"):\n tallySheetVersionRow.add_received_ballot_box(received_ballot_box_id)\n\n db.session.commit()\n\n return TallySheetVersionSchema().dump(tallySheetVersion).data\n","sub_path":"api/TallySheetVersionApi/TallySheetVersionCE201Api.py","file_name":"TallySheetVersionCE201Api.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"62886251","text":"# 
--------------\n# Code starts here\n\n# Create the lists \nclass_1 = ['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio' ]\nclass_2 = ['Hilary Mason','Carla Gentry','Corinna Cortes']\n\n# Concatenate both the strings\nnew_class = (class_1 + class_2)\nprint (new_class)\n\n\n# Append the list\nnew_class.append('Peter Warden')\n\n# Print updated list\nprint(new_class)\n\n\n# Remove the element from the list\nnew_class.remove('Carla Gentry')\n\n# Print the list\nprint(new_class)\n\n# Code ends here\n\n\n# --------------\n# Code starts here0\ncourses={\"Math\":65 ,\"English\":70,\"History\":80,\"French\":70,\"Science\":60}\n#printing values\nfor x in courses.values():\n print(x)\n\ntotal=0\n#finding total\nfor x in courses.values():\n total=total+x\n \nprint(total) \n\n#finding percentage \n\npercentage=(total)/500*100\nprint(percentage)\n\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\nmathematics={\"Geoffrey Hinton\":78,\"Andrew Ng\":95,\"Sebastian Raschka\":65,\n\"Yoshua Benjio\":50,\"Hilary Mason\":70,\"Corinna Cortes\":66,\"Peter Warden\":75}\n\nfor x,y in mathematics.items():\n print(x,y)\n\ntopper=max(mathematics,key=mathematics.get)\nprint(topper)\n\n\n# Code ends here \n\n\n# --------------\n# Given string\ntopper = 'andrew ng'\n\n\n# Code starts here\n\n#split the name\nfirst_name,last_name=topper.split(\" \")\n\n#conactenate the name\nfull_name=last_name+\" \"+first_name\nprint(full_name) \n\n#convert to upper\ncertificate_name=full_name.upper()\nprint(certificate_name)\n\n# Code ends here\n\n\n","sub_path":"Student-Management-System/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"429828209","text":"def foo(weights):\n \"\"\"\n This function returns 0 if it is impossible to split items into two piles,\n such that weight of one pile is not larger than 2 weights of the other, and\n list of items' weight from the first pile otherwise.\n [100, 20, 20] -> 0\n [10, 10, 11] -> [10, 10]\n \"\"\"\n W = sum(weights)\n low = W/3\n high = 2*low\n res = []\n weight = 0\n for item in weights:\n if weight + item <= high:\n weight += item\n res.append(item)\n if weight >= low:\n return res\n else:\n return 0\n \n\nif __name__ == '__main__':\n input_data = list(map(int, input('Enter weights of all items separated with space: ').split()))\n print(foo(input_data))\n","sub_path":"t_2017_10.py","file_name":"t_2017_10.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"429180710","text":"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nimport os\nimport operator\nimport tempfile\nfrom unittest import TestCase\n\nimport pytest\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torch import nn\nfrom test.pytorch.utils._train_torch_lightning import create_data_loader, data_transform\nimport 
torchmetrics\n\nfrom bigdl.nano.pytorch import Trainer\nfrom bigdl.nano.pytorch import InferenceOptimizer\nfrom bigdl.nano.pytorch.vision.models import vision\nfrom bigdl.nano.utils.log4Error import invalidOperationError\nfrom bigdl.nano.utils.util import compare_version\n\nbatch_size = 256\nnum_workers = 0\ndata_dir = os.path.join(os.path.dirname(__file__), \"data\")\n\n\nclass ResNet18(nn.Module):\n def __init__(self, num_classes, pretrained=True, include_top=False, freeze=True):\n super().__init__()\n backbone = vision.resnet18(pretrained=pretrained, include_top=include_top, freeze=freeze)\n output_size = backbone.get_output_size()\n head = nn.Linear(output_size, num_classes)\n self.model = nn.Sequential(backbone, head)\n\n def forward(self, x):\n return self.model(x)\n\n\nclass LitResNet18(LightningModule):\n def __init__(self, num_classes, pretrained=True, include_top=False, freeze=True):\n super().__init__()\n backbone = vision.resnet18(pretrained=pretrained, include_top=include_top, freeze=freeze)\n output_size = backbone.get_output_size()\n head = nn.Linear(output_size, num_classes)\n self.classify = nn.Sequential(backbone, head)\n\n def forward(self, *args):\n return self.classify(args[0])\n\n\nclass ModelCannotCopy(ResNet18):\n def __deepcopy__(self, obj):\n invalidOperationError(False, \"This model cannot be deepcopy\")\n\n\nclass TestTrainer(TestCase):\n model = ResNet18(10, pretrained=False, include_top=False, freeze=True)\n loss = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n train_loader = create_data_loader(data_dir, batch_size, num_workers, data_transform)\n user_defined_pl_model = LitResNet18(10)\n\n def test_quantize_inc_ptq_compiled(self):\n # Test if a Lightning Module compiled by nano works\n train_loader_iter = iter(self.train_loader)\n trainer = Trainer(max_epochs=1)\n pl_model = Trainer.compile(self.model, self.loss, self.optimizer)\n x = next(train_loader_iter)[0]\n\n # Case 1: Default\n qmodel = InferenceOptimizer.quantize(pl_model,\n calib_data=self.train_loader)\n assert qmodel\n out = qmodel(x)\n assert out.shape == torch.Size([256, 10])\n\n # Case 2: Override by arguments\n qmodel = InferenceOptimizer.quantize(pl_model,\n calib_data=self.train_loader,\n metric=torchmetrics.F1Score('multiclass', num_classes=10),\n approach='static',\n tuning_strategy='basic',\n accuracy_criterion={'relative': 0.99,\n 'higher_is_better': True})\n\n assert qmodel\n out = qmodel(x)\n assert out.shape == torch.Size([256, 10])\n\n # Case 3: Dynamic quantization\n qmodel = InferenceOptimizer.quantize(pl_model, approach='dynamic')\n assert qmodel\n out = qmodel(x)\n assert out.shape == torch.Size([256, 10])\n\n # Case 4: Invalid approach\n invalid_approach = 'qat'\n with pytest.raises(RuntimeError, match=\"Approach should be 'static' or 'dynamic', \"\n \"{} is invalid.\".format(invalid_approach)):\n InferenceOptimizer.quantize(pl_model, approach=invalid_approach)\n\n # Case 5: Test if registered metric can be fetched successfully\n qmodel = InferenceOptimizer.quantize(pl_model,\n calib_data=self.train_loader,\n metric=torchmetrics.F1Score('multiclass', num_classes=10),\n accuracy_criterion={'relative': 0.99,\n 'higher_is_better': True})\n assert qmodel\n out = qmodel(x)\n assert out.shape == torch.Size([256, 10])\n\n trainer.validate(qmodel, self.train_loader)\n trainer.test(qmodel, self.train_loader)\n trainer.predict(qmodel, self.train_loader)\n\n # save and load\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n 
InferenceOptimizer.save(qmodel, tmp_dir_name)\n loaded_qmodel = InferenceOptimizer.load(tmp_dir_name, pl_model)\n assert loaded_qmodel\n out = loaded_qmodel(x)\n assert out.shape == torch.Size([256, 10])\n\n def test_quantize_inc_ptq_customized(self):\n # Test if a Lightning Module not compiled by nano works\n train_loader_iter = iter(self.train_loader)\n x = next(train_loader_iter)[0]\n trainer = Trainer(max_epochs=1)\n\n qmodel = InferenceOptimizer.quantize(self.user_defined_pl_model,\n calib_data=self.train_loader)\n assert qmodel\n out = qmodel(x)\n assert out.shape == torch.Size([256, 10])\n\n # save and load\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n InferenceOptimizer.save(qmodel, tmp_dir_name)\n loaded_qmodel = InferenceOptimizer.load(tmp_dir_name, self.user_defined_pl_model)\n assert loaded_qmodel\n out = loaded_qmodel(x)\n assert out.shape == torch.Size([256, 10])\n\n def test_quantize_inc_ptq_with_tensor(self):\n train_loader_iter = iter(self.train_loader)\n trainer = Trainer(max_epochs=1)\n pl_model = Trainer.compile(self.model, self.loss, self.optimizer)\n x = next(train_loader_iter)[0]\n\n # Case 1: quantize with single tensor\n qmodel = InferenceOptimizer.quantize(pl_model,\n calib_data=x)\n assert qmodel\n out = qmodel(x)\n assert out.shape == torch.Size([256, 10])\n \n # Case 2: quantize with tensor tuple\n qmodel = InferenceOptimizer.quantize(pl_model,\n # fake a label\n calib_data=(x, torch.ones(1)))\n assert qmodel\n out = qmodel(x)\n assert out.shape == torch.Size([256, 10])\n\n def test_quantize_inc_ptq_compiled_context_manager(self):\n # Test if a Lightning Module compiled by nano works\n train_loader_iter = iter(self.train_loader)\n trainer = Trainer(max_epochs=1)\n pl_model = Trainer.compile(self.model, self.loss, self.optimizer)\n x = next(train_loader_iter)[0]\n\n qmodel = InferenceOptimizer.quantize(pl_model,\n calib_data=self.train_loader,\n thread_num=2)\n assert qmodel\n\n with InferenceOptimizer.get_context(qmodel):\n assert torch.get_num_threads() == 2\n out = qmodel(x)\n assert out.shape == torch.Size([256, 10])\n \n with tempfile.TemporaryDirectory() as tmp_dir_name:\n InferenceOptimizer.save(qmodel, tmp_dir_name)\n model = InferenceOptimizer.load(tmp_dir_name, pl_model)\n\n with InferenceOptimizer.get_context(model):\n assert torch.get_num_threads() == 2\n out = model(x)\n assert out.shape == torch.Size([256, 10])\n\n def test_quantize_inc_ptq_compiled_additional_attributes(self):\n # Test if a Lightning Module compiled by nano works\n train_loader_iter = iter(self.train_loader)\n pl_model = Trainer.compile(self.model, self.loss, self.optimizer)\n # patch a attribute\n pl_model.channels = 3\n def hello():\n print(\"hello world!\")\n # patch a function\n pl_model.hello = hello\n x = next(train_loader_iter)[0]\n\n qmodel = InferenceOptimizer.quantize(pl_model,\n calib_data=self.train_loader,\n thread_num=2)\n assert qmodel\n assert qmodel.channels == 3\n qmodel.hello()\n\n with InferenceOptimizer.get_context(qmodel):\n assert torch.get_num_threads() == 2\n out = qmodel(x)\n assert out.shape == torch.Size([256, 10])\n\n # This UT will fail with INC < 2.0\n @pytest.mark.skipif(compare_version(\"neural_compressor\", operator.lt, \"2.0\"), reason=\"\")\n def test_ipex_int8_quantize_with_model_cannot_deepcopy(self):\n model = ModelCannotCopy(num_classes=10)\n InferenceOptimizer.quantize(model,\n calib_data=self.train_loader,\n method=\"ipex\",\n # inplace=False is setting to disable back up ipex quantization\n inplace=False)\n\n # INC 1.14 and 2.0 
doesn't supprot quantizing pytorch-lightning module,\n # but we have some workaround for pl models returned by our `Trainer.compile`\n def test_quantize_with_pl_model(self):\n trainer = Trainer(max_epochs=1)\n pl_model = Trainer.compile(self.model, self.loss, self.optimizer)\n trainer.fit(pl_model, self.train_loader)\n InferenceOptimizer.quantize(pl_model,\n calib_data=self.train_loader)\n","sub_path":"python/nano/test/inc/pytorch/test_quantize.py","file_name":"test_quantize.py","file_ext":"py","file_size_in_byte":10202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"157903690","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\n\"\"\"test_kill_stmgr.py\"\"\"\nimport logging\n\nfrom contextlib import contextmanager\nfrom subprocess import run, Popen\nfrom time import sleep\n\nfrom . import test_template\n\n\n@contextmanager\ndef bound_subprocess(popen: Popen) -> None:\n with popen:\n logging.debug(f\"starting {popen!r}\")\n try:\n yield\n finally:\n logging.debug(f\"killing {popen!r}\")\n popen.kill()\n popen.communicate()\n\ndef run_explorer(*args):\n cmd = [\"heron-explorer\", *args]\n logging.debug(f\"running command {cmd!r}\")\n run(cmd, check=True, timeout=5)\n logging.debug(f\"finished command {cmd!r}\")\n\n\nclass TestExplorer(test_template.TestTemplate):\n\n\n def execute_test_case(self):\n from getpass import getuser\n cre = f\"{self.params['cluster']}/{getuser()}/default\"\n topology = self.params[\"topologyName\"]\n # heron-explorer depens on heron-tracker, so start an instance as it is not started\n # by heron-cli when running against the local \"cluster\"\n with bound_subprocess(Popen([\"heron-tracker\"])):\n sleep(2)\n run_explorer(\"clusters\")\n\n cre_parts = cre.split(\"/\")\n for i in range(len(cre_parts)):\n run_explorer(\"topologies\", \"/\".join(cre_parts[:i+1]))\n\n run_explorer(\"logical-plan\", cre, topology)\n run_explorer(\"logical-plan\", cre, topology, \"--component-type=bolts\")\n run_explorer(\"logical-plan\", cre, topology, \"--component-type=spouts\")\n\n run_explorer(\"physical-plan\", \"containers\", cre, topology)\n run_explorer(\"physical-plan\", \"containers\", cre, topology, \"--id=1\")\n run_explorer(\"physical-plan\", \"metrics\", cre, topology)\n","sub_path":"integration_test/src/python/local_test_runner/test_explorer.py","file_name":"test_explorer.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"333254322","text":"\n# -*- coding: utf-8 -*-\n\nimport json\nimport requests\nimport scrapy\nfrom lianjiascrapy.items import FangXiaoQuDetItem\n\n\nclass LianJiaXiaoQuDet(scrapy.Spider):\n name = 
'fangxiaoqudet'\n allowed_domains = ['fang.com']\n\n def start_requests(self):\n start_urls = []\n with open('xichengfang.json', encoding=\"utf-8\") as f:\n for item in f.readlines():\n item = json.loads(item)\n req = scrapy.Request(item['url'])\n start_urls.append(req)\n return start_urls\n\n def parse(self, response):\n item = FangXiaoQuDetItem()\n # name = response.xpath('//div[@class=\"con clearfix\"]/div/div[@class=\"box\"][1]').extract()\n # if name:\n # item['name'] = name[0].strip()\n # else:\n # item['name'] = 'None'\n\n # for item in response.xpath('//div[@class=\"con clearfix\"]//div[@class=\"box\"][2]')\n name = response.xpath('//div[@class=\"ceninfo_sq\"]/h1/a/text()').extract()\n if name:\n item['name'] = name[0].strip()\n else:\n item['name'] = '暂无信息'\n gongshui = response.xpath('//div[@class=\"con clearfix\"]//div[@class=\"box\"][2]//dl[@class=\"clearfix mr30\"]//dd[1]/span/text()').extract()\n if gongshui:\n item['gongshui'] = gongshui[0].strip()\n else:\n item['gongshui'] = '暂无信息'\n sec_manage = response.xpath('//div[@class=\"con clearfix\"]//div[@class=\"box\"][2]//dl[@class=\"clearfix mr30\"]//dt[2]/text()').extract()\n if sec_manage:\n item['sec_manage'] = sec_manage[0].strip()\n else:\n item['sec_manage'] = '暂无信息'\n tingchewei = response.xpath('//div[@class=\"con clearfix\"]//div[@class=\"box\"][2]//dl[@class=\"clearfix mr30\"]//dt[3]/text()').extract()\n if tingchewei:\n item['tingchewei'] = tingchewei[0].strip()\n else:\n item['tingchewei'] = '暂无信息'\n ranqi = response.xpath('//div[@class=\"con clearfix\"]//div[@class=\"box\"][2]//dl[@class=\"clearfix mr30\"]//dd[4]/span/text()').extract()\n if ranqi:\n item['ranqi'] = ranqi[0].strip()\n else:\n item['ranqi'] = '暂无信息'\n volume = response.xpath('/html/body/div[3]/div[4]/div[1]/div[1]/div[2]/dl/dd[17]/text()').extract()\n if volume:\n item['volume'] = volume[0].strip()\n else:\n item['volume'] = '暂无信息'\n struct = response.xpath('/html/body/div[3]/div[4]/div[1]/div[1]/div[2]/dl/dd[10]/text()').extract()\n if struct:\n item['struct'] = struct[0].strip()\n else:\n item['struct'] = '暂无信息'\n\n\n # zone = response.xpath('//ul[@class=\"build_info\"]/li[3]/span[2]/text()').extract()\n # if zone:\n # item['zone'] = zone[0].replace(' ', '').replace('\\r\\n', '')\n # else:\n # item['zone'] = unicode('暂无信息', 'utf-8')\n # price = response.xpath('//div[@class=\"village_name\"]/dl/dd/p[2]/span[2]/span/text()').extract()\n # if price:\n # item['price'] = price[0].strip()\n # else:\n # item['price'] = unicode('暂无信息', 'utf-8')\n # address = response.xpath('//ul[@class=\"build_info\"]/li[21]/span[2]/text()').extract()\n # if address:\n # item['address'] = address[0].strip('[').strip(']').strip()\n # else:\n # item['address'] = unicode('暂无信息', 'utf-8')\n # huanxian = response.xpath('//ul[@class=\"build_info\"]/li[5]/span[2]/text()').extract()\n # if huanxian:\n # item['huanxian'] = huanxian[0].strip()\n # else:\n # item['huanxian'] = unicode('暂无信息', 'utf-8')\n # wuyetype = response.xpath('//ul[@class=\"build_info\"]/li[1]/span[2]/text()').extract()\n # if wuyetype:\n # item['wuyetype'] = wuyetype[0].strip()\n # else:\n # item['wuyetype'] = unicode('暂无信息', 'utf-8') \n # product = response.xpath('//ul[@class=\"build_info\"]/li[8]/span[2]/text()').extract()\n # if product:\n # item['product'] = product[0].strip()\n # else:\n # item['product'] = unicode('暂无消息', 'utf-8')\n # landarea = response.xpath('//ul[@class=\"build_info\"]/li[11]/span[2]/text()').extract()\n # if landarea:\n # item['landarea'] = landarea[0].strip()\n # else:\n # 
item['landarea'] = unicode('暂无信息', 'utf-8') \n # wuyegongsi = response.xpath('//ul[@class=\"build_info\"]/li[20]/span[2]/text()').extract()\n # if wuyegongsi:\n # item['wuyegongsi'] = wuyegongsi[0].strip()\n # else:\n # item['wuyegongsi'] = unicode('暂无信息', 'utf-8')\n # fee = response.xpath('//ul[@class=\"build_info\"]/li[15]/span[2]/text()').extract()\n # if fee:\n # item['fee'] = fee[0].strip()\n # else:\n # item['fee'] = unicode('暂无信息', 'utf-8')\n # code = response.xpath('//ul[@class=\"build_info\"]/li[4]/span[2]/text()').extract()\n # if code:\n # item['code'] = code[0].strip()\n # else:\n # item['code'] = unicode('暂无信息', 'utf-8')\n # propertydict = response.xpath('//ul[@class=\"build_info\"]/li[6]/span[2]/text()').extract()\n # if propertydict:\n # item['propertydict'] = propertydict[0].strip()\n # else:\n # item['propertydict'] = unicode('暂无信息', 'utf-8')\n # shape = response.xpath('//ul[@class=\"build_info\"]/li[9]/span[2]/text()').extract()\n # if shape:\n # item['shape'] = shape[0].strip()\n # else:\n # item['shape'] = unicode('暂无信息', 'utf-8')\n # buildarea = response.xpath('//ul[@class=\"build_info\"]/li[10]/span[2]/text()').extract()\n # if buildarea:\n # item['buildarea'] = buildarea[0].strip()\n # else:\n # item['buildarea'] = unicode('暂无信息', 'utf-8')\n # rooms = response.xpath('//ul[@class=\"build_info\"]/li[12]/span[2]/text()').extract()\n # if rooms:\n # item['rooms'] = rooms[0].strip()\n # else:\n # item['rooms'] = unicode('暂无信息', 'utf-8')\n # green = response.xpath('//ul[@class=\"build_info\"]/li[13]/span[2]/text()').extract()\n # if green:\n # item['green'] = green[0].strip()\n # else:\n # item['green'] = unicode('暂无信息', 'utf-8')\n # if address:\n # location_str = 'https://api.map.baidu.com/geocoder/v2/?address=' + item['address'].split(' ')[0] + '&output=json&ak=39GuXLCBZK4Tk7wxdtUZ5NqPbOK1iRRG'\n # location = requests.get(location_str).json()\n # if location['status'] == 0:\n # lng = location['result']['location']['lng']\n # if lng:\n # item['lng'] = lng\n # else:\n # item['lng'] = unicode('暂无信息', 'utf-8')\n # lat = location['result']['location']['lat']\n # if lat:\n # item['lat'] = lat\n # else:\n # item['lat'] = unicode('暂无信息', 'utf-8')\n # else:\n # item['lng'] = unicode('暂无信息', 'utf-8')\n # item['lat'] = unicode('暂无信息', 'utf-8')\n # url = response.url\n # if url:\n # item['url'] = url.strip()\n # else:\n # item['url'] = unicode('暂无信息', 'utf-8')\n yield item","sub_path":"lianjiascrapy/spiders/box/fangxiaoqudet.py","file_name":"fangxiaoqudet.py","file_ext":"py","file_size_in_byte":7505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"358351707","text":"\"\"\"\nAutomated testing of non-Python code snippets in the docs\n\"\"\"\nimport re\nimport unittest\nfrom pathlib import Path\n\nimport pytest\nimport requests\nfrom dash.testing.application_runners import JuliaRunner, RRunner\n\nfrom .helpers import clean_path, drop_keys, py_source_to_app, rename_variable\nfrom .wrappers import JL_WRAPPER, PY_WRAPPER, R_WRAPPER\n\nHERE = Path(__file__).parent\n\nPATTERN = re.compile(r\"{{example:(.*)}}\")\nPARAMS = [\n (\n path,\n [match.split(\":\") for match in PATTERN.findall(path.read_text())],\n )\n for path in HERE.parent.glob(\"*.md\")\n]\n\nSKIP = [\n \"components/table/kwargs.py\",\n \"components/table/color.py\",\n \"components/tabs/active_tab.py\",\n]\nENVS = {\n \"modal.md\": {\n \"LOREM\": (HERE.parent / \"modal\" / \"lorem.txt\").read_text().strip()\n },\n}\n\nR_PORT = 8051\nJL_PORT = 8053\n\n\n@pytest.fixture\ndef 
dashr_server():\n with RRunner() as starter:\n starter.port = R_PORT\n yield starter\n\n\n@pytest.fixture\ndef dashjl_server():\n with JuliaRunner() as starter:\n starter.port = JL_PORT\n yield starter\n\n\n@pytest.mark.parametrize(\"config\", PARAMS)\ndef test_r_snippets(dash_thread_server, dashr_server, config):\n md_path, data = config\n env = ENVS.get(md_path.name)\n\n python_r_compare = []\n\n # Concatenate all the snippets in the markdown file together\n for i, (snippet_path, name) in enumerate(data):\n if snippet_path in SKIP:\n continue\n\n snippet_path = HERE.parent / clean_path(snippet_path)\n py_snippet = rename_variable(snippet_path, i, name)\n\n r_snippet_path = snippet_path.parent / f\"{snippet_path.stem}.R\"\n\n if r_snippet_path.exists():\n r_snippet = rename_variable(\n r_snippet_path, i, name, assign_op=\"<-\"\n )\n python_r_compare.append((py_snippet, r_snippet, f\"{name}__{i}\"))\n\n if python_r_compare:\n assert_layouts_equal(\n python_r_compare,\n dashr_server,\n R_WRAPPER,\n R_PORT,\n dash_thread_server,\n env,\n 8050,\n )\n\n\n@pytest.mark.parametrize(\"config\", PARAMS)\ndef test_jl_snippets(dash_thread_server, dashjl_server, config):\n md_path, data = config\n env = ENVS.get(md_path.name)\n\n python_jl_compare = []\n\n # Concatenate all the snippets in the markdown file together\n for i, (snippet_path, name) in enumerate(data):\n if snippet_path in SKIP:\n continue\n\n snippet_path = HERE.parent / clean_path(snippet_path)\n py_snippet = rename_variable(snippet_path, i, name)\n\n jl_snippet_path = snippet_path.parent / f\"{snippet_path.stem}.jl\"\n\n if jl_snippet_path.exists():\n jl_snippet = rename_variable(jl_snippet_path, i, name)\n python_jl_compare.append((py_snippet, jl_snippet, f\"{name}__{i}\"))\n\n if python_jl_compare:\n assert_layouts_equal(\n python_jl_compare,\n dashjl_server,\n JL_WRAPPER,\n JL_PORT,\n dash_thread_server,\n env,\n 8052,\n )\n\n\ndef test_landing_page_example(dash_thread_server, dashr_server, dashjl_server):\n index_dir = HERE.parent / \"index\"\n py_source = (index_dir / \"simple.py\").read_text()\n r_source = (\n (index_dir / \"simple.R\").read_text().replace(\"8050\", str(R_PORT))\n )\n jl_source = (\n (index_dir / \"simple.jl\").read_text().replace(\"8050\", str(JL_PORT))\n )\n\n app = py_source_to_app(py_source, {})\n dash_thread_server.start(app, port=8050)\n py_layout = requests.get(f\"{dash_thread_server.url}/_dash-layout\").json()\n\n dashr_server.start(r_source)\n r_layout = requests.get(f\"{dashr_server.url}/_dash-layout\").json()\n\n dashjl_server.start(jl_source)\n jl_layout = requests.get(f\"{dashjl_server.url}/_dash-layout\").json()\n\n # Test layouts match\n unittest.TestCase().assertDictEqual(\n drop_keys(py_layout), drop_keys(r_layout)\n )\n unittest.TestCase().assertDictEqual(\n drop_keys(py_layout), drop_keys(jl_layout)\n )\n\n\ndef assert_layouts_equal(\n compare, runner, wrapper, port, py_runner, py_env, py_port\n):\n # Get python snippet layout\n app = py_source_to_app(\n PY_WRAPPER.format(\n snippet=\"\\n\".join(x[0] for x in compare),\n components=\", \".join(x[2] for x in compare),\n ),\n env=py_env,\n )\n py_runner.start(app, port=py_port)\n py_layout = requests.get(f\"{py_runner.url}/_dash-layout\").json()\n\n runner.start(\n wrapper.format(\n snippet=\"\\n\".join(x[1] for x in compare),\n components=\", \".join(x[2] for x in compare),\n port=port,\n )\n )\n layout = requests.get(f\"{runner.url}/_dash-layout\").json()\n\n # Test layouts match\n unittest.TestCase().assertDictEqual(\n drop_keys(py_layout), 
drop_keys(layout)\n )\n","sub_path":"docs/components_page/components/__tests__/test_snippets.py","file_name":"test_snippets.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"400022273","text":"import asyncio\nimport itertools\nimport logging\nimport random\nfrom urllib.parse import quote, urljoin\n\n\nimport aiohttp\nfrom async_lru import alru_cache\nfrom decouple import config\n\nTRANSIFEX_TOKEN = config(\"TRANSIFEX_TOKEN\")\n\nTRANSIFEX_API = {\n \"python\": \"https://www.transifex.com/api/2/project/python-newest/\",\n \"jupyter\": \"https://www.transifex.com/api/2/project/jupyter-meta-documentation/\",\n}\n\nPROJECT_URL = {\n \"python\": (\n \"https://www.transifex.com/\"\n \"python-doc/python-newest/translate/#{language}/{resource}/1\"\n \"?q={query_string}\"\n ),\n \"jupyter\": (\n \"https://www.transifex.com/\"\n \"project-jupyter/jupyter-meta-documentation/translate/#{language}/{resource}/1\"\n \"?q={query_string}\"\n ),\n}\n\nFILTER_RESOURCES_TO_BE_TRANSLATED = {\n \"python\": lambda r: r.split(\"--\")[0] in [\"bugs\", \"howto\", \"library\"],\n \"jupyter\": None,\n}\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger()\n\n\nasync def transifex_api(url, project, data=None, retrying=False):\n if retrying:\n logger.info(\"retrying url=%s\", url)\n\n auth = aiohttp.BasicAuth(login=\"api\", password=TRANSIFEX_TOKEN)\n async with aiohttp.ClientSession(auth=auth) as session:\n http_method = session.put if data else session.get\n args = {\"json\": data} if data else {}\n\n try:\n async with http_method(\n urljoin(TRANSIFEX_API[project], url), **args\n ) as response:\n logger.info(\"url=%s, status_code=%s\", url, response.status)\n return await response.json()\n\n except aiohttp.client_exceptions.ClientConnectorSSLError as e:\n logger.error(\"url=%s, error=%s\", url, e)\n if not retrying:\n await asyncio.sleep(2)\n return await transifex_api(url, project, retrying=True)\n raise\n\n\nasync def random_resource(project):\n resources = await transifex_api(f\"resources/\", project)\n resources = [resource[\"slug\"] for resource in resources]\n\n if FILTER_RESOURCES_TO_BE_TRANSLATED[project]:\n resources = filter(FILTER_RESOURCES_TO_BE_TRANSLATED[project], resources)\n\n resource = random.choice(list(resources))\n logger.info(\"random_resource, resource=%s\", resource)\n return resource\n\n\nasync def strings_from_resource(resource, language, project):\n strings = await transifex_api(\n f\"resource/{resource}/translation/{language}/strings/?details\", project,\n )\n logger.info(\n \"getting strings from resource, resource=%s, strings_found=%s\",\n resource,\n len(strings),\n )\n for string in strings:\n string[\"resource\"] = resource\n\n return strings\n\n\nasync def random_string(\n language, project, resource=None, translated=None, reviewed=None, max_size=None\n):\n if not resource:\n resource = await random_resource(project)\n\n strings = await strings_from_resource(resource, language, project)\n\n if translated is not None:\n strings = filter(lambda s: bool(s[\"translation\"]) == translated, strings)\n\n if reviewed is not None:\n strings = filter(lambda s: s[\"reviewed\"] == reviewed, strings)\n\n if max_size is not None:\n strings = filter(lambda s: len(s[\"source_string\"]) <= max_size, strings)\n\n strings = list(strings)\n if not strings:\n if max_size:\n max_size += 300\n\n resource = None\n return await random_string(\n language, project, resource, translated, 
reviewed, max_size\n )\n\n return resource, random.choice(list(strings))\n\n\ndef transifex_string_url(resource, key, language, project):\n return PROJECT_URL[project].format(\n resource=resource, language=language, query_string=quote(f\"text:'{key[:20]}'\"),\n )\n\n\nasync def translate_string(resource, string_hash, translation):\n await transifex_api(\n f\"resource/{resource}/translation/pt_BR/string/{string_hash}/\",\n data={\"translation\": translation},\n )\n\n\nasync def download_all_strings(language):\n \"\"\" Download all strings in Transifex to JSON file\n \"\"\"\n resources = await transifex_api(f\"resources/\", \"python\")\n resources = [resource[\"slug\"] for resource in resources]\n print(\"Resources\", len(resources))\n\n sema = asyncio.Semaphore(10)\n async with sema:\n strings = await asyncio.gather(\n *[strings_from_resource(resource, language) for resource in resources]\n )\n strings = list(itertools.chain.from_iterable(strings))\n print(\"Strings\", len(resources))\n return strings\n","sub_path":"i17obot/transifex.py","file_name":"transifex.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"342563758","text":"\"\"\"\nQFAST Soft Pauli Model Module\n\nThis models a circuit as a sequence of fixed gates potentially led by\na generic gate. Generic gates multiplex gate placement, or location.\n\"\"\"\n\n\nimport logging\n\nimport numpy as np\nimport functools as ft\n\nfrom qfast import utils\nfrom qfast import gate\nfrom qfast.decomposition.circuitmodel import CircuitModel\nfrom qfast.decomposition.models.softpauli.genericgate import GenericGate\nfrom qfast.decomposition.models.softpauli.fixedgate import FixedGate\n\n\nlogger = logging.getLogger( \"qfast\" )\n\n\nclass SoftPauliModel ( CircuitModel ):\n\n def __init__ ( self, utry, gate_size, locations, optimizer,\n success_threshold = 1e-3, partial_solution_callback = None,\n progress_threshold = 5e-3 ):\n \"\"\"\n Soft Pauli Model Constructor\n\n Args:\n utry (np.ndarray): The unitary to model.\n\n gate_size (int): The size of the model's gate.\n\n locations (list[tuple[int]): The valid locations for gates.\n\n optimizer (Optimizer): The optimizer available for use.\n\n success_threshold (float): The distance criteria for success.\n\n partial_solution_callback (None or callable): callback for\n partial solutions. 
If not None, then callable that takes\n a list[gate.Gate] and returns nothing.\n\n progress_threshold (float): The distance increase criteria\n for successful expansion.\n \"\"\"\n\n super().__init__( utry, gate_size, locations, optimizer,\n success_threshold, partial_solution_callback )\n\n self.progress_threshold = progress_threshold\n\n self.head = GenericGate( self.num_qubits, self.gate_size,\n self.locations )\n self.append_gate( self.head )\n self.last_dist = 1\n\n def progress ( self ):\n \"\"\"If the model has made progress.\"\"\"\n return self.last_dist - self.distance() > self.progress_threshold\n\n def expand ( self, location ):\n \"\"\"Expand the model by adding gates.\"\"\"\n logger.info( \"Expanding by adding a gate at location %s\"\n % str( location ) )\n\n new_gate = FixedGate( self.num_qubits, self.gate_size, location )\n self.insert_gate( -1, new_gate )\n self.head.lift_restrictions()\n self.head.restrict( location )\n\n def finalize ( self ):\n \"\"\"Finalize the circuit by replacing the head if necessary.\"\"\"\n location = self.head.get_location( self.get_input_slice( -1 ) )\n fun_vals = self.head.get_function_values( self.get_input_slice( -1 ),\n True )\n self.pop_gate()\n\n new_gate = FixedGate( self.num_qubits, self.gate_size, location )\n self.append_gate( new_gate, fun_vals )\n self.optimize( fine = True )\n\n return self.get_gate_list()\n\n def solve ( self ):\n \"\"\"Solve the model for the target unitary.\"\"\"\n failed_locs = []\n\n while True:\n\n self.reset_input()\n\n self.optimize()\n\n logger.info( \"Finished optimizing depth %d at %e distance.\"\n % ( self.depth(), self.distance() ) )\n\n if self.success():\n logger.info( \"Exploration finished: success\" )\n return self.finalize()\n\n location = self.head.get_location( self.get_input_slice( -1 ) )\n\n if self.progress():\n logger.info( \"Progress has been made, depth increasing.\" )\n self.last_dist = self.distance()\n self.expand( location )\n\n elif self.head.cannot_restrict():\n logger.info( \"Progress has not been made.\" )\n logger.info( \"Cannot restrict further, depth increasing.\" )\n\n failed_locs.append( ( location, self.distance() ) )\n\n if len( failed_locs ) > 0:\n failed_locs.sort( key = lambda x : x[1] )\n location, self.last_dist = failed_locs[0]\n else:\n self.last_dist = self.distance()\n\n self.expand( location )\n failed_locs = []\n\n else:\n logger.info( \"Progress has not been made, restricting model.\" )\n failed_locs.append( ( location, self.distance() ) )\n self.head.restrict( location )\n\n return self.finalize()\n\n","sub_path":"qfast/decomposition/models/softpauli/softpaulimodel.py","file_name":"softpaulimodel.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"425579063","text":"#Построить минимальный остов связного неориентированного взвешенного графа. \r\n#Метод решения: алгоритм Борувки-Краскла. 
\r\n#Файл входных данных: Граф, заданный массивом смежности.\r\n\r\nimport random \r\nA = []\r\nf = open(r'in3.txt','r') \r\nfor line in f: \r\n row = [int(i) for i in line.split()] \r\n A.append(row) \r\nlength = A[0][0]\r\nresult = open('out3.txt', 'w')\r\nT=[]\r\nmT=0\r\nF=[0]*(length)\r\nfor i in range(1,length+1): \r\n F[i-1]=i\r\nnear=[0]*(length+1)\r\nd=[0]*(length+1)\r\nw=random.choice(F)\r\nF.remove(w)\r\nfor v in F:\r\n near[v]=w\r\n d[v]=A[v][w-1]\r\nwhile (len(T)A[u][v-1]:\r\n near[u]=v\r\n d[u]=A[u][v-1]\r\nfor a in range(1,length+1):\r\n R=[]\r\n for i in range(0, len(T)):\r\n if a==T[i][0]:\r\n R.append(T[i][1]) \r\n if a==T[i][1]:\r\n R.append(T[i][0])\r\n R.sort()\r\n R.append(0)\r\n res=\" \".join(map(str,R))\r\n result.write(res+\"\\n\")\r\nresult.write(str(mT))\r\n","sub_path":"до 4.py","file_name":"до 4.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"45111376","text":"# https://practice.geeksforgeeks.org/problems/rotate-array-by-n-elements/0\n\ndef rotateArray(n, array):\n limit = n % len(array)\n return array[limit:] + array[:limit]\n\nif __name__ == '__main__':\n T = int(input())\n inputs = [( tuple(map(int, input().split())), list(map(int, input().split()))) for _ in range(T)]\n outputs = [rotateArray(x[0][1], x[1]) for x in inputs]\n v = [' '.join('{0}'.format(y) for y in x) for x in outputs]\n print ('\\n'.join(v))\n","sub_path":"geeksforgeeks.org/rotate_array.py","file_name":"rotate_array.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"544412630","text":"import json\nfrom datetime import timezone\n\nimport responses\n\nfrom descarteslabs.compute import Job, JobStatus\n\nfrom .base import BaseTestCase\n\n\nclass TestCreateJob(BaseTestCase):\n @responses.activate\n def test_create(self):\n params = dict(function_id=\"some-fn\", args=[1, 2], kwargs={\"key\": \"blah\"})\n self.mock_job_create(params)\n\n job = Job(**params)\n assert job.state == \"new\"\n job.save()\n assert job.state == \"saved\"\n assert job.id\n assert job.creation_date == self.now.replace(tzinfo=timezone.utc)\n self.assertDictContainsSubset(params, job.to_dict())\n\n @responses.activate\n def test_create_with_tags(self):\n params = dict(\n function_id=\"some-fn\",\n args=[1, 2],\n kwargs={\"key\": \"blah\"},\n tags=[\"tag1\", \"tag2\"],\n )\n self.mock_job_create(params)\n\n job = Job(**params)\n assert job.state == \"new\"\n job.save()\n assert job.state == \"saved\"\n assert job.id\n assert job.creation_date == self.now.replace(tzinfo=timezone.utc)\n self.assertDictContainsSubset(params, job.to_dict())\n\n\nclass TestListJobs(BaseTestCase):\n @responses.activate\n def test_list_jobs(self):\n self.mock_response(\n responses.GET,\n \"/jobs\",\n json=self.make_page(\n [self.make_job(), self.make_job()],\n page_cursor=\"page2\",\n ),\n )\n self.mock_response(\n responses.GET,\n \"/jobs\",\n json=self.make_page([self.make_job()]),\n )\n jobs = list(Job.list())\n\n for job in jobs:\n assert isinstance(job, Job)\n assert job.state == \"saved\"\n assert job.to_dict()\n\n assert len(jobs) == 3\n self.assert_url_called(\"/jobs?page_size=100\", 1)\n self.assert_url_called(\"/jobs?page_cursor=page2\", 1)\n\n @responses.activate\n def test_list_jobs_compatible(self):\n self.mock_response(\n responses.GET,\n \"/jobs\",\n json=self.make_page([self.make_job()]),\n )\n list(Job.list(status=JobStatus.PENDING, 
function_id=\"some-fn\"))\n self.assert_url_called(\n \"/jobs\",\n params={\n \"page_size\": 100,\n \"status\": \"pending\",\n \"function_id\": \"some-fn\",\n },\n )\n\n list(Job.list(status=[JobStatus.PENDING, JobStatus.RUNNING]))\n self.assert_url_called(\n \"/jobs\",\n params={\n \"page_size\": 100,\n \"status\": [\"pending\", \"running\"],\n },\n )\n\n\nclass TestJob(BaseTestCase):\n @responses.activate\n def test_get(self):\n self.mock_response(\n responses.GET,\n \"/jobs/some-id\",\n json=self.make_job(\n id=\"some-id\",\n function_id=\"function-id\",\n args=[1, 2],\n kwargs={\"first\": \"blah\", \"second\": \"blah\"},\n ),\n )\n job = Job.get(\"some-id\")\n assert job.state == \"saved\"\n assert job.to_dict() == {\n \"args\": [1, 2],\n \"creation_date\": self.now.replace(tzinfo=timezone.utc).isoformat(),\n \"error_reason\": None,\n \"execution_count\": None,\n \"exit_code\": None,\n \"function_id\": \"function-id\",\n \"id\": \"some-id\",\n \"kwargs\": {\"first\": \"blah\", \"second\": \"blah\"},\n \"runtime\": None,\n \"status\": JobStatus.PENDING,\n \"tags\": [],\n \"provisioning_time\": None,\n \"pull_time\": None,\n }\n\n @responses.activate\n def test_delete(self):\n self.mock_response(responses.DELETE, \"/jobs/some-id\")\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n job.delete()\n self.assert_url_called(\"/jobs/some-id\")\n assert job._deleted is True\n assert job.state == \"deleted\"\n\n @responses.activate\n def test_delete_new(self):\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n job._saved = False\n\n with self.assertRaises(ValueError) as ctx:\n job.delete()\n assert \"has not been saved\" in str(ctx.exception)\n assert job._deleted is False\n assert job.state == \"new\"\n\n @responses.activate\n def test_delete_failed(self):\n self.mock_response(responses.DELETE, \"/jobs/some-id\", status=400)\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n\n with self.assertRaises(Exception):\n job.delete()\n\n self.assert_url_called(\"/jobs/some-id\")\n assert job._deleted is False\n assert job.state == \"saved\"\n\n @responses.activate\n def test_result_empty(self):\n self.mock_response(responses.GET, \"/jobs/some-id/result\", body=None)\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n assert job.result() is None\n\n @responses.activate\n def test_result_json(self):\n body = json.dumps({\"test\": \"blah\"}).encode()\n self.mock_response(responses.GET, \"/jobs/some-id/result\", body=body)\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n assert job.result() == {\"test\": \"blah\"}\n\n @responses.activate\n def test_result_float(self):\n body = json.dumps(15.68).encode()\n self.mock_response(responses.GET, \"/jobs/some-id/result\", body=body)\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n assert job.result() == 15.68\n\n @responses.activate\n def test_result_cast(self):\n class CustomString:\n @classmethod\n def deserialize(cls, data: bytes):\n return \"custom\"\n\n self.mock_response(responses.GET, \"/jobs/some-id/result\", body=\"blah\")\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n assert job.result(CustomString) == \"custom\"\n\n with self.assertRaises(ValueError) as ctx:\n job.result(bool)\n assert \"must implement Serializable\" in str(ctx.exception)\n\n @responses.activate\n def test_log(self):\n log_lines = [\"test\", \"log\"]\n log = \"\\n\".join(\n [\n json.dumps({\"date\": self.now.isoformat() + \"Z\", \"log\": log})\n for log in log_lines\n 
]\n )\n log_bytes = (log + \"\\n\").encode()\n self.mock_response(responses.GET, \"/jobs/some-id/log\", body=log_bytes)\n\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n job.log()\n\n @responses.activate\n def test_wait_for_complete(self):\n self.mock_response(\n responses.GET,\n \"/jobs/some-id\",\n json=self.make_job(\n id=\"some-id\",\n function_id=\"function-id\",\n args=[1, 2],\n kwargs={},\n ),\n )\n self.mock_response(\n responses.GET,\n \"/jobs/some-id\",\n json=self.make_job(\n id=\"some-id\",\n function_id=\"function-id\",\n args=[1, 2],\n kwargs={},\n status=JobStatus.SUCCESS,\n ),\n )\n job = Job(id=\"some-id\", function_id=\"function-id\", saved=True)\n job.wait_for_completion(interval=0.1, timeout=5)\n assert job.status == JobStatus.SUCCESS\n\n @responses.activate\n def test_wait_for_complete_timeout(self):\n self.mock_response(\n responses.GET,\n \"/jobs/some-id\",\n json=self.make_job(\n id=\"some-id\",\n function_id=\"function-id\",\n args=[1, 2],\n kwargs={},\n ),\n )\n job = Job(id=\"some-id\", function_id=\"function-id\", saved=True)\n with self.assertRaises(TimeoutError):\n job.wait_for_completion(interval=0.1, timeout=5)\n\n @responses.activate\n def test_modified_patch(self):\n self.mock_response(\n responses.PATCH,\n \"/jobs/some-id\",\n json=self.make_job(id=\"some-id\", function_id=\"some-fn\", args=[1, 2]),\n )\n\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n job.args = [1, 2]\n job.save()\n assert job.state == \"saved\"\n self.assert_url_called(\"/jobs/some-id\", json={\"args\": [1, 2]})\n\n\nclass TestJobNoApi(BaseTestCase):\n @responses.activate\n def test_no_request_when_saved(self):\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n job.save()\n assert len(responses.calls) == 0\n\n @responses.activate\n def test_deleted(self):\n job = Job(id=\"some-id\", function_id=\"some-fn\", saved=True)\n job._deleted = True\n\n with self.assertRaises(AttributeError) as ctx:\n job.save()\n assert \"Job has been deleted\" in str(ctx.exception)\n","sub_path":"descarteslabs/core/compute/tests/test_job.py","file_name":"test_job.py","file_ext":"py","file_size_in_byte":8884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"415848286","text":"#1st character should be an alphabets within a to k\n#second character must be number it iss divisible by 3\n#following by any words\nimport re\n\nrule=\"[a-k][369][a-zA-Z0-9]*\"\n\npattern=input(\"enter variable name\")\n\nmatch=re.fullmatch(rule,pattern)\n\nif(match==None):\n print(\"invalid variable name\")\nelse:\n print(\"valid variable name \")\n\n\n","sub_path":"Regular Expersion/varinamecheck.py","file_name":"varinamecheck.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"384263897","text":"\n\nimport time, random, datetime\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom multiprocessing import Process, Manager\n\nfrom amazon_com_parser import AmazonCom_Parser\nfrom amazon_com_downloader import AmazonCom_Downloader\n\nfrom db import dbConnect, couchCRUD\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"secret!\"\nCORS(app)\n\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef ping():\n\tresponse = { \"data\": None, \"message\": \"SEARCH-SCRAPER: Alive and kick'n!\" }\n\treturn (jsonify(response), 200)\n\n\n\ndef getPriority():\n\ttry:\n\n\t\tqueue = couchCRUD({ \"db\": \"queue_amazonsearchterm\", 
\"limit\": 5 }, \"GET\")\n\n\texcept Exception as err:\n\t\tprint(\"RETRIEVE-ERR: \" + str(err))\n\t\treturn []\n\n\treturn queue\n\n\n\ndef completePriority(mission):\n\ttry:\n\n\t\t# cnxn = dbConnect()\n\t\t# cursor = cnxn.cursor()\n\n\t\t# # 0 means this search term has been executed\n\t\t# cursor.execute(\"UPDATE QUEUE_AmazonSearchTerm SET Priority=0 WHERE Id=%s\"%mission[\"id\"])\n\t\t# cnxn.commit()\n\n\t\tdoc = mission\n\t\tdoc[\"Priority\"] = 0\n\t\tcouchCRUD({\n\t\t\t\"db\": \"queue_amazonsearchterm\",\n\t\t\t\"_id\": doc[\"_id\"],\n\t\t\t\"doc\": doc\n\t\t}, \"UPSERT\")\n\n\texcept Exception as err:\n\t\tprint(\"COMPLETE-ERR: \" + str(err))\n\n\n\ndef getLinks(mission, browser):\n\traw = browser.request_splash(\"https://www.amazon.com/s?url=search-alias%3Daps&field-keywords=\" + mission[\"SearchTerm\"].replace(\" \", \"+\"))\n\tlinks = AmazonCom_Parser(raw).parse_searchPage(mission[\"SearchTerm\"])\n\tprint(\"LINKS: \" + str(len(links)))\n\n\ttry:\n\n\t\tnow = datetime.datetime.utcnow().isoformat()\n\t\tcount = 0\n\t\tfor link in links:\n\t\t\tcount = count + 1\n\t\t\ttry:\n\t\t\t\ttitle = link[\"title\"].encode(\"utf8\", \"ignore\")\n\t\t\texcept:\n\t\t\t\ttry:\n\t\t\t\t\ttitle = link[\"title\"].encode(\"ascii\", \"ignore\")\n\t\t\t\texcept:\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttitle = u\"%s\"%(link[\"title\"])\n\t\t\t\t\texcept:\n\t\t\t\t\t\ttitle = link[\"title\"]\n\n\t\t\t_id = now.split(\"T\")[0] + \":[\" + link[\"search_term\"] + \"]-\" + str(count)\n\t\t\tprint(\"SAVING: \" + _id)\n\t\t\tdoc = {\n\t\t\t\t\"AverageReviewRating\": link[\"rating\"],\n\t\t\t\t\"ImageURL\": link[\"img\"],\n\t\t\t\t\"IsPrime\": link[\"is_prime\"],\n\t\t\t\t\"IsSponsored\": link[\"is_sponsored\"],\n\t\t\t\t\"Price\": link[\"price\"],\n\t\t\t\t\"ProductURL\": link[\"url\"],\n\t\t\t\t\"SearchTerm\": link[\"search_term\"],\n\t\t\t\t\"Seller\": link[\"seller\"],\n\t\t\t\t\"Title\": title,\n\t\t\t\t\"TotalReviews\": link[\"reviews_count\"],\n\t\t\t\t\"CreateDate\": now,\n\t\t\t}\n\n\t\t\tcouchCRUD({\n\t\t\t\t\"db\": \"queue_amazonsearchterm_results\",\n\t\t\t\t\"_id\": _id,\n\t\t\t\t\"doc\": doc\n\t\t\t}, \"UPSERT\")\n\n\t\t\tcouchCRUD({\n\t\t\t\t\"db\": \"queue_amazondetailpage\",\n\t\t\t\t\"_id\": _id,\n\t\t\t\t\"doc\": {\n\t\t\t\t\t\"ASIN\": computeASIN(link[\"url\"]),\n\t\t\t\t\t\"ProductURL\": link[\"url\"],\n\t\t\t\t\t\"Priority\": 1,\n\t\t\t\t\t\"CreateDate\": now\n\t\t\t\t}\n\t\t\t}, \"UPSERT\")\n\n\texcept Exception as err:\n\t\tprint(\"SAVE-LINK-ERR: \" + str(err))\n\t\tprint(doc)\n\n\n\ndef computeASIN(url):\n\ttry:\n\t\t_url = url.split(\"/\")\n\t\t# ASIN is commonly 10 characters long\n\t\t_ASIN = _url[len(_url)-1]\n\t\tif len(_ASIN) == 10:\n\t\t\treturn _ASIN\n\texcept:\n\t\treturn None\n\n\n\ndef startMonitor(flag=False):\n\n\tproxy = \"37.48.118.90:13042\"\n\n\twhile(flag):\n\t\ttry:\n\t\t\tbrowser = AmazonCom_Downloader(proxy)\n\t\t\tqueue = getPriority()\n\n\t\t\tif len(queue) > 0:\n\t\t\t\tmission = random.choice(queue)\n\n\t\t\t\tgetLinks(mission, browser)\n\n\t\t\t\t# flag the Priority = 0 as complete mission\n\t\t\t\tcompletePriority(mission)\n\t\texcept Exception as err:\n\t\t\tprint(\"ERR: \" + str(err))\n\n\t\t# random resting time before querying again in the database\n\t\trest = random.randint(10, 30)\n\t\tprint(\"REST: \" + str(rest))\n\t\ttime.sleep(rest)\n\n\n\n\nif __name__ == '__main__':\n\n\t# nohup python -u api-searchParser.py > logs_searchParse.log &\n\tstartMonitor(True)\n\n\t# process = Process(target=startMonitor, args=(True,))\n\t# process.daemon = True\n\t# 
process.start()\n\n\t# app.debug = True\n\t# app.host = \"0.0.0.0\"\n\t# app.port = 3000\n\t# app.threaded = True\n\t# app.run()\n\n\t# gunicorn --workers=1 --bind=0.0.0.0:3000 --log-level=debug --log-file=logs.log --reload --daemon --timeout=9000 --enable-stdio-inheritance api:app","sub_path":"link-scraper/api-searchParser.py","file_name":"api-searchParser.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"367091209","text":"# MIT License\n#\n# Copyright (c) 2020 Mehran Maghoumi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# ----------------------------------------------------------------------------------------------------------------------\nimport torch\nimport torch.jit as jit\nimport torch.nn as nn\nfrom torch.nn import Parameter\nfrom typing import List, Tuple\nfrom torch import Tensor\nimport math\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass LayerNormGRUCell(jit.ScriptModule):\n def __init__(self, input_size, hidden_size, bias=True):\n super(LayerNormGRUCell, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n self.ln_i2h = torch.nn.LayerNorm(2*hidden_size, elementwise_affine=False)\n self.ln_h2h = torch.nn.LayerNorm(2*hidden_size, elementwise_affine=False)\n self.ln_cell_1 = torch.nn.LayerNorm(hidden_size, elementwise_affine=False)\n self.ln_cell_2 = torch.nn.LayerNorm(hidden_size, elementwise_affine=False)\n\n self.i2h = torch.nn.Linear(input_size, 2 * hidden_size, bias=bias)\n self.h2h = torch.nn.Linear(hidden_size, 2 * hidden_size, bias=bias)\n self.h_hat_W = torch.nn.Linear(input_size, hidden_size, bias=bias)\n self.h_hat_U = torch.nn.Linear(hidden_size, hidden_size, bias=bias)\n self.reset_parameters()\n\n def reset_parameters(self):\n std = 1.0 / math.sqrt(self.hidden_size)\n for w in self.parameters():\n w.data.uniform_(-std, std)\n\n @jit.script_method\n def forward(self, x, h):\n # type: (Tensor, Tensor) -> Tensor\n h = h\n h = h.view(h.size(0), -1)\n x = x.view(x.size(0), -1)\n\n # Linear mappings\n i2h = self.i2h(x)\n h2h = self.h2h(h)\n\n # Layer norm\n i2h = self.ln_i2h(i2h)\n h2h = self.ln_h2h(h2h)\n\n preact = i2h + h2h\n\n # activations\n gates = preact[:, :].sigmoid()\n z_t, r_t = gates.chunk(2, 1)\n # z_t = gates[:, :self.hidden_size]\n # r_t = gates[:, -self.hidden_size:]\n\n # h_hat\n h_hat_first_half = self.h_hat_W(x)\n 
h_hat_last_half = self.h_hat_U(h)\n\n # layer norm\n h_hat_first_half = self.ln_cell_1( h_hat_first_half )\n h_hat_last_half = self.ln_cell_2( h_hat_last_half )\n\n h_hat = torch.tanh(h_hat_first_half + torch.mul(r_t, h_hat_last_half))\n\n h_t = torch.mul(1-z_t, h) + torch.mul(z_t, h_hat)\n\n # Reshape for compatibility\n\n h_t = h_t.view(h_t.size(0), -1)\n return h_t\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass JitGRULayer(jit.ScriptModule):\n def __init__(self, cell, *cell_args):\n super(JitGRULayer, self).__init__()\n self.cell = cell(*cell_args)\n\n @jit.script_method\n def forward(self, x, hidden):\n # type: (Tensor, Tensor) -> Tuple[Tensor, Tensor]\n inputs = x.unbind(0)\n outputs = torch.jit.annotate(List[Tensor], [])\n\n for i in range(len(inputs)):\n hidden = self.cell(inputs[i], hidden)\n outputs += [hidden]\n\n return torch.stack(outputs), hidden\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass JitGRULN(jit.ScriptModule):\n __constants__ = ['hidden_size', 'num_layers', 'batch_first', 'layers']\n\n def __init__(self, input_size, hidden_size, num_layers, batch_first=False, bias=True):\n super(JitGRULN, self).__init__()\n # The following are not implemented.\n assert bias\n\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.batch_first = batch_first\n\n if num_layers == 1:\n self.layers = nn.ModuleList([JitGRULayer(LayerNormGRUCell, input_size, hidden_size)])\n else:\n self.layers = nn.ModuleList([JitGRULayer(LayerNormGRUCell, input_size, hidden_size)] + [JitGRULayer(LayerNormGRUCell, hidden_size, hidden_size)\n for _ in range(num_layers - 1)])\n\n @jit.script_method\n def forward(self, x, h=None):\n # type: (Tensor, Optional[Tensor]) -> Tuple[Tensor, Tensor]\n output_states = jit.annotate(List[Tensor], [])\n\n # Handle batch_first cases\n if self.batch_first:\n x = x.permute(1, 0, 2)\n\n if h is None:\n h = torch.zeros(self.num_layers, x.shape[1], self.hidden_size, dtype=x.dtype, device=x.device)\n\n output = x\n i = 0\n\n for rnn_layer in self.layers:\n output, hidden = rnn_layer(output, h[i])\n output_states += [hidden]\n i += 1\n\n # Don't forget to handle batch_first cases for the output too!\n if self.batch_first:\n output = output.permute(1, 0, 2)\n\n return output, torch.stack(output_states)\n","sub_path":"R2D2GAN/rd_test/with/jit_gru_ln.py","file_name":"jit_gru_ln.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"305425832","text":"\n\"\"\"Ref: https://www.cnblogs.com/shld/p/9758303.html\"\"\"\n\nfrom typing import *\n\n\ndef is_in_circumscribed_rectangular(point: Tuple[float, float], vertexes: List[Tuple[float, float]],\n contain_boundary: bool = True):\n \"\"\"\n Judge if point is in the circumscribed rectangular area given vertexes.\n It's a quick judgement for judging point `not` in an area with given same vertexes.\n :param point: The target point to evaluate, a tuple of x, y value.\n :param vertexes: The vertexes of the area.\n :param contain_boundary: If including the boundaries.\n \"\"\"\n xs, ys = zip(*vertexes)\n min_x, max_x = min(xs), max(xs)\n min_y, max_y = min(ys), max(ys)\n\n x, y = point\n if contain_boundary:\n return (min_x <= x <= max_x) & (min_y <= y <= max_y)\n\n return (min_x < x < max_x) & (min_y < y < max_y)\n\n\ndef is_intersect(point: Tuple[float, float], 
start_point: Tuple[float, float], end_point: Tuple[float, float]):\n \"\"\"\n Judge if point's ray-line crosses the segment with `start_point` and `end_point`\n For default, the ray-line if the right-ray\n \"\"\"\n if point == start_point or point == end_point: # coincide with vertex\n return None\n\n x, y = point\n start_x, start_y = start_point\n end_x, end_y = end_point\n\n if start_y == end_y: # The segment is parallel with right-ray\n return False\n\n if start_y > y and end_y > y: # The segment is on the top of right-ray\n return False\n\n if start_y < y and end_y < y: # The segment is under the right-ray\n return False\n\n # An additional convention to avoid counting multi times when cross the intersect between two neighbour lines\n # If intersect between ray and segment is the bottom point, not count.\n if start_y == y and end_y > y: # The intersect may be bottom point(the start point)\n return False\n\n if end_y == y and start_y > y: # The intersect may be bottom point(the end point)\n return False\n\n if start_x < x and end_x < x: # The segment is on the left of the right-ray\n return False\n\n # Calculate the intersect point (x_seg, y)\n x_seg = start_x + (end_x - start_x) / (end_y - start_y) * (y - start_y)\n if x_seg == x: # The intersect is point itself, e.g. the point is on the boundary\n return None\n if x_seg < x: # The intersect if on the left of point, not on the right-ray\n return False\n return True\n\n\ndef is_in_polygon_area(point: Tuple[float, float], vertexes: List[Tuple[float, float]],\n contain_boundary: bool = True) -> bool:\n # Judge if in the circumscribed rectangular, if not directly return False\n if not is_in_circumscribed_rectangular(point, vertexes, True):\n return False\n\n n_intersects = 0\n for start_point, end_point in zip(vertexes[:-1], vertexes[1:]):\n intersect = is_intersect(point, start_point, end_point)\n if intersect is None: # The point is on the boundary of polygon area\n return contain_boundary\n n_intersects += intersect # Add the count of intersect\n return bool(n_intersects % 2)\n\n\nif __name__ == '__main__':\n vertex_lst = [(0, 0), (1, 1), (1, 2), (0, 2), (0, 0)]\n poi = (0.82, 0.75)\n print(is_in_polygon_area(poi, vertex_lst, contain_boundary=True))\n","sub_path":"others/area_inside_judge/quick_try.py","file_name":"quick_try.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"171114556","text":"\nfrom serial.tools import list_ports\nfrom pydobot import Dobot\n\ndevice = None\n\nclass myDobot(Dobot): \n def grep(self, sx, sy, szu, szd, r, cls):\n #time.sleep(1)\n self.move_to(sx, sy, szu, r, wait=True) # 물체 위로 이동\n self.move_to(sx, sy, szd, r, wait=True) # 물체로 이동하여 Pickup\n self.suck(True)\n self.move_to(sx, sy, szu, r, wait=True) # 물체 위로 이동\n\n if cls:\n dx, dy, dzu, dzd = 35, 250, 40, -10\n\n else:\n dx, dy, dzu, dzd = 35, -250, 40, -10\n\n self.move_to(dx, dy, dzu, r, wait=True) # 목적지로 이동\n self.move_to(dx, dy, dzd, r, wait=True) # 물체를 목적지에 Drop\n self.suck(True)\n self.move_to(dx, dy, dzu, r, wait=True) # 목적지로 이동\n self.move_to(sx, sy, szu, r, wait=True) # 물체 위로 이동\n self.suck(False)\n \n def release(self, sx, sy, szu, szd, r):\n self.move_to(sx, sy, szd, r, wait=True) # 물체로 이동하여 Pickup\n self.suck(False)\n self.move_to(sx, sy, szu, r, wait=True) # 물체 위로 이동\n \n\nport = list_ports.comports()[0].device\ndevice = myDobot(port=port, verbose=False)\ndevice.speed(1500, 1500)\n\n# Dobot Control\n\n# Source(과일, 야채)의 X, Y 좌표 설정\nSRC_X, SRC_Y = 
260, 0\n\n# Source(과일, 야채)의 Z 좌표 설정\nSRC_UP_Z, SRC_DOWN_Z = 50, 50\nR = 0\n\ndevice.grep(SRC_X, SRC_Y, SRC_UP_Z, SRC_DOWN_Z, R, True)\n# device.grep(SRC_X, SRC_Y, SRC_UP_Z, SRC_DOWN_Z, R, True)\n\n","sub_path":"bad.py","file_name":"bad.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"332522825","text":"#!/usr/bin/env python\nfrom pg_catalog.test import *\n\nclass TestCase(TestCase):\n def test(self):\n for r in self.db.query(pg_ts_parser).all():\n is_(r,pg_ts_parser)\n is_int(r.oid)\n is_text(r.prsname)\n is_int(r.prsnamespace)\n is_int(r.prsstart)\n is_int(r.prstoken)\n is_int(r.prsend)\n is_int(r.prsheadline)\n is_int(r.prslextype)\n\nif __name__ == \"__main__\":\n main()","sub_path":"tests/pg_ts_parser.py","file_name":"pg_ts_parser.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"518803226","text":"import json\r\nimport requests\r\nimport telegram\r\n\r\nhas_sent = False\r\nbot = telegram.Bot('TOKEN')\r\n\r\nwith open(\"data.json\") as data_file:\r\n data = json.load(data_file)\r\n\r\n\r\ndef run():\r\n global has_sent\r\n count_web = len(data[\"websites\"])\r\n for x in range(0, count_web):\r\n url = data[\"websites\"][x][\"url\"]\r\n try:\r\n response = requests.head(url)\r\n code = str(response.status_code)\r\n if code != data[\"websites\"][x][\"code\"]:\r\n message_all(\r\n '' + url + '\\nReturned Code: ' + code + \"\\n\" + \"Expected: \" + data[\"websites\"][x][\"code\"])\r\n except Exception:\r\n message_all(\"There is a problem with \" + url)\r\n\r\n\r\ndef message_all(msg):\r\n global has_sent\r\n count_sub = len(data[\"subscribers\"])\r\n if not has_sent:\r\n for x in range(0, count_sub):\r\n bot.send_message(data[\"subscribers\"][x][\"chat_id\"],\r\n \"Hello \" + data[\"subscribers\"][x][\"name\"] + ', I suppose there is bad news for you.')\r\n has_sent = True\r\n for x in range(0, count_sub):\r\n bot.send_message(data[\"subscribers\"][x][\"chat_id\"], msg, telegram.ParseMode.HTML)\r\n\r\n\r\nrun()\r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"86176654","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom openpyxl import load_workbook\n\nwb = load_workbook(\"template.xlsx\")\nsheet = wb.get_sheet_by_name(\"系統百分比結果\")\n\ntemperatures=[]\niced_tea_sales=[]\n\n\nfor row in sheet.iter_rows(min_col=11,min_row=2, max_col=11, max_row=1614):\n for cell in row:\n #print(cell.value)\n temperatures.append(cell.value)\nprint(\"===========================\")\nfor row in sheet.iter_rows(min_col=13 ,min_row=2, max_col=13, max_row=1614):\n for cell in row:\n #print(cell.value)\n iced_tea_sales.append(cell.value)\n\n#temperatures = np.array([29, 28, 34, 31, 25, 29, 32, 31, 24, 33, 25, 31, 26, 30])\n#iced_tea_sales = np.array([77, 62, 93, 84, 59, 64, 80, 75, 58, 91, 51, 73, 65, 84])\n\nlm = LinearRegression()\n#lm.fit(np.reshape(temperatures, (len(temperatures), 1)), np.reshape(iced_tea_sales, (len(iced_tea_sales), 1)))\nlm.fit(np.reshape(iced_tea_sales, (len(iced_tea_sales), 1)),np.reshape(temperatures, (len(temperatures), 1)))\n\n\n\n# 新的氣溫\nto_be_predicted = np.array([60])\npredicted = lm.predict(np.reshape(to_be_predicted, (len(to_be_predicted), 1)))\nprint(predicted)\n#to_be_predicted = 
np.array([511.9759])\n#predicted_sales = lm.predict(np.reshape(to_be_predicted, (len(to_be_predicted), 1)))\n#print(predicted_sales)\n\n\n\n# 視覺化\n#plt.scatter(temperatures, iced_tea_sales, color='black')\n#plt.plot(temperatures, lm.predict(np.reshape(temperatures, (len(temperatures), 1))), color='blue', linewidth=3)\nplt.scatter(temperatures,iced_tea_sales , color='black')\nplt.plot(lm.predict(np.reshape(iced_tea_sales, (len(iced_tea_sales), 1))),iced_tea_sales , color='blue', linewidth=3)\n\nplt.plot(predicted,to_be_predicted , color = 'red', marker = '*', markersize = 20)\n#plt.xticks(())\n#plt.yticks(())\nplt.ylim((0, 350))\nplt.xlabel('RT')\nplt.ylabel('hen')\nplt.show()\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"210032483","text":"import os\n\nfrom celery import Celery, platforms\n\n\nfrom school import settings\n\n# 获取当前文件夹名,即为该Django的项目名\nproject_name = os.path.split(os.path.abspath('.'))[-1]\nproject_settings = '%s.settings' % project_name\n\n# 设置环境变量\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', project_settings)\n\napp = Celery(\"cel_app\")\n\nplatforms.C_FORCE_ROOT = True\n\napp.config_from_object('django.conf:settings')\n\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n","sub_path":"school/school/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"402624126","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/nowandnext/applications/schedule_to_rss.py\n# Compiled at: 2010-04-06 19:32:03\n\"\"\"\nGenerate an RSS feed of the schedule.\n\"\"\"\nimport PyRSS2Gen, datetime, sys, logging\nfrom nowandnext.utils.cmdline import cmdline\nfrom nowandnext.utils.detectos import osinfo\nfrom nowandnext.timezones.utc import utc\nfrom nowandnext.calendar import periods\nfrom nowandnext.calendar.scheduleevent import scheduleevent\nfrom nowandnext.calendar.calQuery import CalQuery, NoCalendarEntry\nfrom nowandnext.utils.textparser import textparser\nfrom nowandnext.timezones.uk import uk as timezone_uk\nlog = logging.getLogger(__name__)\n\nclass schedule_to_rss(cmdline):\n\n def __init__(self, configfilepath):\n self._config = self.getconfiguration(configfilepath)\n self._calargs = (self._config.get('pinger', 'account'),\n self._config.get('pinger', 'password'),\n self._config.get('pinger', 'calendar_name'))\n self.default_website = self._config.get('feed', 'link')\n\n def calendaritemtorssitem(self, calendaritem):\n scheduleitem = scheduleevent(calendaritem, default_website=self._config.get('feed', 'link'))\n query = textparser.translate(textparser.parsetodict(calendaritem.getEvent().getDescription()))\n description = ('\\n').join(query['unmatched'])\n startdate = scheduleitem.getStartTime(tz=timezone_uk)\n startdate.replace(tzinfo=utc)\n item = PyRSS2Gen.RSSItem(title=scheduleitem.getTitle(), link=query.get('web', self.default_website), description=description, guid=scheduleitem.getGuid(), pubDate=startdate)\n return item\n\n def __call__(self):\n calendaritems = self.getcalendaritems()\n builddate = datetime.datetime.now(tz=timezone_uk).astimezone(utc)\n rss = PyRSS2Gen.RSS2(title=self._config.get('feed', 'title'), link=self._config.get('feed', 'link'), 
description=self._config.get('feed', 'description'), lastBuildDate=builddate, items=[ self.calendaritemtorssitem(a) for a in calendaritems ])\n rss.write_xml(sys.stdout, encoding='utf-8')\n\n def getcalendaritems(self):\n now = datetime.datetime.now(timezone_uk)\n sometimeinthefuture = now + periods.onehour * 24\n cal = CalQuery(*self._calargs)\n eventinstances = []\n try:\n eventinstances.append(cal.getCurrentEventInstance(now))\n except NoCalendarEntry:\n log.warn('There is no calendar entry for now.')\n\n eventinstances.extend([ a for a in cal.getEventInstances(now, sometimeinthefuture) if a not in eventinstances ])\n return eventinstances\n\n\ndef main():\n logging.basicConfig()\n (options, args) = schedule_to_rss.mkParser().parse_args()\n if options.verbose:\n logging.getLogger('').setLevel(logging.INFO)\n else:\n logging.getLogger('').setLevel(logging.WARN)\n os_spesific_handler = osinfo.get_logger('Pinger')\n os_spesific_handler.setLevel(logging.WARN)\n logging.getLogger('').addHandler(os_spesific_handler)\n s2r = schedule_to_rss(options.configfilepath)\n s2r()\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/nowandnext-0.2.5.dev_r493-py2.5/schedule_to_rss.py","file_name":"schedule_to_rss.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"630621424","text":"from typing import Dict, Optional\n\nimport torch\nfrom overrides import overrides\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, Embedding\nfrom allennlp.modules.text_field_embedders import BasicTextFieldEmbedder\nfrom allennlp.modules.seq2seq_decoders.seq_decoder import SeqDecoder\nfrom allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n\n\n@Model.register(\"custom_composed_seq2seq\")\nclass CustomComposedSeq2Seq(Model):\n def __init__(self,\n vocab: Vocabulary,\n source_text_embedder: TextFieldEmbedder,\n encoder: Seq2SeqEncoder,\n decoder: SeqDecoder,\n tied_source_embedder_key: Optional[str] = None,\n initializer: InitializerApplicator = InitializerApplicator(),\n regularizer: Optional[RegularizerApplicator] = None) -> None:\n\n super(CustomComposedSeq2Seq, self).__init__(vocab, regularizer)\n\n self._source_text_embedder = source_text_embedder\n self._encoder = encoder\n self._decoder = decoder\n\n if self._encoder.get_output_dim() != self._decoder.get_output_dim():\n raise ConfigurationError(f\"Encoder output dimension {self._encoder.get_output_dim()} should be\"\n f\" equal to decoder dimension {self._decoder.get_output_dim()}.\")\n if tied_source_embedder_key:\n # A bit of a ugly hack to tie embeddings.\n # Works only for `BasicTextFieldEmbedder`, and since\n # it can have multiple embedders, and `SeqDecoder` contains only a single embedder, we need\n # the key to select the source embedder to replace it with the target embedder from the decoder.\n if not isinstance(self._source_text_embedder, BasicTextFieldEmbedder):\n raise ConfigurationError(\"Unable to tie embeddings,\"\n \"Source text embedder is not an instance of `BasicTextFieldEmbedder`.\")\n # pylint: disable=protected-access\n source_embedder = self._source_text_embedder._token_embedders[tied_source_embedder_key]\n if not isinstance(source_embedder, Embedding):\n raise ConfigurationError(\"Unable to tie embeddings,\"\n \"Selected source embedder is not an instance of 
`Embedding`.\")\n if source_embedder.get_output_dim() != self._decoder.target_embedder.get_output_dim():\n raise ConfigurationError(f\"Output Dimensions mismatch between\"\n f\"source embedder and target embedder.\")\n self._source_text_embedder._token_embedders[tied_source_embedder_key] = self._decoder.target_embedder\n initializer(self)\n\n @overrides\n def forward(self, # type: ignore\n source_tokens: Dict[str, torch.LongTensor],\n target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n \"\"\"\n Make foward pass on the encoder and decoder for producing the entire target sequence.\n Parameters\n ----------\n source_tokens : ``Dict[str, torch.LongTensor]``\n The output of `TextField.as_array()` applied on the source `TextField`. This will be\n passed through a `TextFieldEmbedder` and then through an encoder.\n target_tokens : ``Dict[str, torch.LongTensor]``, optional (default = None)\n Output of `Textfield.as_array()` applied on target `TextField`. We assume that the\n target tokens are also represented as a `TextField`.\n Returns\n -------\n Dict[str, torch.Tensor]\n The output tensors from the decoder.\n \"\"\"\n state = self._encode(source_tokens)\n\n return self._decoder(state, target_tokens)\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Finalize predictions.\n \"\"\"\n return self._decoder.post_process(output_dict)\n\n def _encode(self, source_tokens: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Make foward pass on the encoder.\n Parameters\n ----------\n source_tokens : ``Dict[str, torch.LongTensor]``\n The output of `TextField.as_array()` applied on the source `TextField`. This will be\n passed through a `TextFieldEmbedder` and then through an encoder.\n Returns\n -------\n Dict[str, torch.Tensor]\n Map consisting of the key `source_mask` with the mask over the\n `source_tokens` text field,\n and the key `encoder_outputs` with the output tensor from\n forward pass on the encoder.\n \"\"\"\n # shape: (batch_size, max_input_sequence_length, encoder_input_dim)\n embedded_input = self._source_text_embedder(source_tokens)\n # shape: (batch_size, max_input_sequence_length)\n source_mask = util.get_text_field_mask(source_tokens)\n # shape: (batch_size, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = self._encoder(embedded_input, source_mask.bool())\n return {\n \"source_mask\": source_mask,\n \"encoder_outputs\": encoder_outputs,\n }\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n return self._decoder.get_metrics(reset)","sub_path":"summarus/custom_composed_seq2seq.py","file_name":"custom_composed_seq2seq.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"62543313","text":"import xml.etree.ElementTree as ET\nimport hashlib\n\nfiletypes = (\".lua\",\".txt\")\n\nmanifest = ET.parse(\"manifest.xml\")\nroot = manifest.getroot()\n\nfor file in root.iter(\"File\"):\n\tpath = file.get('name')\n\tif not path.endswith(filetypes):\n \t\tcontinue\n\ttry:\n\t\told_hash = file.get('sha1')\n\t\thash = hashlib.sha1(open(path, 'rb').read()).hexdigest()\n\t\tfile.set(\"sha1\", hash)\n\t\tif old_hash != hash:\n\t\t\tprint(\"path {} hash changed: now {}\".format(path,hash))\n\texcept FileNotFoundError:\n\t\tprint(\"file not found, skipping: {}\".format(path))\n\n\n############VERY HACKY#############\n#ElementTree 
re-orders attributes by default sorted() order on Python <=3.7\n#This... er... changes that behaviour\nimport builtins\n_sorted = builtins.sorted\nbuiltins.sorted = lambda x,**args:_sorted(x,**args,reverse=True)\n\t\nmanifest.write(\"manifest.xml\")\n\n","sub_path":"updatemanifest.py","file_name":"updatemanifest.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"321616327","text":"import pickle\r\nimport random\r\nimport sys\r\nimport subprocess\r\nfrom kivy.app import App\r\nfrom kivy.uix.widget import Widget\r\nfrom kivy.properties import ObjectProperty\r\nfrom kivy.config import Config\r\n\r\n\r\nzadani = pickle.load(open(\"gramatika/data.dat\", \"rb\"))\r\nprint(zadani)\r\n\r\nConfig.set('graphics', 'width', '1200')\r\nConfig.set('graphics', 'height', '800')\r\nConfig.set('graphics', 'resizable', False)\r\n\r\n\r\nclass Okno(Widget):\r\n l0 = ObjectProperty(None)\r\n l1 = ObjectProperty(None)\r\n l2 = ObjectProperty(None)\r\n l3 = ObjectProperty(None)\r\n l4 = ObjectProperty(None)\r\n l5 = ObjectProperty(None)\r\n l6 = ObjectProperty(None)\r\n l7 = ObjectProperty(None)\r\n l8 = ObjectProperty(None)\r\n l9 = ObjectProperty(None)\r\n\r\n t0 = ObjectProperty(None)\r\n t1 = ObjectProperty(None)\r\n t2 = ObjectProperty(None)\r\n t3 = ObjectProperty(None)\r\n t4 = ObjectProperty(None)\r\n t5 = ObjectProperty(None)\r\n t6 = ObjectProperty(None)\r\n t7 = ObjectProperty(None)\r\n t8 = ObjectProperty(None)\r\n t9 = ObjectProperty(None)\r\n\r\n button = ObjectProperty(None)\r\n pb = 0\r\n c = ['', '', '', '', '', '', '', '', '', '']\r\n\r\n def btn(self):\r\n if self.pb == 0:\r\n self.pb = 1\r\n self.button.text = \"Zkontrolovat\"\r\n\r\n s = 0\r\n for x in range(10):\r\n s = 0\r\n while s == 0:\r\n self.c[x] = random.randint(0, len(zadani) - 1)\r\n s = 1\r\n for y in range(len(zadani) - 1):\r\n if self.c[x] == self.c[y]:\r\n if x != y:\r\n s = 0\r\n\r\n self.l0.text = zadani[self.c[0]][0]\r\n self.l1.text = zadani[self.c[1]][0]\r\n self.l2.text = zadani[self.c[2]][0]\r\n self.l3.text = zadani[self.c[3]][0]\r\n self.l4.text = zadani[self.c[4]][0]\r\n self.l5.text = zadani[self.c[5]][0]\r\n self.l6.text = zadani[self.c[6]][0]\r\n self.l7.text = zadani[self.c[7]][0]\r\n self.l8.text = zadani[self.c[8]][0]\r\n self.l9.text = zadani[self.c[9]][0]\r\n else:\r\n ch = 0\r\n if self.t0.text == zadani[self.c[0]][1]:\r\n self.l0.color = 0, 1, 0, 1\r\n else:\r\n self.l0.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if self.t1.text == zadani[self.c[1]][1]:\r\n self.l1.color = 0, 1, 0, 1\r\n else:\r\n self.l1.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if self.t2.text == zadani[self.c[2]][1]:\r\n self.l2.color = 0, 1, 0, 1\r\n else:\r\n self.l2.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if self.t3.text == zadani[self.c[3]][1]:\r\n self.l3.color = 0, 1, 0, 1\r\n else:\r\n self.l3.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if self.t4.text == zadani[self.c[4]][1]:\r\n self.l4.color = 0, 1, 0, 1\r\n else:\r\n self.l4.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if self.t5.text == zadani[self.c[5]][1]:\r\n self.l5.color = 0, 1, 0, 1\r\n else:\r\n self.l5.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if self.t6.text == zadani[self.c[6]][1]:\r\n self.l6.color = 0, 1, 0, 1\r\n else:\r\n self.l6.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if self.t7.text == zadani[self.c[7]][1]:\r\n self.l7.color = 0, 1, 0, 1\r\n else:\r\n self.l7.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if self.t8.text == zadani[self.c[8]][1]:\r\n self.l8.color = 0, 1, 0, 1\r\n else:\r\n 
self.l8.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if self.t9.text == zadani[self.c[9]][1]:\r\n self.l9.color = 0, 1, 0, 1\r\n else:\r\n self.l9.color = 1, 0, 0, 1\r\n ch += 1\r\n\r\n if ch > 0:\r\n self.button.text = \"Chyb: \" + str(ch) + \" | Zkontrolovat\"\r\n else:\r\n self.button.text = \"Vyhjál, pjvní\"\r\n subprocess.Popen(['python', 'gramatika/win.py'])\r\n App.stop()\r\n sys.exit(0)\r\n\r\nclass GramatikaApp(App):\r\n def build(self):\r\n return Okno()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n GramatikaApp().run()\r\n","sub_path":"gramatika/gramatika.py","file_name":"gramatika.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"624048469","text":"# Copyright (c) 2021 Emanuele Bellocchia\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n# BIP-0039 reference: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki\n\n# Imports\nfrom __future__ import annotations\nimport os\nfrom enum import auto, Enum, IntEnum, unique\nfrom typing import Dict, List, Optional, Union\nfrom bip_utils.bip39.bip39_ex import Bip39ChecksumError\nfrom bip_utils.bip39.bip39_entropy_generator import Bip39EntropyGenerator\nfrom bip_utils.utils import AlgoUtils, ConvUtils, CryptoUtils\n\n\n@unique\nclass Bip39WordsNum(IntEnum):\n \"\"\" Enumerative for BIP-0039 words number. \"\"\"\n\n WORDS_NUM_12 = 12,\n WORDS_NUM_15 = 15,\n WORDS_NUM_18 = 18,\n WORDS_NUM_21 = 21,\n WORDS_NUM_24 = 24,\n\n\n@unique\nclass Bip39Languages(Enum):\n \"\"\" Enumerative for BIP-0039 languages. \"\"\"\n\n CHINESE_SIMPLIFIED = auto(),\n CHINESE_TRADITIONAL = auto(),\n CZECH = auto(),\n ENGLISH = auto(),\n FRENCH = auto(),\n ITALIAN = auto(),\n KOREAN = auto(),\n PORTUGUESE = auto(),\n SPANISH = auto(),\n\n\nclass Bip39MnemonicConst:\n \"\"\" Class container for BIP39 constants. 
\"\"\"\n\n # Accepted mnemonic word lengths\n MNEMONIC_WORD_LEN: List[Bip39WordsNum] = [\n Bip39WordsNum.WORDS_NUM_12,\n Bip39WordsNum.WORDS_NUM_15,\n Bip39WordsNum.WORDS_NUM_18,\n Bip39WordsNum.WORDS_NUM_21,\n Bip39WordsNum.WORDS_NUM_24,\n ]\n\n # Language files\n LANGUAGE_FILES: Dict[Bip39Languages, str] = {\n Bip39Languages.ENGLISH: \"bip39_words/english.txt\",\n Bip39Languages.ITALIAN: \"bip39_words/italian.txt\",\n Bip39Languages.FRENCH: \"bip39_words/french.txt\",\n Bip39Languages.SPANISH: \"bip39_words/spanish.txt\",\n Bip39Languages.PORTUGUESE: \"bip39_words/portuguese.txt\",\n Bip39Languages.CZECH: \"bip39_words/czech.txt\",\n Bip39Languages.CHINESE_SIMPLIFIED: \"bip39_words/chinese_simplified.txt\",\n Bip39Languages.CHINESE_TRADITIONAL: \"bip39_words/chinese_traditional.txt\",\n Bip39Languages.KOREAN: \"bip39_words/korean.txt\",\n }\n\n # Languages supporting binary search\n LANGUAGE_BIN_SEARCH: List[Bip39Languages] = [\n Bip39Languages.ENGLISH,\n Bip39Languages.ITALIAN,\n Bip39Languages.PORTUGUESE,\n Bip39Languages.CZECH\n ]\n\n # Total number of words\n WORDS_LIST_NUM: int = 2048\n # Word length in bit\n WORD_BIT_LEN: int = 11\n\n\nclass Bip39Mnemonic:\n \"\"\" BIP39 mnemonic class. It represents a generic mnemonic phrase.\n It acts as a simple container with some helper functions, so it doesn't validate the given mnemonic.\n \"\"\"\n\n @classmethod\n def FromString(cls,\n mnemonic_str: str) -> Bip39Mnemonic:\n \"\"\" Create a class from mnemonic string.\n\n Args:\n mnemonic_str (str): Mnemonic string\n\n Returns:\n Bip39Mnemonic: Mnemonic object\n \"\"\"\n return cls.FromList(mnemonic_str.split(\" \"))\n\n @classmethod\n def FromList(cls,\n mnemonic_list: List[str]) -> Bip39Mnemonic:\n \"\"\" Create a class from mnemonic list.\n\n Args:\n mnemonic_list (list): Mnemonic list\n\n Returns:\n Bip39Mnemonic: Mnemonic object\n \"\"\"\n return cls(mnemonic_list)\n\n def __init__(self,\n mnemonic_list: List[str]) -> None:\n \"\"\" Construct class.\n\n Args:\n mnemonic_list (list): Mnemonic list\n \"\"\"\n self.m_mnemonic_list = self.__NormalizeNfkd(mnemonic_list)\n\n def WordsCount(self) -> int:\n \"\"\" Get the words count.\n\n Returns:\n int: Words count\n \"\"\"\n return len(self.m_mnemonic_list)\n\n def ToList(self) -> List[str]:\n \"\"\" Get the mnemonic as a list.\n\n Returns:\n list: Mnemonic as a list\n \"\"\"\n return self.m_mnemonic_list\n\n def ToStr(self) -> str:\n \"\"\" Get the mnemonic as a string.\n\n Returns:\n str: Mnemonic as a string\n \"\"\"\n return \" \".join(self.m_mnemonic_list)\n\n def __str__(self) -> str:\n \"\"\" Get the mnemonic as a string.\n\n Returns:\n str: Mnemonic as a string\n \"\"\"\n return self.ToStr()\n\n @staticmethod\n def __NormalizeNfkd(mnemonic_list: List[str]) -> List[str]:\n \"\"\" Normalize mnemonic list using NFKD.\n\n Args:\n mnemonic_list (list): Mnemonic list\n\n Returns:\n list: Normalized mnemonic list\n \"\"\"\n return list(map(lambda s: ConvUtils.NormalizeNfkd(s), mnemonic_list))\n\n\nclass _Bip39WordsList:\n \"\"\" BIP39 words list class. 
\"\"\"\n\n def __init__(self,\n words_list: List[str],\n lang: Bip39Languages) -> None:\n \"\"\" Construct class by reading the words list from file.\n\n Args:\n lang (Bip39Languages): Language\n\n Raises:\n ValueError: If loaded words list is not valid\n \"\"\"\n\n # Check words list length\n if len(words_list) != Bip39MnemonicConst.WORDS_LIST_NUM:\n raise ValueError(\"Number of words list (%d) is not valid\" % len(words_list))\n\n self.m_lang = lang\n self.m_words_list = words_list\n\n def Language(self) -> Bip39Languages:\n \"\"\" Get words list language.\n\n Returns:\n Bip39Languages: Language\n \"\"\"\n return self.m_lang\n\n def GetWordIdx(self,\n word: str) -> int:\n \"\"\" Get the index of the specified word, by searching it in the list.\n\n Args:\n word (str): Word to be searched\n\n Returns:\n int: Word index\n\n Raises:\n ValueError: If the word is not found\n \"\"\"\n\n # Use binary search when possible\n if self.m_lang in Bip39MnemonicConst.LANGUAGE_BIN_SEARCH:\n idx = AlgoUtils.BinarySearch(self.m_words_list, word)\n if idx == -1:\n raise ValueError(\"Word '%s' is not existent in word list\" % word)\n else:\n idx = self.m_words_list.index(word)\n\n return idx\n\n def GetWordAtIdx(self,\n word_idx: int) -> str:\n \"\"\" Get the word at the specified index.\n\n Args:\n word_idx (int): Word index\n\n Returns:\n str: Word at the specified index\n \"\"\"\n return self.m_words_list[word_idx]\n\n\nclass _Bip39WordsListFileReader:\n \"\"\" BIP39 words list file reader class. It reads the words list from a file. \"\"\"\n\n @staticmethod\n def LoadFile(lang: Bip39Languages) -> _Bip39WordsList:\n \"\"\" Load words list file correspondent to the specified language.\n\n Args:\n lang (Bip39Languages): Language\n\n Returns:\n _Bip39WordsList: Loaded words list from mnemonic file\n\n Raises:\n ValueError: If loaded words list is not valid\n \"\"\"\n\n # Get file path\n file_name = Bip39MnemonicConst.LANGUAGE_FILES[lang]\n file_path = os.path.join(os.path.dirname(__file__), file_name)\n # Read file\n with open(file_path, \"r\", encoding=\"utf-8\") as fin:\n words_list = [word.strip() for word in fin.readlines() if word.strip() != \"\"]\n\n return _Bip39WordsList(words_list, lang)\n\n\nclass _Bip39WordsListGetter:\n \"\"\" BIP39 words list getter class. It allows to get words list by language so that\n they are loaded from file only once per language (i.e. on the first request).\n \"\"\"\n\n # Global instance\n instance = None\n\n def __init__(self):\n \"\"\" Construct class. 
\"\"\"\n self.m_words_lists = {}\n\n def GetByLanguage(self,\n lang: Bip39Languages) -> _Bip39WordsList:\n \"\"\" Get words list by language.\n Words list of a specific language are loaded from file only the first time they are requested.\n\n Args:\n lang (Bip39Languages): Language\n\n Returns:\n _Bip39WordsList object: Words list\n\n Raises:\n ValueError: If loaded words list is not valid\n \"\"\"\n\n # Only load words list for a specific language the first time it is requested\n try:\n return self.m_words_lists[lang]\n except KeyError:\n self.m_words_lists[lang] = _Bip39WordsListFileReader.LoadFile(lang)\n return self.m_words_lists[lang]\n\n @classmethod\n def Instance(cls) -> _Bip39WordsListGetter:\n \"\"\" Get the global class instance.\n\n Returns:\n _Bip39WordsListGetter object: _Bip39WordsListGetter object\n \"\"\"\n if cls.instance is None:\n cls.instance = _Bip39WordsListGetter()\n return cls.instance\n\n\nclass _Bip39WordsListFinder:\n \"\"\" BIP39 words list finder class.\n It automatically finds the correct words list from a mnemonic.\n \"\"\"\n\n @staticmethod\n def FindLanguage(mnemonic: Bip39Mnemonic) -> _Bip39WordsList:\n \"\"\" Automatically find the language of the specified mnemonic and\n get the correct _Bip39WordsList class for it.\n\n Args:\n mnemonic (Bip39Mnemonic object): Mnemonic object\n\n Returns:\n _Bip39WordsList object: _Bip39WordsList object\n\n Raises:\n ValueError: If the mnemonic language cannot be found\n \"\"\"\n\n for lang in Bip39Languages:\n # Search all the words because some languages have words in common\n # (e.g. 'fatigue' both in English and French)\n # It's more time consuming, but considering only the first word can detect the wrong language sometimes\n try:\n words_list = _Bip39WordsListGetter.Instance().GetByLanguage(lang)\n for word in mnemonic.ToList():\n words_list.GetWordIdx(word)\n return words_list\n except ValueError:\n continue\n\n # Language not found\n raise ValueError(\"Invalid language for mnemonic '%s'\" % mnemonic.ToStr())\n\n\nclass Bip39MnemonicEncoder:\n \"\"\" BIP39 mnemonic encoder class. It encodes bytes to the mnemonic phrase. 
\"\"\"\n\n def __init__(self,\n lang: Bip39Languages) -> None:\n \"\"\" Construct class.\n\n Args:\n lang (Bip39Languages): Language\n\n Raises:\n TypeError: If the language is not a Bip39Languages enum\n ValueError: If loaded words list is not valid\n \"\"\"\n if not isinstance(lang, Bip39Languages):\n raise TypeError(\"Language is not an enumerative of Bip39Languages\")\n\n self.m_words_list = _Bip39WordsListGetter.Instance().GetByLanguage(lang)\n\n def Encode(self,\n entropy_bytes: bytes) -> Bip39Mnemonic:\n \"\"\" Encode bytes to mnemonic phrase.\n\n Args:\n entropy_bytes (bytes): Entropy bytes (accepted lengths in bits: 128, 160, 192, 224, 256)\n\n Returns:\n Bip39Mnemonic object: Encoded mnemonic\n\n Raises:\n ValueError: If bytes length is not valid\n \"\"\"\n\n # Check entropy length\n entropy_byte_len = len(entropy_bytes)\n if not Bip39EntropyGenerator.IsValidEntropyByteLen(entropy_byte_len):\n raise ValueError(\"Entropy byte length (%d) is not valid\" % entropy_byte_len)\n\n # Convert entropy to binary string\n entropy_bin_str = ConvUtils.BytesToBinaryStr(entropy_bytes, entropy_byte_len * 8)\n # Get entropy hash as binary string\n entropy_hash_bin_str = ConvUtils.BytesToBinaryStr(CryptoUtils.Sha256(entropy_bytes),\n CryptoUtils.Sha256DigestSize() * 8)\n # Get mnemonic binary string by concatenating entropy and checksum\n mnemonic_bin_str = entropy_bin_str + entropy_hash_bin_str[:entropy_byte_len // 4]\n\n # Get mnemonic from entropy\n mnemonic = []\n for i in range(len(mnemonic_bin_str) // Bip39MnemonicConst.WORD_BIT_LEN):\n # Get current word index\n word_bin_str = mnemonic_bin_str[i * Bip39MnemonicConst.WORD_BIT_LEN:(i + 1) * Bip39MnemonicConst.WORD_BIT_LEN]\n word_idx = ConvUtils.BinaryStrToInteger(word_bin_str)\n # Get word at given index\n mnemonic.append(self.m_words_list.GetWordAtIdx(word_idx))\n\n return Bip39Mnemonic.FromList(mnemonic)\n\n\nclass Bip39MnemonicDecoder:\n \"\"\" BIP39 mnemonic decoder class. It decodes a mnemonic phrase to bytes. 
\"\"\"\n\n #\n # Public methods\n #\n\n def __init__(self,\n lang: Optional[Bip39Languages] = None) -> None:\n \"\"\" Construct class.\n\n Args:\n lang (Bip39Languages, optional): Language, None for automatic detection\n\n Raises:\n TypeError: If the language is not a Bip39Languages enum\n ValueError: If loaded words list is not valid\n \"\"\"\n if lang is not None and not isinstance(lang, Bip39Languages):\n raise TypeError(\"Language is not an enumerative of Bip39Languages\")\n\n self.m_words_list = (_Bip39WordsListGetter.Instance().GetByLanguage(lang)\n if lang is not None\n else None)\n\n def Decode(self,\n mnemonic: Union[str, Bip39Mnemonic]) -> bytes:\n \"\"\" Decode a mnemonic phrase to bytes (no checksum).\n\n Args:\n mnemonic (str or Bip39Mnemonic object): Mnemonic\n\n Returns:\n bytes: Decoded bytes (no checksum)\n\n Raises:\n Bip39ChecksumError: If checksum is not valid\n ValueError: If mnemonic is not valid\n \"\"\"\n mnemonic_bin_str = self.__DecodeAndVerifyBinaryStr(mnemonic)\n\n return self.__EntropyBytesFromBinaryStr(mnemonic_bin_str)\n\n def DecodeWithChecksum(self,\n mnemonic: Union[str, Bip39Mnemonic]) -> bytes:\n \"\"\" Decode a mnemonic phrase to bytes (with checksum).\n\n Args:\n mnemonic (str or Bip39Mnemonic object): Mnemonic\n\n Returns:\n bytes: Decoded bytes (with checksum)\n\n Raises:\n Bip39ChecksumError: If checksum is not valid\n ValueError: If mnemonic is not valid\n \"\"\"\n mnemonic_bin_str = self.__DecodeAndVerifyBinaryStr(mnemonic)\n\n # Compute pad bit length\n mnemonic_bit_len = len(mnemonic_bin_str)\n pad_bit_len = (mnemonic_bit_len\n if mnemonic_bit_len % 8 == 0\n else mnemonic_bit_len + (8 - mnemonic_bit_len % 8))\n\n return ConvUtils.BinaryStrToBytes(mnemonic_bin_str, pad_bit_len // 4)\n\n def __DecodeAndVerifyBinaryStr(self,\n mnemonic: Union[str, Bip39Mnemonic]) -> str:\n \"\"\" Decode a mnemonic phrase to its mnemonic binary string by verifying the checksum.\n\n Args:\n mnemonic (str or Bip39Mnemonic object): Mnemonic\n\n Returns:\n str: Mnemonic binary string\n\n Raises:\n Bip39ChecksumError: If checksum is not valid\n ValueError: If mnemonic is not valid\n \"\"\"\n if isinstance(mnemonic, str):\n mnemonic = Bip39Mnemonic.FromString(mnemonic)\n\n # Check mnemonic length\n if mnemonic.WordsCount() not in Bip39MnemonicConst.MNEMONIC_WORD_LEN:\n raise ValueError(\"Mnemonic words count is not valid (%d)\" % mnemonic.WordsCount())\n\n # Detect language if it was not specified at construction\n words_list = (_Bip39WordsListFinder.FindLanguage(mnemonic)\n if self.m_words_list is None\n else self.m_words_list)\n\n # Get back mnemonic binary string\n mnemonic_bin_str = self.__MnemonicToBinaryStr(mnemonic, words_list)\n\n # Verify checksum\n checksum_bin_str = mnemonic_bin_str[-self.__GetChecksumLen(mnemonic_bin_str):]\n comp_checksum_bin_str = self.__ComputeChecksumBinaryStr(mnemonic_bin_str)\n\n if checksum_bin_str != comp_checksum_bin_str:\n raise Bip39ChecksumError(\"Invalid checksum (expected %s, got %s)\" %\n (checksum_bin_str, comp_checksum_bin_str))\n\n return mnemonic_bin_str\n\n def __ComputeChecksumBinaryStr(self,\n mnemonic_bin_str: str) -> str:\n \"\"\" Compute checksum from mnemonic binary string.\n\n Args:\n mnemonic_bin_str (str): Mnemonic binary string\n\n Returns:\n str: Computed checksum binary string\n \"\"\"\n\n # Get entropy bytes\n entropy_bytes = self.__EntropyBytesFromBinaryStr(mnemonic_bin_str)\n # Convert entropy hash to binary string\n entropy_hash_bin_str = ConvUtils.BytesToBinaryStr(CryptoUtils.Sha256(entropy_bytes),\n 
CryptoUtils.Sha256DigestSize() * 8)\n\n # Return checksum\n return entropy_hash_bin_str[:self.__GetChecksumLen(mnemonic_bin_str)]\n\n def __EntropyBytesFromBinaryStr(self,\n mnemonic_bin_str: str) -> bytes:\n \"\"\" Get entropy bytes from mnemonic binary string.\n\n Args:\n mnemonic_bin_str (str): Mnemonic binary string\n\n Returns:\n bytes: Entropy bytes\n \"\"\"\n\n # Get checksum length\n checksum_len = self.__GetChecksumLen(mnemonic_bin_str)\n # Get back entropy binary string\n entropy_bin_str = mnemonic_bin_str[:-checksum_len]\n\n # Get entropy bytes from binary string\n return ConvUtils.BinaryStrToBytes(entropy_bin_str, checksum_len * 8)\n\n @staticmethod\n def __MnemonicToBinaryStr(mnemonic: Bip39Mnemonic,\n words_list: _Bip39WordsList) -> str:\n \"\"\" Get mnemonic binary string from mnemonic phrase.\n\n Args:\n mnemonic (Bip39Mnemonic object) : Mnemonic object\n words_list (Bip39WordsList object): Words list\n\n Returns:\n str: Mnemonic binary string\n\n Raises:\n ValueError: If the one of the mnemonic word is not valid\n \"\"\"\n\n # Convert each word to its index in binary format\n mnemonic_bin_str = map(lambda word: ConvUtils.IntegerToBinaryStr(words_list.GetWordIdx(word),\n Bip39MnemonicConst.WORD_BIT_LEN),\n mnemonic.ToList())\n\n return \"\".join(mnemonic_bin_str)\n\n @staticmethod\n def __GetChecksumLen(mnemonic_bin_str: str) -> int:\n \"\"\" Get checksum length from mnemonic binary string.\n\n Args:\n mnemonic_bin_str (str): Mnemonic binary string\n\n Returns:\n int: Checksum length\n \"\"\"\n return len(mnemonic_bin_str) // 33\n","sub_path":"bip_utils/bip39/bip39_mnemonic.py","file_name":"bip39_mnemonic.py","file_ext":"py","file_size_in_byte":19607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"520372480","text":"import json\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom invoices.mixins import Paginate\nfrom invoices.models import Invoice, Item\nfrom invoices.serializers import InvoiceSerializer, ItemSerializer\n\n\nclass InvoiceAPI(LoginRequiredMixin, ViewSet):\n \"\"\" Endpoint that manages invoice detail data\n \"\"\"\n serializer_class = InvoiceSerializer\n\n def detail(self, *args, **kwargs):\n serializer = self.serializer_class(get_object_or_404(\n self.serializer_class.Meta.model, id=kwargs.get('inv_id')))\n\n return Response(serializer.data, status=200)\n\n def update(self, *args, **kwargs):\n invoice = get_object_or_404(Invoice, id=kwargs.get('inv_id'))\n serializer = self.serializer_class(data=self.request.data, instance=invoice)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=200)\n\n def delete(self, *args, **kwargs):\n invoice = get_object_or_404(Invoice, id=kwargs.get('inv_id'))\n invoice.delete()\n\n return Response(status=200)\n\n\nclass InvoicesAPI(LoginRequiredMixin, Paginate ,ViewSet):\n \"\"\" Endpoint that manages invoice data\n \"\"\"\n serializer_class = InvoiceSerializer\n\n def list(self, *args, **kwargs):\n invoices = self.serializer_class.Meta.model.objects.all()\n data = self.paginate(invoices)\n\n return Response(data, status=200)\n\n def create(self, *args, **kwargs):\n serializer = self.serializer_class(data=self.request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=201)\n\n def terms(self, *args, 
**kwargs):\n terms = json.dumps(self.serializer_class.Meta.model.TERMS)\n\n return Response(json.loads(terms))\n\n def latest(self, *args, **kwargs):\n serializer = self.serializer_class(\n self.serializer_class.Meta.model.objects.order_by('date_created').last())\n\n return Response(serializer.data)\n\n\nclass ItemsAPI(LoginRequiredMixin, ViewSet):\n \"\"\" Add item in invoice\n \"\"\"\n serializer_class = ItemSerializer\n\n def create(self, *args, **kwargs):\n serializer = self.serializer_class(data=self.request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=201)\n\n\nclass ItemAPI(LoginRequiredMixin, ViewSet):\n \"\"\" Detail of item\n \"\"\"\n serializer_class = ItemSerializer\n\n def update(self, *args, **kwargs):\n item = get_object_or_404(Item, id=kwargs.get('item_id'))\n serializer = self.serializer_class(data=self.request.data, instance=item)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=200)\n","sub_path":"invoices/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"600173985","text":"import sys\n\nif sys.version_info >= (3, 8):\n from unittest.mock import AsyncMock\nelse:\n from asynctest.mock import CoroutineMock as AsyncMock\n\nfrom unittest.mock import Mock, call, sentinel\n\nimport pytest\nfrom pytest import raises\n\nfrom jj.matchers import AnyMatcher, LogicalMatcher, ResolvableMatcher\n\nfrom ..._test_utils.fixtures import request_, resolver_\nfrom ..._test_utils.steps import given, then, when\n\n__all__ = (\"request_\", \"resolver_\",)\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize((\"ret_val\", \"res\"), [\n (True, True),\n (False, False),\n])\nasync def test_single_submatcher(ret_val, res, *, resolver_, request_):\n with given:\n submatcher_ = Mock(ResolvableMatcher, match=AsyncMock(return_value=ret_val))\n matcher = AnyMatcher([submatcher_], resolver=resolver_)\n\n with when:\n actual = await matcher.match(request_)\n\n with then:\n assert actual is res\n assert submatcher_.mock_calls == [call.match(request_)]\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize((\"ret_val1\", \"ret_val2\", \"res\"), [\n (True, True, True),\n (True, False, True),\n])\nasync def test_multiple_truthy_submatchers(ret_val1, ret_val2, res, *, resolver_, request_):\n with given:\n submatcher1_ = Mock(ResolvableMatcher, match=AsyncMock(return_value=ret_val1))\n submatcher2_ = Mock(ResolvableMatcher, match=AsyncMock(return_value=ret_val2))\n matcher = AnyMatcher([submatcher1_, submatcher2_], resolver=resolver_)\n\n with when:\n actual = await matcher.match(request_)\n\n with then:\n assert actual is res\n assert submatcher1_.mock_calls == [call.match(request_)]\n assert submatcher2_.mock_calls == []\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize((\"ret_val1\", \"ret_val2\", \"res\"), [\n (False, True, True),\n (False, False, False),\n])\nasync def test_multiple_false_submatchers(ret_val1, ret_val2, res, *, resolver_, request_):\n with given:\n submatcher1_ = Mock(ResolvableMatcher, match=AsyncMock(return_value=ret_val1))\n submatcher2_ = Mock(ResolvableMatcher, match=AsyncMock(return_value=ret_val2))\n matcher = AnyMatcher([submatcher1_, submatcher2_], resolver=resolver_)\n\n with when:\n actual = await matcher.match(request_)\n\n with then:\n assert actual is res\n assert submatcher1_.mock_calls == [call.match(request_)]\n assert 
submatcher2_.mock_calls == [call.match(request_)]\n\n\ndef test_empty_submatchers_raises_exception(*, resolver_):\n with when, raises(Exception) as exception:\n AnyMatcher([], resolver=resolver_)\n\n with then:\n assert exception.type is AssertionError\n\n\ndef test_is_instance_of_logical_matcher(*, resolver_):\n with given:\n submatcher_ = Mock(ResolvableMatcher)\n matcher = AnyMatcher([submatcher_], resolver=resolver_)\n\n with when:\n actual = isinstance(matcher, LogicalMatcher)\n\n with then:\n assert actual is True\n\n\ndef test_repr(*, resolver_):\n with given:\n resolver_.__repr__ = Mock(return_value=\"\")\n matcher = AnyMatcher(resolver=resolver_, matchers=[\n Mock(ResolvableMatcher, __repr__=Mock(return_value=\"\")),\n Mock(ResolvableMatcher, __repr__=Mock(return_value=\"\")),\n ])\n\n with when:\n actual = repr(matcher)\n\n with then:\n assert actual == \"AnyMatcher([, ], resolver=)\"\n\n\ndef test_pack(*, resolver_):\n with given:\n submatchers = [Mock(ResolvableMatcher), Mock(ResolvableMatcher)]\n matcher = AnyMatcher(submatchers, resolver=resolver_)\n\n with when:\n actual = matcher.__packed__()\n\n with then:\n assert actual == {\"matchers\": submatchers}\n\n\ndef test_unpack(*, resolver_):\n with given:\n submatchers = [Mock(ResolvableMatcher), Mock(ResolvableMatcher)]\n kwargs = {\n \"matchers\": submatchers,\n \"resolver\": resolver_,\n \"future_field\": sentinel,\n }\n\n with when:\n actual = AnyMatcher.__unpacked__(**kwargs)\n\n with then:\n assert isinstance(actual, AnyMatcher)\n","sub_path":"tests/matchers/logical_matchers/test_logical_any_matcher.py","file_name":"test_logical_any_matcher.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"407031794","text":"\"\"\"Original processMiner.py from client\n\"\"\"\nimport datetime\nimport pygraphviz as pgv\nimport xml.etree.ElementTree as ET\n\ntree = ET.parse('repairExample.xes')\nroot = tree.getroot()\nlog = dict()\n\n\nns = {'xes': 'http://www.xes-standard.org/'}\n\nfor trace in root.findall('xes:trace', ns):\n caseid = ''\n for string in trace.findall('xes:string', ns):\n if string.attrib['key'] == 'concept:name':\n caseid = string.attrib['value']\n for event in trace.findall('xes:event', ns):\n task = ''\n user = ''\n event_type = ''\n for string in event.findall('xes:string', ns):\n if string.attrib['key'] == 'concept:name':\n task = string.attrib['value']\n if string.attrib['key'] == 'org:resource':\n user = string.attrib['value']\n if string.attrib['key'] == 'lifecycle:transition':\n event_type = string.attrib['value']\n timestamp = ''\n for date in event.findall('xes:date', ns):\n if date.attrib['key'] == 'time:timestamp':\n timestamp = date.attrib['value']\n timestamp = datetime.datetime.strptime(\n timestamp[:-10], '%Y-%m-%dT%H:%M:%S'\n )\n print(caseid, '|', task, '|', user, '|', timestamp)\n if caseid not in log:\n log[caseid] = []\n event = (task, user, timestamp)\n log[caseid].append(event)\n\n\nF = dict()\nfor caseid in log:\n for i in range(0, len(log[caseid]) - 1):\n ai = log[caseid][i][0]\n aj = log[caseid][i+1][0]\n if ai not in F:\n F[ai] = dict()\n if aj not in F[ai]:\n F[ai][aj] = 0\n F[ai][aj] += 1\n\nA = dict()\nfor caseid in log:\n for i in range(0, len(log[caseid])):\n ai = log[caseid][i][0]\n if ai not in A:\n A[ai] = 0\n A[ai] += 1\n\nG = pgv.AGraph(strict=False, directed=True)\nG.graph_attr['rankdir'] = 'LR'\nG.node_attr['shape'] = 'box'\n\nx_min = min(A.values())\nx_max = max(A.values())\nfor ai in 
A:\n text = ai + '\\n' + str(A[ai]) + ')'\n gray = int(float(x_max - A[ai]) / float(x_max - x_min) * 100.00)\n fill = 'gray' + str(gray)\n font = 'black'\n if gray < 50:\n font = 'white'\n G.add_node(ai, label=text, style='filled', fillcolor=fill, fontcolor=font)\n\nvalues = [F[ai][aj] for ai in F for aj in F[ai]]\nx_min = min(values)\nx_max = max(values)\ny_min = 1.0\ny_max = 5.0\n\nfor ai in F:\n for aj in F[ai]:\n x = F[ai][aj]\n y = y_min + (y_max - y_min) * float(x - x_min)/float(x_max - x_min)\n G.add_edge(ai, aj, label=x, penwidth=y)\n\nG.draw('graph.png', prog='dot')\n# processMiner.py\n# Displaying processMiner.py.\n","sub_path":"src/au/edu/qut/processmining/python/processMiner.py","file_name":"processMiner.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"223390777","text":"\"\"\"\nAuthor: poiroot\n\"\"\"\n\nimport logging\nfrom datetime import datetime, timedelta\nfrom collections import defaultdict\nfrom itertools import combinations\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\n\nfrom .metrics import Metric, getModelCoefs\nfrom ..utils.load import load_data, load_model\nfrom xgboost import XGBRegressor\nfrom keras.models import Sequential\nfrom sklearn.externals import joblib\n\nlogger = logging.getLogger(__name__)\n\nclass FortuneTeller:\n \"\"\"\n The predict system\n\n Parameters\n ----------\n X : pandas DataFrame\n The forecast data\n\n name : string\n The config target name\n\n length : int\n The row number of history data\n\n mean : pandas DataFrame, shape [X.shape[1]]\n The mean of history data, i.e. the data seen by models\n\n std : pandas DataFrame, shape [X.shape[1]]\n The standard deviation of history data\n\n cov : pandas DataFrame, shape [X.shape[1], X.shape[1]]\n The covariance matrix of hostory data\n\n models :\n The models created by sklearn\n\n Attributes\n ----------\n r2 : float\n The maximum R^2 of models\n\n RI : float\n The confidence of predict value by system\n\n MD : float\n The mahalannobis distance of X\n\n DQIX : float\n The data quality of X computed by multivariate normal distribution\n\n ZScore : pandas DataFrame, shape [X.shape[1]]\n The z-score of X\n\n predicts : dict, shape [length of models], key : model, value : float\n The predictive values by models\n\n predict :float\n The predictive value by weighted average of models\n\n coefs : dict, shape [length of models], key : model, value : float\n The coefficients of each algorithms\n\n intercept : float\n The intercept of linear regression model\n\n error : float\n The mean absolute error of the linear regression model\n\n algo_name : list\n The algorithm name in config \"algo_name\"\n\n mae : float\n The mean absolute error of models\n\n threshold: float, optional, default 0\n Get predict values whose confidence index greater than threshold\n\n time : datetime object\n The predict time i.e. 
get feature time\n \"\"\"\n\n def __init__(self, name, data_manager, sql):\n\n self.time = None\n self.X = None\n self.data_manager = data_manager\n self.config = data_manager.model_resource[name]\n self.name = name\n self.sql = sql\n self.length = self.config[\"train\"].data.shape[0]\n self.mean = self.config[\"mean\"]\n self.std = self.config[\"std\"]\n self.cov = self.config[\"cov\"]\n self.models = self.config[\"models\"]\n self.r2 = max(self.config[\"algo_r2\"])\n self.RI = 0\n self.MD = 0\n self.DQIX = 0\n self.ZScore = 0\n self.predicts = dict()\n self.predict = 0\n self.coefs = dict()\n self.intercept = 0\n self.error = 0\n self.algo_name = [name.split(\".\")[0] for name in self.config['algo_name']]\n for name in self.algo_name:\n self.coefs[name] = 1 / len(self.models)\n self.mae = 0\n self.threshold = self.config[\"threshold\"]\n self.start_time = None\n self.target_data = None\n self.metric = Metric(self.config[\"train\"].data, self.r2, self.config[\"confidence\"])\n self.contribute = list()\n self.error_method = \"mse\"\n logger.info(\"Creating as instance of FortuneTeller\")\n\n def tell(self, time):\n \"\"\"\n The main method of system\n \"\"\"\n self.start_time = self.start_time - timedelta(hours = 8)\n #為了能夠往回撈到lims的資料,多回推8小時\n \n \n X = self.data_manager.get_feature(self.name, time)\n self.time = time\n self.X = X\n self.MD, self.contribute = self.metric.mahalanobis(X=X, mean=self.mean, cov=self.cov, length=self.length)\n if self.config[\"confidence\"] is True:\n self.DQIX = self.metric.quality(X=X, mean=self.mean, cov=self.cov, log=True)\n self.RI = self.metric.ri(forecast_DQIX=self.DQIX)\n logger.info(\"Compute indicator score\")\n\n for name, model in zip(self.algo_name, self.models):\n if type(model) == type(XGBRegressor()):\n #model.get_booster().feature_names = list(X.columns)\n X = X.reindex(columns = model.get_booster().feature_names)\n if type(model) == type(Sequential()):\n X = X.values.reshape(1,4,12)\n print(X)\n \n predict = model.predict(X)\n print(\"scaled predict value:\", predict) \n \n if type(model) == type(Sequential()):\n y_scaler = joblib.load(\"prep/TRL_Y_scaler_0719.sav\")\n predict = y_scaler.inverse_transform(predict)\n \n print(\"your predict value :\",predict)\n \n while True:\n if len(predict.shape) > 1:\n predict = predict[0]\n else:\n break\n self.predicts[name] = float(predict[0])\n logger.info(\"Predict\")\n\n end_time = time\n logger.debug(\"target_data time: {0}~{1}\".format(self.start_time, end_time))\n self.target_data = self.data_manager.get_target(self.name, self.start_time, end_time, [\"SAMPLED_DATE\", \"RESULT_VALUE\"])\n logger.info(\"Get target data\")\n logger.debug(\"target_data length: {0}\".format(self.target_data.shape[0]))\n\n self.coefs = self._updateModelCoefs(self.coefs)\n if self.config['revise'] is True:\n self._revise()\n\n if self.target_data.shape[0] >= self.config['revise_sample_times']:\n self.start_time = self.target_data['time'][1]\n\n self.predict = 0\n for name in self.algo_name:\n self.predict += self.coefs[name] * self.predicts[name]\n self.predict += self.intercept\n\n self.mae = self._calculateMAE()\n\n def _updateModelCoefs(self, weights):\n #print(\"START UPDATE COEF\")\n #print(\"DATA SHAPE:\", self.target_data.shape[0])\n if self.target_data.shape[0] < self.config[\"revise_sample_times\"]:\n return weights\n \n \n error = dict()\n value = defaultdict(list)\n for name in self.algo_name:\n self.drop_index = list()\n for index in self.target_data.index:\n row = self.target_data.loc[index]\n predict_value = 
self._getPredictValue(row[\"time\"],\n name,\n self.config[\"revise_minutes_low\"],\n self.config[\"revise_minutes_high\"])\n if np.isinf(predict_value):\n #print(\"np.isinf = T :\",predict_value)\n self.drop_index.append(index)\n continue\n value[name].append(predict_value)\n #print(self.target_data)\n #print(\"DROP INDEX:\", self.drop_index)\n \n true_value = self.target_data[\"value\"].copy()\n true_value.drop(self.drop_index, inplace=True)\n #print(\"true_value :\",true_value)\n #print(\"predict value :\",value[name])\n error[name] = self._calculateError(true_value,\n value[name],\n method=self.error_method)\n #print(\"PREDICT ERROR :\",error)\n \n if len(weights) == 1:\n return weights\n if sum(error.values()) == 0:\n return weights\n new_weights = getModelCoefs(error)\n #print(\"new_weights : \",new_weights )\n #print(\"END UPDATE COEF\")\n return new_weights\n\n def _revise(self):\n \"\"\"\n The method that revise the predict value by simple math\n\n Should check quality of y ??\n According LIMS time to get predict value then revise\n Because predict value is immediate\n\n \"\"\"\n\n if self.target_data.shape[0] < self.config[\"revise_sample_times\"]:\n return\n\n value = list()\n for index in self.target_data.index:\n row = self.target_data.loc[index]\n value.append(self._getPredictValue(row[\"time\"],\n self.algo_name,\n self.config[\"revise_minutes_low\"],\n self.config[\"revise_minutes_high\"]))\n predict = list()\n for part in value:\n num = 0\n for i, algo in enumerate(self.algo_name):\n if not np.isinf(part[i]):\n num += self.coefs[algo] * part[i]\n if num != 0:\n predict.append(num)\n true_value = self.target_data[\"value\"].copy()\n true_value.drop(self.drop_index, inplace=True)\n self.error = self._calculateError(true_value, predict, method=\"mae\")\n\n try:\n self.intercept = sum(true_value - predict) / len(predict)\n except ZeroDivisionError:\n self.intercept = 0\n logger.info(\"Revise\")\n\n def _getPredictValue(self, lims_time, name=None, start_time=-30, end_time=0):\n \"\"\"\n According the LIMS time and the time interval to get the mean of predictive value\n Should delete extreme predictive values??\n\n Parameters\n ----------\n lims_time : datetime\n The LIMS time\n\n name : string or list\n The algorithm name\n\n start_time : int, optional, default -30\n The start time of the time interval according LIMS time\n i.e. start time of the time interval = LIMS time + start_time\n\n end_time : int, optional, default 0\n The end time of the time interval according LIMS time\n i.e. 
end time of the time interval = LIMS time + end_time\n\n Returns\n -------\n value : float or list\n The mean of predictive values in the time interval\n \"\"\"\n start = lims_time + timedelta(minutes=start_time)\n end = lims_time + timedelta(minutes=end_time)\n\n # get RTPMS data and mean it\n predict_df = self.sql.get_model_result(self.name, start, end)\n try:\n predict_df = predict_df[predict_df[\"conf_idx\"] > self.threshold]\n if predict_df.shape[0] == 0:\n logger.debug(\"There is no {0} data's confidence index greater than threshold, Time: {1} ~ {2}\".format(name, start, end))\n if type(name) is list:\n return [np.inf for i in name]\n return np.inf\n if name is None:\n value = np.mean(predict_df[\"predict\"])\n elif type(name) is list:\n value = list()\n for n in name:\n value.append(np.mean(predict_df[\"predict_{0}\".format(n)]))\n else:\n value = np.mean(predict_df[\"predict_{0}\".format(name)])\n except KeyError:\n # There is no data in the database\n logger.error(\"No {0} data in database, Time: {1} ~ {2}\".format(name, start, end))\n if type(name) is list:\n return [np.inf for i in name]\n return np.inf\n return value\n\n def _calculateError(self, true, predict, method=\"mse\"):\n \"\"\"\n Calculate error between true values and predictive values\n\n Parameters\n ----------\n true : list\n The true values\n\n predict : list\n The predictive values\n\n method : string, optional, default \"mse\"\n The method to calculate error\n \"mse\" : mean square error\n \"mae\" : mean absolute error\n \"r2\" : 1 - (R squared)\n \"\"\"\n methods = set([\"mse\", \"mae\", \"r2\"])\n if method not in methods:\n raise ValueError(\"The method is invalid\")\n if len(true) == 0 or len(predict) == 0:\n return 0\n\n if method == \"mse\":\n error = mean_squared_error(true, predict)\n elif method == \"mae\":\n error = mean_absolute_error(true, predict)\n elif method == \"r2\":\n error = 1 - r2_score(true, predict)\n\n return error\n\n def setStartTime(self, time):\n self.start_time = time\n\n def _calculateMAE(self):\n \"\"\"\n Calculating MAE bases on the linear regression model and model difference\n \"\"\"\n mae = self.error\n model_diff_mae = list()\n\n for indices in combinations(range(len(self.algo_name)), 2):\n diff = abs(self.predicts[self.algo_name[indices[0]]] - self.predicts[self.algo_name[indices[1]]])\n coefs = [abs(self.coefs[self.algo_name[indices[i]]]) for i in range(2)]\n ratio = min(coefs) / max(coefs)\n if ratio is np.nan:\n continue\n model_diff_mae.append(ratio * diff)\n try:\n mae += max(model_diff_mae)\n except ValueError:\n pass\n\n return mae\n\n def initialModelCoefs(self):\n time = datetime.now()\n logger.info(\"Intialize model coefs start\")\n if self.config[\"target_source\"] == \"rtpms\":\n self.target_data = self.data_manager.get_target(self.name, time - timedelta(seconds=5*self.config[\"predict_sleep_seconds\"]), time, [\"SAMPLED_DATE\", \"RESULT_VALUE\"])\n else:\n self.target_data = self.data_manager.get_target(self.name, time - timedelta(days=1), time, [\"SAMPLED_DATE\", \"RESULT_VALUE\"])\n error = dict()\n value = defaultdict(list)\n drop_index = list()\n for index in self.target_data.index:\n row = self.target_data.loc[index]\n base_time = row[\"time\"]\n times = [base_time + timedelta(minutes=i) for i in range(self.config[\"revise_minutes_low\"],\n self.config[\"revise_minutes_high\"],\n self.config[\"predict_sleep_seconds\"] // 60)]\n predicts = defaultdict(list)\n for t in times:\n if t > time:\n break\n feature = self.data_manager.get_feature(self.name, t)\n for 
name, model in zip(self.algo_name, self.models):\n predict = model.predict(feature)\n while True:\n if len(predict.shape) > 1:\n predict = predict[0]\n else:\n break\n if self.config[\"confidence\"] is True:\n self.DQIX = self.metric.quality(X=feature, mean=self.mean, cov=self.cov, log=True)\n self.RI = self.metric.ri(forecast_DQIX=self.DQIX)\n if self.RI > self.threshold:\n predicts[name].append(predict[0])\n else:\n predicts[name].append(predict[0])\n if len(predicts.keys()) == 0:\n drop_index.append(index)\n for name in predicts.keys():\n if len(predicts[name]) == 0:\n drop_index.append(index)\n break\n value[name].append(np.mean(predicts[name]))\n true_value = self.target_data[\"value\"].copy()\n true_value.drop(drop_index, inplace=True)\n for name in self.algo_name:\n error[name] = self._calculateError(true_value,\n value[name],\n method=self.error_method)\n weights = dict()\n total_error = sum(error.values())\n if total_error == 0:\n logger.info(\"Initialize model coefs and intercept done by total error is zero\")\n return self.coefs, 0\n if len(self.algo_name) == 1:\n name = self.algo_name[0]\n weights[name] = 1\n else:\n weights = getModelCoefs(error)\n logger.info(\"Initialize model coefs done\")\n\n if true_value.shape[0] < self.config[\"revise_sample_times\"]:\n return weights, 0\n\n predict = list()\n for i in range(len(true_value)):\n num = 0\n for key in value:\n num += weights[key] * value[key][i]\n predict.append(num)\n\n if self.config[\"revise\"]:\n try:\n intercept = sum(true_value - predict) / len(predict)\n except ZeroDivisionError:\n intercept = 0\n else:\n intercept = 0\n logger.info(\"Initialize intercept done\")\n return weights, intercept\n\n","sub_path":"pycode/tellers/.ipynb_checkpoints/ft-checkpoint.py","file_name":"ft-checkpoint.py","file_ext":"py","file_size_in_byte":16930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"132121488","text":"'''\nTrevor Urbshas\nMr. Reid\nICS 4U Area of a Triangle\nOctober 3, 2014\nOctober 3, 2014\nCalculates the area of a triangle using Heron's formula and trigonometry.\n'''\nfrom math import sqrt,sin,cos,radians\n\nprint('This program will calculate the area of a triangle using Heron\\'s formula and trigonometry')\n\ndef checkNum(message): #Create a function to check if inputs are numbers\n while True:\n temp = input(message) #Getting user input. Name is irrelevant because variable is global\n try:\n float(temp)\n except:\n print('Invalid input.')\n else:\n if float(temp) > 0.0:\n return float(temp)\n else:\n print('That is not a valid input.')\n\nwhile True:\n a = checkNum('What is the length of side a of the triangle? ')\n b = checkNum('What is the length of side b of the triangle? ')\n angleC = radians(checkNum('What is the value of angle C(in degrees)? 
'))\n c = sqrt(a**2 + b**2 - 2*a*b*(cos(angleC))) #Don't need to ask for length of c because it can be calculated using a,b, and angleC\n s = (a + b + c)/2 #s is half the perimeter of the triangle\n\n heronsArea = sqrt(s*(s-a)*(s-b)*(s-c)) #Area of the triangle using Heron's formula\n trigArea = (a*b*abs(sin(angleC)))/2 #Area of the triangle using trigonometry\n\n print('The area of the triangle using Heron\\'s formula is ',heronsArea,'.',sep='')\n print('The area of the triangle using trigonometry is ',trigArea,'.',sep='')\n\n close = input('Do you want to exit the program?(y or Y to close) ')\n if close == 'y' or close == 'Y':\n break\n","sub_path":"ICS4U - TUrbshas/Unit 1/U1A1Pr4(Area of a Triangle).py","file_name":"U1A1Pr4(Area of a Triangle).py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"385934502","text":"import sys\nimport time\n\n\nimport tensorflow as tf\n\nfrom networkcore import networkcore\n\n\n\n# Main class\nclass uresnetcore(networkcore):\n '''Define a network model and run training\n\n U resnet implementation\n '''\n def __init__(self):\n '''initialization\n\n Requires a list of parameters as python dictionary\n\n Arguments:\n params {dict} -- Network parameters\n\n Raises:\n ConfigurationException -- Missing a required parameter\n '''\n\n # Call the base class to initialize _core_network_params:\n super(uresnetcore,self).__init__()\n\n # Extend the parameters to include the needed ones:\n\n self._core_network_params += [\n 'BALANCE_LOSS',\n 'NUM_LABELS',\n ]\n\n return\n\n\n def _initialize_input(self, dims):\n '''Initialize input parameters of the network. Must return a dict type\n\n For example, paremeters of the dict can be 'image', 'label', 'weights', etc\n\n Arguments:\n dims {[type]} -- [description]\n\n Keyword Arguments:\n label_dims {[type]} -- [description] (default: {None})\n\n Raises:\n NotImplementedError -- [description]\n '''\n\n inputs = dict()\n\n inputs.update({\n 'image' : tf.placeholder(tf.float32, dims['image'], name=\"input_image\")\n })\n\n\n if 'label' in dims:\n inputs['label'] = tf.placeholder(tf.int64, dims['label'], name=\"label\")\n\n if self._params['BALANCE_LOSS']:\n if 'weight' in dims:\n inputs['weight'] = tf.placeholder(tf.float32, dims['weight'], name='weight')\n else:\n raise Exception(\"Weights requested for loss balance, but dimension not provided.\")\n\n return inputs\n","sub_path":"networks/uresnetcore.py","file_name":"uresnetcore.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"486681277","text":"import SWiMwB as FD\n\nfd = FD.Face_detector() # obiekt detektora\nfd.get_photo() # pobieranie zdjecia z kamerki\nfd.detect_face() # wykrywanie twarzy\nprint(fd.face)\nif fd.face is not None:\n fr = FD.Face_recognitor('lbph')\n\n #fr.read_model('models/lbph/model_BioID_LBPH-2020-06-13_14-32-22.xml','models/lbph/subjects_BioID_LBPH-2020-06-13_14-32-22.csv')\n fr.train_model()\n (how_much, label) = fr.predict(fd.face)\n print('wykryto: ' + label)\n\nelse:\n print('nie wykryto twarzy!')\n\nprint('done!')","sub_path":"program as scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"589227324","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 28 15:35:32 2018\n\n@author: 
lukas\n\"\"\"\n\nimport nibabel as nib\nimport numpy as np\n\nnii = nib.load('/home/lukas/Documents/projects/strokeHeads/processed/logTPM/TPM_padded.nii')\nd = np.array(nii.get_data(),dtype='float32')\nd.dtype\nnp.min(d[d>0])\n\nd[d<1e-9] = 1e-9 # Values smaller than e-10 start generating too much floating point artifacts.\nnp.sum(d==0)\nnp.min(d)\ndlog = np.log(d)\n\nimg = nib.Nifti1Image(dlog, nii.affine)\nnib.save(img, '/home/lukas/Documents/projects/strokeHeads/processed/logTPM/logTPM.nii')\n","sub_path":"scripts/log_TPM.py","file_name":"log_TPM.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"399231573","text":"import random\nimport string\nfrom django.db import transaction\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom haikunator import Haikunator\nfrom .models import Room, Member, MemberAccept\nfrom django.views.decorators.cache import never_cache\nfrom django.shortcuts import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.contrib.auth.models import User\nfrom django.views.generic import TemplateView\nfrom .forms import FriendForm, LabelForm\nfrom .utils import get_room_or_error, catch_client_error\nfrom django.core.urlresolvers import reverse\n\n\nhaikunator = Haikunator()\n\n@never_cache\n@login_required\ndef about(request):\n return render(request, \"chat/about.html\")\n'''\n@never_cache\n@login_required\ndef new_room(request, *args,**kwargs):\n \"\"\"\n Randomly create a new room, and redirect to it.\n \"\"\"\n\n new_room = None\n while not new_room:\n with transaction.atomic():\n label = haikunator.haikunate()\n if Room.objects.filter(label=label).exists():\n continue\n new_room = Room.objects.create(label=label, current_user_id=request.user.id, private = False)\n\n return redirect(\"chat_index:chat_room\", pk = new_room.pk)\n #return HttpResponseRedirect(reverse(\"chat_index:chat_room\", pk = new_room.pk))\n'''\n\nfrom django.core.exceptions import PermissionDenied\n\n@never_cache\n@login_required\n#@catch_client_error\ndef chat_room(request, pk, *args,**kwargs):\n \"\"\"\n Room view - show the room, with latest messages.\n\n The template for this view has the WebSocket business to send and stream\n messages, so see the template for where the magic happens.\n \"\"\"\n\n\n # If the room with the given label doesn't exist, automatically create it\n # upon first visit (a la etherpad).\n #room = get_object_or_404(Room, pk=pk)\n room = get_object_or_404(Room, pk=pk)\n access = False\n if room.private == True:\n memberaccepter = MemberAccept.objects.filter(acceptroom_id=pk, accepter_id = request.user.id, agree = True)\n if not memberaccepter:\n if request.user.id != room.current_user_id:\n access = False\n else:\n access = True\n else:\n access = True\n elif room.private == False:\n access = True\n else:\n access = False\n\n if access != True:\n #need to output with error\n raise PermissionDenied\n #return redirect('chat_index:chat_list_rooms') \n \n else:\n #rooms = get_room_or_error(pk, request.user)\n #room = Room.objects.get(pk=rooms.pk)\n\n # We want to show the last 50 messages, ordered most-recent-last\n messages = room.messages.order_by('-timestamp')[:10]\n return render(request, \"chat/room.html\", {\n 'room': room,\n 'messages': messages,\n })\n\nfrom django.core.paginator import Paginator, InvalidPage\nfrom el_pagination.decorators import 
page_template\n\n@never_cache\n@login_required\n@page_template('chat/list_rooms_page.html')\ndef chat_list_rooms(request, template = 'chat/list_rooms.html',\n extra_context=None):\n #template = 'chat/list_rooms.html'\n #page_template = 'chat/list_rooms_page.html'\n\n context = {\n 'object_list': Room.objects.all(),\n \n }\n if extra_context is not None:\n context.update(extra_context)\n\n return render(\n request, template, context)\n \n\n\n\n\n'''\n@never_cache\n@login_required\ndef priv_room(request):\n users = User.objects.order_by('id')\n\n return render(request, \"chat/priv_room.html\",{\n \"users\": users,\n }) \n'''\n\nclass FriendView(TemplateView):\n template_name = \"chat/priv_room.html\"\n\n\n def get(self, request, pk, *args, **kwargs):\n \n room = get_object_or_404(Room, pk=pk)\n #room = Room.objects.get(pk=pk)\n if request.user.id == room.current_user_id:\n form = FriendForm()\n users = User.objects.exclude(id=request.user.id)\n\n query = request.GET.get(\"q\")\n\n true_members=[]\n memberlist = None \n \n\n if query:\n queryset_list = User.objects.filter(username=query)\n\n else:\n queryset_list = None\n\n #your followers\n #drugs = room.who_added_user(request.user)\n\n #member, created = Member.objects.get_or_create(current_user=request.user, pk=pk)\n #members = member.users.all()\n members = MemberAccept.objects.filter(acceptroom_id=pk, agree=False)\n membersinroom = MemberAccept.objects.filter(acceptroom_id=pk, agree=True)\n\n if queryset_list:\n memberlist = MemberAccept.objects.filter(acceptroom_id=pk, accepter = queryset_list)\n \n \n '''\n for member in members:\n memberlist.append(member)\n \n for drug in drugs:\n \n if drug.pk == member.pk:\n true_members.append(drug)\n drugs.remove(drug)\n memberlist.remove(drug)\n '''\n \n\n\n context = {'form': form, 'users': users,\n 'members': members,\n 'membersinroom': membersinroom,\n #'drugs': drugs,\n 'object_list': queryset_list,\n #'true_members': true_members,\n 'memberlist': memberlist,\n 'room': room,\n }\n return render(request,self.template_name, context) \n else:\n return redirect('home')\n def post(self, request, *args, **kwargs):\n return HttpResponseRedirect(reverse('chat_index:select_room'))\n\n\n@never_cache\n@login_required\ndef change_members(request, pk, operation, pkid):\n \n member = User.objects.get(pk=pkid)\n label = get_object_or_404(Room, pk=pk)\n if operation == 'add':\n M = MemberAccept.objects.filter(accepter=member, acceptroom=label)\n if M:\n MemberAccept.objects.get(accepter=member,acceptroom=label)\n else:\n MemberAccept.objects.create(accepter=member,acceptroom=label,agree=False)\n return redirect('chat_index:private_room', pk = pk)\n elif operation == 'remove':\n #Member.remove_member(request.user, label, member)\n M = get_object_or_404(MemberAccept, accepter=member,acceptroom=label)\n M.delete()\n return redirect('chat_index:private_room', pk = pk)\n \n elif operation == 'delete':\n if label.current_user_id == request.user.id:\n M = get_object_or_404(Room, current_user=member,id=pk)\n M.delete()\n return HttpResponseRedirect(reverse('chat_index:select_room'))\n else:\n raise PermissionDenied\n return redirect('chat_index:private_room', pk = pk)\n\n\nfrom django.contrib import messages\n@never_cache\n@login_required\ndef get_name(request):\n #template_name = \"chat/title_room.html\"\n new_room = None\n form = LabelForm(request.POST or None)\n \n if request.method == 'POST':\n '''\n while not new_room:\n query = request.POST.get(\"label\")\n userid = request.user.id\n #checkmember = 
Member.objects.filter(current_user_id=userid)\n new_room = Member.objects.create(label=query, current_user_id=userid)\n '''\n\n #form = LabelForm(request.POST)\n if form.is_valid():\n query = request.POST.get(\"label\")\n\n #check checkbox\n userid = request.user.id\n private = request.POST.get(\"private_group\", False)\n if private:\n new_room = Room.objects.create(label=query, current_user_id=userid, private = True)\n return HttpResponseRedirect('/chat/private/%s/' % new_room.id)\n else:\n new_room = Room.objects.create(label=query, current_user_id=userid, private = False)\n #reverse for redirecting after POST\n return HttpResponseRedirect(reverse('chat_index:select_room'))\n\n else:\n render(request, 'chat/title_room.html', {'form': LabelForm()})\n #else:\n #form = LabelForm()\n # if a GET (or any other method) we'll create a blank form\n \n\n return render(request, 'chat/title_room.html', {'form': form})\n\n\n@never_cache\n@login_required\ndef select_room(request, *args,**kwargs):\n \n rooms = Room.objects.all()\n room_members = MemberAccept.objects.filter(accepter=request.user.id, agree=True)\n \n #userinrooms = Room.objects.filter(users__id = request.user.id)\n room_invites = MemberAccept.objects.filter(accepter=request.user.id, agree=False)\n\n room_owners = Room.objects.filter(current_user_id = request.user.id)\n general_rooms = Room.objects.filter(private = 0)\n #general_room_owners = general_rooms.filter(current_user_id = request.user.id)\n\n '''\n room_chats = []\n for room_member in room_members:\n for room_owner in room_owners:\n room_chats.append(room_member.acceptroom)\n if room_owner.label != room_member.acceptroom:\n room_chats.append(room_owner.label)\n '''\n\n \n return render(request, \"chat/select_room.html\", {\n 'rooms': rooms,\n 'room_members': room_members,\n #'userinrooms': userinrooms,\n 'room_invites': room_invites,\n 'room_owners': room_owners,\n 'general_rooms': general_rooms,\n #'general_room_owners': general_room_owners,\n \n })\n\n\n\n@never_cache\n@login_required\ndef accept_members(request, pk, operation, pkid):\n \n accepter = User.objects.get(pk=pkid)\n acceptroom = Room.objects.get(pk=pk)\n if operation == 'agree':\n if accepter.id == request.user.id:\n M = get_object_or_404(MemberAccept, accepter=accepter,acceptroom=acceptroom)\n M.agree = True\n M.save()\n return redirect('chat_index:select_room')\n else:\n raise PermissionDenied\n\n \n elif operation == 'disagree':\n if accepter.id == request.user.id:\n M = get_object_or_404(MemberAccept, accepter=accepter,acceptroom=acceptroom, agree = False)\n M.delete()\n return redirect('chat_index:select_room')\n else:\n raise PermissionDenied \n elif operation == 'leave':\n if accepter.id == request.user.id:\n M = get_object_or_404(MemberAccept, accepter=accepter,acceptroom=acceptroom, agree = True)\n M.delete()\n return redirect('chat_index:select_room')\n else:\n raise PermissionDenied \n return redirect('chat_index:select_room')\n","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"326316955","text":"import tensorflow as tf\nimport argparse\n# import cv2\nfrom tensorflow.examples.tutorials.mnist.input_data import read_data_sets\n\n#tensorboard --logdir logs --port 6789\nclass Config:\n def __init__(self):\n self.lr = 0.001\n self.filters = 32\n self.batch_size = 64\n self.classes = 10\n self.eps = 1e-8\n\n self.epoches = 5\n self.name = 'p07'\n self.save_path = 
'./models/{name}/{name}'.format(name = self.name)\n self.sample_path = './data/MNIST_data'\n self.logdir = './logs/{name}'.format(name = self.name)\n\n def from_cmd_line(self):\n attrs_dict = self._get_attrs()\n parse = argparse.ArgumentParser()\n for key, value in attrs_dict.items():\n parse.add_argument('--'+key, type = type(value), default=value, help = 'set %s' % key)\n a = parse.parse_args()\n for name in attrs_dict:\n setattr(self, name, getattr(a, name))\n\n def __repr__(self):\n result = '{'\n for key, value in self._get_attrs().items():\n result += '%s=%s,' % (key, value)\n result = result[0:-1]\n result += '}'\n return result\n\n def __str__(self):\n return self.__repr__()\n\nclass Tensors:\n def __init__(self, config: Config):\n with tf.device('/gpu:0'):\n self.lr = tf.placeholder(tf.float32, [], 'lr')\n self.x = tf.placeholder(tf.float32, [None, 784], 'x')\n x = tf.reshape(self.x, [-1, 28, 28, 1])\n\n x = tf.layers.conv2d(x, config.filters, 3, 1, 'same', activation = tf.nn.relu, name = 'conv1') #[-1, 28, 28, 32]\n\n x = tf.layers.conv2d(x, 2*config.filters, 3, 1, 'same', activation = tf.nn.relu, name = 'conv2') #[-1, 28, 28, 64]\n x = tf.layers.max_pooling2d(x, 2, 2) # #[-1, 14, 14, 64]\n\n x = tf.layers.conv2d(x, 4*config.filters, 3, 1, 'same', activation = tf.nn.relu, name = 'conv3') #[-1, 28, 28, 128]\n x = tf.layers.max_pooling2d(x, 2, 2) # #[-1, 7, 7, 128]\n\n x = tf.layers.flatten(x)\n x = tf.nn.relu(x)\n\n x = tf.layers.dense(x, 1000, activation=tf.nn.relu, name = 'dense1')\n y_predict = tf.layers.dense(x, config.classes, name = 'dense2')\n y_predict = tf.nn.softmax(y_predict)\n y_predict = tf.maximum(y_predict, config.eps)\n\n self.y = tf.placeholder(tf.int32, [None], 'y')\n y = tf.one_hot(self.y, config.classes)\n\n self.loss = -tf.reduce_mean(y*tf.log(y_predict))\n\n opt = tf.train.AdamOptimizer(self.lr)\n self.train_op = opt.minimize(self.loss)\n\n tf.summary.scalar('loss', self.loss)\n self.summary_op = tf.summary.merge_all()\n\n\n\n\nclass Samples:\n def __init__(self, config: Config):\n self.ds = read_data_sets(config.sample_path)\n\n def next_batch(self, batch_size):\n return self.ds.train.next_batch(batch_size)\n\n @property\n def num(self):\n return self.ds.train.num_examples\n\n\n\n\nclass Mnist:\n def __init__(self, config: Config):\n self.config = config\n graph = tf.Graph()\n with graph.as_default():\n self.tensors = Tensors(config)\n conf = tf.ConfigProto()\n conf.allow_soft_placement = True\n self.session = tf.Session(graph=graph, config = conf)\n self.saver = tf.train.Saver()\n self.file_writer = tf.summary.FileWriter(config.logdir, graph = graph)\n try:\n self.saver.restore(self.session, config.save_path)\n print('resotr the model successfully!')\n except:\n print('the mdoel doese not exst, we have to train a new one')\n self.train()\n\n def train(self):\n self.session.run(tf.global_variables_initializer())\n self.samples = Samples(self.config)\n batches =self.samples.num // self.config.batch_size\n for epoch in range(self.config.epoches):\n for batch in range(batches):\n x, y = self.samples.next_batch(self.config.batch_size)\n feed_dic = {\n self.tensors.x: x,\n self.tensors.y: y,\n self.tensors.lr: self.config.lr\n }\n _, lo, su = self.session.run([self.tensors.train_op, self.tensors.loss, self.tensors.summary_op], feed_dict=feed_dic)\n self.file_writer.add_summary(su, epoch * batches + batch)\n print('epoch=%d, loss=%f' % (epoch, lo))\n self.saver.save(self.session, self.config.save_path)\n\n def predict(self):\n pass\n\n def close(self):\n 
self.session.close()\n\n\n\n\n\n\n\nif __name__ == '__main__':\n config = Config()\n mnist = Mnist(config)\n mnist.close()\n","sub_path":"p07_mnist_tensorboard.py","file_name":"p07_mnist_tensorboard.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"230730444","text":"import torch\nimport regex\nimport logging\nimport numpy as np\nfrom math import ceil\n\nfrom multiprocessing import Pool as ProcessPool\nfrom multiprocessing.util import Finalize\n\nfrom ..retriever import TfidfDocRanker\nfrom ..retriever import DocDB\n\nfrom transformers import AutoModelForQuestionAnswering, AutoTokenizer\n\nDEFAULTS = {\n 'ranker': TfidfDocRanker,\n 'db': DocDB,\n 'reader_model': 'distilbert-base-cased-distilled-squad',\n}\n\nlogger = logging.getLogger(__name__)\n\n\n# ------------------------------------------------------------------------------\n# Multiprocessing functions to fetch text\n# ------------------------------------------------------------------------------\n\nPROCESS_DB = None\n\ndef init(db_class, db_opts):\n global PROCESS_DB\n PROCESS_DB = db_class(**db_opts)\n Finalize(PROCESS_DB, PROCESS_DB.close, exitpriority=100)\n\ndef fetch_text(doc_id):\n global PROCESS_DB\n return PROCESS_DB.get_doc_text(doc_id)\n\n\n# ------------------------------------------------------------------------------\n# Main DrQA pipeline\n# ------------------------------------------------------------------------------\n\n\nclass DrQATransformers(object):\n def __init__(\n self,\n reader_model=None,\n use_fast_tokenizer=True,\n group_length=500,\n batch_size=32,\n cuda=True,\n num_workers=None,\n db_config=None,\n ranker_config=None,\n ):\n \"\"\"Initialize the pipeline.\n\n Args:\n reader_model: name of the Huggingface transformer QA model.\n use_fast_tokenizer: whether to use fast tokenizer\n batch_size: batch size when processing paragraphs.\n cuda: whether to use gpu.\n num_workers: number of parallel CPU processes to use for retrieving\n db_config: config for doc db.\n ranker_config: config for ranker.\n group_length: target size for squashing short paragraphs together.\n 0 = read every paragraph independently\n infty = read all paragraphs together\n \"\"\"\n assert use_fast_tokenizer == True, 'Current version only support models with fast tokenizer'\n self.batch_size = batch_size\n self.device = 'cuda' if cuda else 'cpu'\n self.num_workers = num_workers\n self.group_length = group_length\n\n logger.info('Initializing document ranker...')\n ranker_config = ranker_config or {}\n ranker_class = ranker_config.get('class', DEFAULTS['ranker'])\n ranker_opts = ranker_config.get('options', {})\n self.ranker = ranker_class(**ranker_opts)\n\n logger.info('Initializing document reader & tokenizer...')\n reader_model = reader_model or DEFAULTS['reader_model']\n self.reader = AutoModelForQuestionAnswering \\\n .from_pretrained(reader_model) \\\n .eval() \\\n .to(self.device)\n self.need_token_type = self.reader.base_model_prefix not in {\n \"xlm\", \"roberta\", \"distilbert\", \"camembert\", \"bart\", \"longformer\"\n }\n tokenizer_kwargs = {}\n if self.reader.base_model_prefix in {'mobilebert'}:\n tokenizer_kwargs['model_max_length'] = self.reader.config.max_position_embeddings\n #\n self.tokenizer = AutoTokenizer.from_pretrained(reader_model, use_fast=use_fast_tokenizer, **tokenizer_kwargs)\n\n logger.info('Initializing document retrievers...')\n db_config = db_config or {}\n db_class = db_config.get('class', DEFAULTS['db'])\n 
db_opts = db_config.get('options', {})\n\n self.processes = ProcessPool(\n num_workers,\n initializer=init,\n initargs=(db_class, db_opts)\n )\n\n def _split_doc(self, doc):\n \"\"\"Given a doc, split it into chunks (by paragraph).\"\"\"\n curr = []\n curr_len = 0\n for split in regex.split(r'\\n+', doc):\n split = split.strip()\n if len(split) == 0:\n continue\n curr.append(split)\n curr_len += len(split)\n # Maybe group paragraphs together until we hit a length limit\n if len(curr) > 0 and curr_len > self.group_length:\n yield ' '.join(curr)\n curr = []\n curr_len = 0\n if len(curr) > 0:\n yield ' '.join(curr)\n\n def process(self, query, top_n=1, n_docs=5, return_context=False):\n \"\"\"Run a single query.\"\"\"\n\n # Rank documents for query.\n ranked = [self.ranker.closest_docs(query, k=n_docs)]\n all_docids, all_doc_scores = zip(*ranked)\n\n # Flatten document ids and retrieve text from database.\n # We remove duplicates for processing efficiency.\n flat_docids = list({d for docids in all_docids for d in docids})\n # did2didx = {did: didx for didx, did in enumerate(flat_docids)}\n didx2did = {didx: did for didx, did in enumerate(flat_docids)}\n doc_texts = self.processes.map(fetch_text, flat_docids)\n\n # Split and flatten documents. Maintain a mapping from doc (index in\n # flat list) to split (index in flat list).\n flat_splits = []\n didx2sidx = []\n sidx2didx = []\n for i, text in enumerate(doc_texts):\n splits = self._split_doc(text)\n didx2sidx.append([len(flat_splits), -1])\n for split in splits:\n flat_splits.append(split)\n sidx2didx.append(i)\n didx2sidx[-1][1] = len(flat_splits)\n\n # Tokenize\n inputs = self.tokenizer(\n [query]*len(flat_splits),\n flat_splits,\n padding=True,\n truncation='only_second',\n stride=96,\n return_overflowing_tokens=True,\n return_attention_mask=True,\n return_token_type_ids=self.need_token_type,\n return_offsets_mapping=True,\n return_tensors='pt',\n ).to(self.device)\n \n # Split batches\n n_examples = inputs.input_ids.shape[0]\n n_batch = ceil(n_examples / self.batch_size)\n batches = [i*self.batch_size for i in range(n_batch)] #[0,32,64,..]\n batches.append(n_examples)\n\n # Feed forward batches\n outputs = []\n for i in range(n_batch):\n with torch.no_grad():\n if self.need_token_type:\n output = self.reader(\n inputs.input_ids[batches[i]:batches[i+1]],\n inputs.attention_mask[batches[i]:batches[i+1]],\n token_type_ids=inputs.token_type_ids[batches[i]:batches[i+1]],\n )\n else:\n output = self.reader(\n inputs.input_ids[batches[i]:batches[i+1]],\n inputs.attention_mask[batches[i]:batches[i+1]],\n )\n outputs.append(output)\n\n # Join batch outputs\n start_logits = torch.cat([o.start_logits for o in outputs], dim=0)\n end_logits = torch.cat([o.end_logits for o in outputs], dim=0)\n\n # decode start-end logits to span slice & score\n start, end, score, idx_sort = self.decode_logits(start_logits, end_logits, topk=top_n)\n\n # Produce predictions, take top_n predictions with highest score\n all_predictions = []\n for i in range(top_n):\n split_id = inputs.overflow_to_sample_mapping[idx_sort[i]].item()\n start_char = inputs.offset_mapping[idx_sort[i], start[i], 0].item()\n end_char = inputs.offset_mapping[idx_sort[i], end[i], 1].item()\n prediction = {\n 'doc_id': didx2did[sidx2didx[split_id]],\n 'span': flat_splits[split_id][start_char:end_char],\n 'doc_score': float(all_doc_scores[0][sidx2didx[split_id]]),\n 'span_score': float(score[i]),\n }\n if return_context:\n prediction['context'] = {\n 'text': flat_splits[split_id],\n 'start': 
start_char,\n 'end': end_char,\n }\n all_predictions.append(prediction)\n\n return all_predictions\n\n\n def decode_logits(self, start_logits, end_logits, topk=1, max_answer_len=None):\n \"\"\"\n Take the output of any :obj:`ModelForQuestionAnswering` and generate score for each span to be the actual answer.\n\n In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or\n answer end position being before the starting position. The method supports output the k-best answer through\n the topk argument.\n\n Args:\n start_logits (:obj:`tensor`): Individual start logits for each token. # shape: batch, len(input_ids[0])\n end_logits (:obj:`tensor`): Individual end logits for each token.\n topk (:obj:`int`): Indicates how many possible answer span(s) to extract from the model output.\n max_answer_len (:obj:`int`): Maximum size of the answer to extract from the model's output.\n Output:\n starts: top_n predicted start indices\n ends: top_n predicted end indices\n scores: top_n prediction scores\n idx_sort: top_n batch element ids\n \"\"\"\n start = start_logits.cpu().numpy().clip(min=0.0)\n end = end_logits.cpu().numpy().clip(min=0.0)\n max_answer_len = max_answer_len or start.shape[1]\n\n # Compute the score of each tuple(start, end) to be the real answer\n outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))\n\n # Remove candidate with end < start and end - start > max_answer_len\n candidates = np.tril(np.triu(outer), max_answer_len - 1)\n\n # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)\n scores_flat = candidates.flatten()\n if topk == 1:\n idx_sort = [np.argmax(scores_flat)]\n elif len(scores_flat) < topk:\n idx_sort = np.argsort(-scores_flat)\n else:\n idx = np.argpartition(-scores_flat, topk)[0:topk]\n idx_sort = idx[np.argsort(-scores_flat[idx])]\n\n idx_sort, starts, ends = np.unravel_index(idx_sort, candidates.shape)\n scores = candidates[idx_sort, starts, ends]\n\n return starts, ends, scores, idx_sort\n","sub_path":"drqa/pipeline/drqa_transformers.py","file_name":"drqa_transformers.py","file_ext":"py","file_size_in_byte":10346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"448192172","text":"def xulychuoi(s): #EXTRACT THE NUMBER FROM A SUBSTRING\n\tlist = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n\tn = len(s)-1\n\tfor i in range(0, n+1): #RUN LOOP I, SCANNING EACH CHARACTER OF THE STRING\n\t\ts2 = s[i]\n\t\tnew = []\n\t\tchuoi = \"\"\n\t\tif (s2 in list): #IF A DIGIT APPEARS, KEEP SCANNING\n\t\t\ta = i+1\n\t\telse:\n\t\t\tbreak #IF NOT FOUND, STOP AND EXIT\n\n\tnew = int(s[0:a]) #TAKE THE STRING FROM THE BEGINNING UP TO THE LAST DIGIT CHARACTER\n\treturn -new\n\n\ndef tachchuoi(s): #SPLIT INTO SUBSTRINGS\n\tdem = 0\n\tlist = s.split(\"-\") #CREATE THE SUBSTRINGS BY REMOVING \"-\"\n\tn = len(list)-1\n\tlist2 = []\n\tfor i in range(0, n+1): #TAKE THE SUBSTRINGS THAT START WITH A DIGIT\n\t\tfor j in range(0, 10):\n\t\t\tif (list[i].startswith(str(j))):\n\t\t\t\tlist2.append(list[i])\n\treturn list2 #RETURN THOSE SUBSTRINGS\n\n\nprint(\"Nhap vao chuoi: \")\ns = str(input())\n\ndem = 0\nketqua1 = tachchuoi(s)\nfor i in ketqua1:\n\tdem += 1\nfor j in range(0, dem):\n\ttest = xulychuoi(ketqua1[j])\n\tprint(test,end=\" \")\nprint(\" \")","sub_path":"bhh.py","file_name":"bhh.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"469687590","text":"import 
getopt\nimport os\nimport re\nimport sys\n\nimport pandas as pd\n\nimport Ngrams as ng\nimport Utils as ut\nfrom nlon import training_nlon\n\nnlon, nlon_model = training_nlon()\n\n\"\"\"\n il codice è modificato per non usare score_file\n\"\"\"\n\n\ndef load_mairesse_features(input_arff, output_csv):\n # Read and clean arff file\n _encoding = 'utf-8'\n arff_path = input_arff\n csv_path = arff_path + 'csv'\n with open(arff_path, 'r', encoding=_encoding) as fr:\n attributes = []\n write_sw = False\n with open(csv_path, 'w', encoding=_encoding) as fw:\n for line in fr.readlines():\n if write_sw:\n fw.write(line)\n elif '@data' in line:\n fw.write(','.join(attributes) + '\\n')\n write_sw = True\n elif '@attribute' in line:\n attributes.append(line.split()[1])\n\n # Read result as csv\n df = pd.read_csv(csv_path)\n os.remove(csv_path)\n for i in range(0, len(df)):\n df.iloc[i, df.columns.get_loc('filename')] = re.search(r'(.*\\/)(.*)', df['filename'][i]).groups()[1] # (\\.txt)\n\n # Remove useless columns\n df.drop([list(df)[len(df.columns) - i] for i in range(1, 6)], axis=1, inplace=True)\n df.drop(['BROWN-FREQ', 'K-F-FREQ', 'K-F-NCATS', 'K-F-NSAMP', 'T-L-FREQ'], axis=1, inplace=True)\n\n # Replace '?' with 0\n for i in range(0, len(df)):\n df.iloc[i] = df.iloc[i].replace('?', '0')\n\n df.rename(columns={'filename': 'ID'}, inplace=True)\n df.to_csv(output_csv, index=False)\n\n\ndef load_corpus(infile):\n _dict = dict()\n df = pd.read_csv(infile, sep=';', header=0)\n for row in df.itertuples():\n key = row[1]\n value = list()\n for line in row[2].split('\\n'):\n line = line.strip()\n if not line or line == '' or nlon.NLoNPredict(nlon_model, line)[0] == 'Not':\n pass\n else: # it's natural language\n value.append(line)\n _dict[key] = value\n\n return _dict\n\n\nif __name__ == \"__main__\":\n argv = sys.argv[1:]\n path = ''\n arff_file = ''\n out_file = ''\n in_file = ''\n input_ngrams_set_file = ''\n output_ngrams_set_file = ''\n ngrams_extraction = True\n nlon_cleaning = False\n task_num = '8'\n\n help_string = \"\\nUsage:\\n\" \\\n 'python newdata_feature_extraction.py [-t task] [--nlon] -i -a ' \\\n '-o [--ngrams ]'\n try:\n opts, args = getopt.getopt(argv, \"ht:i:o:a:n:\", [\"task=\", \"input=\", \"nlon\", \"output=\", \"arff=\", \"ngrams=\"])\n except getopt.GetoptError as e:\n print(e)\n print(help_string)\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print(help_string)\n sys.exit(0)\n elif opt in (\"--nlon\"):\n nlon_cleaning = True\n elif opt in (\"-a\", \"--arff\"):\n arff_file = arg\n if not os.path.isfile(arff_file):\n print('Arff file {0} not found'.format(arff_file))\n sys.exit(2)\n elif opt in (\"-t\", \"--task\"):\n task_num = arg\n if task_num.isdigit():\n task_num = int(task_num)\n else:\n print('Wrong task `{0}`. 
Must be a number 1-8'.format(task_num))\n sys.exit(2)\n elif opt in (\"-n\", \"--ngrams\"):\n input_ngrams_set_file = arg\n ngrams_extraction = False\n if not os.path.isfile(input_ngrams_set_file):\n print('N-grams file {0} not found'.format(input_ngrams_set_file))\n sys.exit(2)\n elif opt in (\"-i\", \"--input\"):\n in_file = arg\n if not os.path.isfile(in_file):\n print('Input dataset file {0} not found'.format(in_file))\n sys.exit(2)\n elif opt in (\"-o\", \"--output\"):\n out_file = arg\n\n if in_file:\n # load corpus as dictionary\n corpus_dict = load_corpus(in_file)\n else:\n print('No input file for the new data')\n print(help_string)\n sys.exit(2)\n\n # generates a file formatted as ID,cOPE,cCON,cEXT,cAGR,cNEU\n # with only IDs available\n score_file = ['classes_task', '.csv']\n if task_num in range(1, 9): # values allowed 1-8\n pass\n ut.get_empty_score_file(sorted(corpus_dict.keys()), str(task_num).join(score_file))\n else:\n print('Wrong task number')\n print(help_string)\n sys.exit(2)\n\n dataset_name = in_file.split('.')[0]\n if ngrams_extraction:\n output_ngrams_set_file = os.path.join('tmp', dataset_name + '_ngrams_task' + str(task_num) + '.set')\n\n ngram_df = ng.get_ngram_df(corpus_dict, input_ngrams_set_file, ngrams_extraction, output_ngrams_set_file)\n\n feature_file = ['feature_task', '.csv']\n ut.get_feature_csv_from_arff(arff_file, str(task_num).join(feature_file), str(task_num).join(score_file))\n\n # Merge feature df with ngram_df\n feature_df = pd.read_csv(str(task_num).join(feature_file), sep=',')\n complete_df = pd.merge(left=ngram_df, right=feature_df, on='ID')\n complete_df.to_csv(os.path.join(out_file), index=False)\n\n print(\"Features saved into {0} file\".format(out_file))\n sys.exit(0)\n","sub_path":"newdata_feature_extraction.py","file_name":"newdata_feature_extraction.py","file_ext":"py","file_size_in_byte":5269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"218661136","text":"#-*- coding:utf-8 -*-\nimport os\nimport codecs\nimport re\nimport shutil\nimport time\n\n\ndef process_lua(file_name):\n\tf = open(file_name,'r', encoding='UTF-8') \n\tresult = list() \n\txx_temp = \"^(?!.*--).*(?P{}\\.\\w+)\"\n\n\tfor line in f.readlines():\n\t\tfor x in namespaces:\t\t#遍历所有的命名空间,找出合适的\n\t\t\txx = xx_temp.format(x)\n\t\t\tm = re.search(xx, line)\n\n\t\t\tif \tm:\n\t\t\t\tmy_type = m.groupdict()[\"type\"]\n\t\t\t\tif not(my_type in lst):\n\t\t\t\t\tlst.append(my_type)\n\t\t\t\tbreak\n\t\t \n\ndef WalkAll(srcPath):\n\tif not os.path.isdir(srcPath) and not os.path.isfile(srcPath):\n\t\tprint(\"path not exist\" + srcPath)\n\t\treturn False\n\n\tif os.path.isfile(srcPath):\n\t\tif srcPath.find(\".meta\") > -1 or srcPath.find(\"~$\") >= 0:\n\t\t\t#print(\"skip file: \" + srcPath)\n\t\t\treturn\n\n\t\tprocess_lua(srcPath)\n\t\t#count = count + 1\n\n\telif os.path.isdir(srcPath):\n\t\t#os.makedirs(dest_path)\n\t\tfor x in os.listdir(srcPath):\n\t\t\tWalkAll(os.path.join(srcPath,x))\n\n#_dir = \"F:\\Wartune.Client______\\Assets\\LuaScripts\\cs2lua\\wartune\\singledunglogic.lua\"\n#_dir = \"\"\"F:\\Wartune.Client______\\Assets\\LuaScripts\\game\\modules\\\\vitality\\\\vitalitysysmanager.lua\"\"\"\n_dir = \"\"\"F:\\Wartune.Client______\\Assets\\LuaScripts\"\"\"\nlst = list()\nnamespaces = 
['UnityEngine.UI.ProceduralImage',\n\t\t\t'UnityEngine.Profiling',\n\t\t\t'UnityEngine.SceneManagement',\n\t\t\t'UnityEngine.Events',\n\t\t\t'UnityEngine.EventSystems',\n\t\t\t'UnityEngine.Assertions',\n\t\t\t'UnityEngine.Serialization',\n\t\t\t'UnityEngine.UI',\n\t\t\t'UnityEngine.AI',\n\t\t\t'UnityEngine',\n\t\t\t'System.Xml',\n\t\t\t\"Paran\",\n\t\t\t\"FlatBuffers\",\n\t\t\t\"DG.Tweening\",\n\t\t\t#\"ETCInput\",\n\t\t\t\"RimFlow\",\n\t\t\t#\"ComponentExtensions\",\n\t\t\t\"SRF\",\n\t\t\t\"ScreenFade\",\n\t\t\t\"ScreenBlur\",\n\t\t\t\"UIHerDragCtrl\",\n\t\t\t]\n\nblack_name = [\"UnityEngine.Light\",\n\t\t\t\"UnityEngine.UI.Graphic\",\n\t\t\t\"UnityEngine.Events.UnityEvent_T0\",]\n\naddition = [\"XLuaExtensions\"]\n\nif __name__ == \"__main__\":\n\tWalkAll(_dir)\t\n\n\tfor j in black_name:\n\t\tlst.remove(j)\n\n\tfor x in addition:\n\t\tlst.append(x)\n\n\tlst_cs = list()\n\tfor x in lst:\n\t\tlst_cs.append('typeof({}),'.format(x))\n\n\tprint(\"\\n\".join(lst_cs))\n","sub_path":"find_all_unity_api.py","file_name":"find_all_unity_api.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"644656391","text":"import os.path\nfrom result_parser.util import *\nfrom result_parser.fio_parse.terse import FORMAT\n\n\nclass FioParse(object):\n \"\"\"FioParse - a class to parse and display terse outputs from FIO\"\"\"\n def __init__(self, path: str = None) -> None:\n \"\"\"\n The only thing needed to digest the output of a FIO test is the path\n to the terse-format results file. This file has consistent output\n between versions of FIO as the fields are standardized.\n Due to ease of parsing the output, there is no reason to read the file\n until we need to produce the results of it so the only thing stored\n is the absolute path.\n \"\"\"\n logger.debug(\n ' '.join(['Received request to generate FioParse object for:',\n '{}'.format(path)]) # Use format notation here to ensure\n ) # that any object logs properly\n # Validate our supplied path\n if path is None:\n logger.warning('FioParse object instantiated with no path')\n self.path = None\n elif os.path.isfile(path):\n logger.debug('Storing path: {}'.format(path))\n self.path = os.path.abspath(path)\n else:\n logger.error('FioParse object instantiated with invalid path')\n self.path = None\n\n @staticmethod\n def _is_zero(val: str) -> bool:\n \"\"\"\n FioParse._is_zero() returns true if the provided string is a string\n representation of zero, either as the integer zero, a decimal\n equivalent of zero, or as a representation of zero percent.\n \"\"\"\n val = val.strip()\n if val == '0':\n return True\n if val.strip('%').strip() == '0':\n return True\n try:\n if '.' not in val and int(val.strip('%').strip()) == 0:\n return True\n elif '.' in val and float(val.strip('%').strip()) == 0.0:\n return True\n except ValueError:\n pass\n return False\n\n def parse(self) -> list:\n \"\"\"\n FioParse.parse() returns a list of all of the parsed results present\n in path the object was instantiated with, returning an empty list if\n self.path wasn't saved (because it was invalid).\n \"\"\"\n if self.path is None:\n logger.warning(\n 'Request to generate a FIO output report with no path.'\n )\n return []\n\n ret = []\n with open(self.path, mode='r', encoding='utf-8') as f:\n for line in f.readlines():\n # Expect that the firsts field of terse output should start\n # with the number three, as this is the version of output that\n # we support. 
This strictly specifies compatibility with this\n # version of terse output (currently the default version in\n # modern releases of fio, and has been since 2012)\n # https://github.com/axboe/fio/commit/3449ab8c4d2addb716105ded698438a1d3811572\n data = line.split(';')\n if '3' in data and len(data) > 1 and data[1] == '3':\n parsed = {}\n # Prepare our parsed output for this line by identifying\n # the mapped block device and ensuring our return dict has\n # a section for it.\n blkdev_map = data[0]\n path = data[0].split(':')[0].split('->')[0]\n dev = data[0].split(':')[0].split('->')[-1]\n fstype = data[0].split('::')[-1].split('->')[0]\n # Our FORMAT dict should have all the fields neatly\n # labelled to support directly mapping the names to indexes\n # in our data\n for k in FORMAT:\n if not self._is_zero(data[k]):\n parsed[FORMAT[k]] = best_rep(data[k])\n logger.debug('Parsed the following disk output:')\n logger.debug(line)\n logger.debug('Generated:')\n logger.debug(parsed)\n # If there is a result with a job name that is for the same\n # path as this, just add another job to that container\n handled = False\n for result in ret:\n try:\n if result['path'] == path:\n result[parsed['INFO_job_name']] = parsed\n handled = True\n break\n except TypeError:\n # This is because we tried to slice into a\n # string via dictionary key - it's fine\n pass\n # If one doesn't exist, we'll create a new one\n if not handled:\n ret.append({\n 'blkdev_map': blkdev_map,\n 'path': path,\n 'dev': dev,\n 'fstype': fstype,\n parsed['INFO_job_name']: parsed\n })\n else:\n logger.warning(\n 'Cannot parse output of file {} line:'.format(\n self.path\n )\n )\n logger.warning(line.strip())\n return ret\n","sub_path":"result_parser/fio_parse/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"282133319","text":"# The clock shows 'h' hours, 'm' minutes and 's' seconds after midnight.\n# Your task is to make the 'past' function return the time converted to milliseconds.\n# More examples in the test cases below.\n\ndef past(h, m, s):\n\n # need to know how many milliseconds are in a hour/minute/second\n # convert hh into milliseconds\n hours_in_ms = h * 3600000\n # convert mm into milliseconds\n minutes_in_ms = m * 60000\n # convert ss into milliseconds\n seconds_in_ms = s * 1000\n # add hh, mm, ss millisecond values together\n # print output in milliseconds\n\n return hours_in_ms + minutes_in_ms + seconds_in_ms\n","sub_path":"tasks/exercise002.py","file_name":"exercise002.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"553242367","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n package.module\n ~~~~~~~~~~~~~~\n\n API for project pod monitor\n\n :copyright: (c) 2020/9/21 by Richie.Song.\n :license: OPS, see LICENSE_FILE for more details.\n\n####\n实现一个 Trie (前缀树),包含 insert, search, 和 startsWith 这三个操作。\n\n示例:\n\nTrie trie = new Trie();\n\ntrie.insert(\"apple\");\ntrie.search(\"apple\"); // 返回 true\ntrie.search(\"app\"); // 返回 false\ntrie.startsWith(\"app\"); // 返回 true\ntrie.insert(\"app\");\ntrie.search(\"app\"); // 返回 true\n说明:\n\n你可以假设所有的输入都是由小写字母 a-z 构成的。\n保证所有输入均为非空字符串。\n\n\n\"\"\"\n\n\nclass Trie(object):\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.root = {}\n self.end_of_word = \"#\"\n\n def insert(self, word):\n \"\"\"\n Inserts a word into the 
trie.\n :type word: str\n :rtype: None\n \"\"\"\n node = self.root\n for char in word:\n node = node.setdefault(char, {})\n node[self.end_of_word] = self.end_of_word\n\n def search(self, word):\n \"\"\"\n Returns if the word is in the trie.\n :type word: str\n :rtype: bool\n \"\"\"\n node = self.root\n for char in word:\n if char not in node:\n return False\n node = node[char]\n return self.end_of_word in node\n\n def startsWith(self, prefix):\n \"\"\"\n Returns if there is any word in the trie that starts with the given prefix.\n :type prefix: str\n :rtype: bool\n \"\"\"\n node = self.root\n for char in prefix:\n if char not in node:\n return False\n node = node[char]\n return True\n\n\nif __name__ == '__main__':\n t = Trie()\n c = \"songxiaoming\"\n print(t.insert(c))\n print(t.search(\"song\"))\n print(t.startsWith(\"song\"))\n","sub_path":"算法/情景题/208. 实现 Trie (前缀树).py","file_name":"208. 实现 Trie (前缀树).py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"42752070","text":"from .Events import *\nfrom .EMS import Event_Management_System, event_creation, Seminar_methods\nfrom .Users import User, Student, Admin, Attendance\nfrom .Session import session_attendees\nfrom flask_login import login_user\nfrom datetime import datetime\n\ndef date_converter(date_input):\n date_format = \"%Y-%m-%d\"\n date = datetime.strptime(date_input, date_format)\n return date\n\ndef bootstrap_system():\n\n system = Event_Management_System()\n b = 0\n with open(\"user.csv\") as fp: #close file is called automatically\n for line in fp:\n if b > 0:\n l = line.split(\",\")\n\n if 'trainer' in line:\n new_user = Admin(l[0], l[1], l[2], l[3], l[4], \"secret\")\n system.add_users(new_user)\n else:\n new_user = Student(l[0], l[1], l[2], l[3], l[4])\n system.add_users(new_user)\n b += 1\n return system \n'''\n user = Admin(\"Stephanie\", \"5133975\", \"z5133975@unsw.net\", \"pass\", \"trainer\", \"secret\")\n system.add_users(user)\n Course = event_creation.create_course(\"course details\", date_converter(\"2018-5-13\"), date_converter(\"2018-5-13\"), date_converter(\"2018-5-12\"), 50, date_converter(\"2018-5-12\"), 10, \"test input\", user, \"UNSW\", 0, 0)\n event_creation.add_course(system, Course) # add new course in system\n user.post_event(Course) # US1: add to admin subclass my event list\n event_creation.add_order(system, Course) # add new event in system\n Attendance.add_attending(user, Course) #add to users own record list\n Course.listAttendee().append(user)\n\n\n newSem = Seminar_methods.create_seminar('details', date_converter('2018-01-12'),date_converter('2018-01-15'), date_converter('2018-01-10'), 100, date_converter('2018-01-10'), 2, 'numSess', 2, 1, 1)\n #def create_seminar(details, start_date, end_date, deregistration, fee, early_bird, maxAttendee, SemName, numSess, typeof, eveID):\n user.post_event(newSem)\n Seminar_methods.add_seminar(system, newSem)\n\n s1 = Session('SessName', user, date_converter('2018-01-03'), 'Venue', 'SessDetails', 1, 0)\n newSem.add_sess(s1)\n Attendance.add_attending(user, newSem) #add to users own record list\n session_attendees.add_attendees(s1, user) #add to the session's list attendees\n newSem.listAttendee().append(user) #aooe\n\n s2 = Session('SessName', user, date_converter('2018-01-04'), 'Venue', 'SessDetails', 1, 1)\n newSem.add_sess(s2)\n session_attendees.add_attendees(s2, user) #add to the session's list attendees\n 
'''\n","sub_path":"Project/Src_Code/src/initialize.py","file_name":"initialize.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"612099854","text":"import numpy as np\n\nGROUND_CLASS = {\n \"NBE\": None,\n \"G\": 0,\n 'Z': 1,\n 'L': 2,\n 'K': 3,\n 'V': 4,\n \"H\": 4,\n \"S\": 2\n}\n\nIC_MAPPING = {\n \"G\": 0.5,\n \"Z\": 2.6,\n \"L\": 2.95,\n \"K\": 4,\n \"V\": 100\n}\n\nGAMMA_MAPPING = {\n \"G\": 20,\n \"Z\": 20,\n \"L\": 20,\n \"K\": 17,\n \"V\": 11,\n \"H\": 11,\n \"S\": 20\n}\n\nMAIN_SOIL = [\"G\", \"Z\", \"L\", \"K\", \"V\"]\nGAMMA_ARRAY = np.array([GAMMA_MAPPING[k] for k in MAIN_SOIL])\n\n\ndef det_u_hydrostatic(l, water_level=1.):\n \"\"\"\n\n :param l: (array)\n :param water_level: (flt) Depth of the water level.\n :return: (array) Hydrostatic water pressure\n \"\"\"\n u = (l - water_level) * 10e-3\n u[u < 0] = 0\n return u\n\n\ndef det_layer_weight(sp, dl):\n return dl * np.dot(GAMMA_ARRAY / 1e3, sp)\n\n\ndef det_ground_pressure(l, sp):\n \"\"\"\n Assumes the last layer is the same thickness as the previous layer.\n\n Note that the l array could be like this:\n [1.2, 1.22, 1.24, ... n]\n\n The first values will be assumed to be 1.2 meters of the first layer.\n\n :param l: (array) Length\n :param sp: (array) Soil print\n :return: (array) Ground pressure\n \"\"\"\n dl = np.diff(l)\n return np.cumsum(np.dot(sp, GAMMA_ARRAY * 1e-3) * np.append(l[0], dl))\n\n\ndef det_effective_stress(l, u2, sp):\n \"\"\"\n\n :param l: (array) Length\n :param u2: (array) Water pressure\n :param sp: (2D array) Soil print\n :return: (array) Effective stress\n \"\"\"\n return det_ground_pressure(l, sp) - u2","sub_path":"pygef/soil.py","file_name":"soil.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"488117090","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom argparse import ArgumentParser, FileType\n\nfrom mpmath import mp\nimport numpy as np\n\nfrom pyfr import __version__ as version\nfrom pyfr.backends import get_backend\nfrom pyfr.inifile import Inifile\nfrom pyfr.mpiutil import register_finalize_handler\nfrom pyfr.rank_allocator import get_rank_allocation\nfrom pyfr.progress_bar import ProgressBar\nfrom pyfr.readers.native import read_pyfr_data\nfrom pyfr.solvers import get_solver\n\n\ndef process_run(args):\n return read_pyfr_data(args.mesh), None, Inifile.load(args.cfg)\n\n\ndef process_restart(args):\n mesh = read_pyfr_data(args.mesh)\n soln = read_pyfr_data(args.soln)\n\n # Ensure the solution is from the mesh we are using\n if soln['mesh_uuid'] != mesh['mesh_uuid']:\n raise RuntimeError('Invalid solution for mesh.')\n\n # Process the config file\n if args.cfg:\n cfg = Inifile.load(args.cfg)\n else:\n cfg = Inifile(soln['config'])\n\n return mesh, soln, cfg\n\n\n@mp.workdps(60)\ndef main():\n from mpi4py import MPI\n\n ap = ArgumentParser(prog='pyfr-sim', description='Runs a PyFR simulation')\n ap.add_argument('--verbose', '-v', action='count')\n ap.add_argument('--backend', '-b', default='cuda', help='Backend to use')\n ap.add_argument('--progress', '-p', action='store_true',\n help='show a progress bar')\n ap.add_argument('--nansweep', '-n', metavar='N', type=int,\n help='check for NaNs every N steps')\n\n sp = ap.add_subparsers(help='sub-command help')\n\n ap_run = sp.add_parser('run', help='run --help')\n ap_run.add_argument('mesh', help='mesh file')\n ap_run.add_argument('cfg', 
type=FileType('r'), help='config file')\n ap_run.set_defaults(process=process_run)\n\n ap_restart = sp.add_parser('restart', help='restart --help')\n ap_restart.add_argument('mesh', help='mesh file')\n ap_restart.add_argument('soln', help='solution file')\n ap_restart.add_argument('cfg', nargs='?', type=FileType('r'),\n help='new config file')\n ap_restart.set_defaults(process=process_restart)\n\n # Parse the arguments\n args = ap.parse_args()\n mesh, soln, cfg = args.process(args)\n\n # Ensure MPI is suitably cleaned up\n register_finalize_handler()\n\n # Create a backend\n backend = get_backend(args.backend, cfg)\n\n # Get the mapping from physical ranks to MPI ranks\n rallocs = get_rank_allocation(mesh, cfg)\n\n # Construct the solver\n solver = get_solver(backend, rallocs, mesh, soln, cfg)\n\n # If we are running interactively then create a progress bar\n if args.progress and MPI.COMM_WORLD.rank == 0:\n pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)\n\n # Register a callback to update the bar after each step\n callb = lambda intg: pb.advance_to(intg.tcurr)\n solver.completed_step_handlers.append(callb)\n\n # NaN sweeping\n if args.nansweep:\n def nansweep(intg):\n if intg.nsteps % args.nansweep == 0:\n if any(np.isnan(np.sum(s)) for s in intg.soln):\n raise RuntimeError('NaNs detected at t = {}'\n .format(intg.tcurr))\n solver.completed_step_handlers.append(nansweep)\n\n # Execute!\n solver.run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pyfr/scripts/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"416928153","text":"\nimport weka.core.jvm as jvm\njvm.start()\n\ndata_dir = \"D:\\\\Programing\\\\snipplets\\\\FraudData\"\nfrom weka.core.converters import Loader\nloader = Loader(classname=\"weka.core.converters.ArffLoader\")\ndata = loader.load_file(data_dir + \"iris.arff\")\ndata.class_is_last()\n\nprint(data)","sub_path":"FraudData/weka_thingy.py","file_name":"weka_thingy.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"567625627","text":"from odoo import models, fields, api, exceptions\nfrom datetime import date,datetime\nfrom dateutil.relativedelta import relativedelta\nfrom collections import OrderedDict\nimport xlsxwriter\nfrom io import StringIO\nimport base64\nimport pandas as pd\nimport numpy as np\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass libro_mayor_reportes_chile(models.TransientModel):\n _inherit = 'wizard.reportes.chile'\n\n @api.multi\n def dic_libro_mayor(self):\n dic = OrderedDict([\n ('Fecha',''),\n ('Comprobante',''),\n ('Rut',''),\n ('Partner',''),\n ('Cuenta',''),\n ('Glosa',''),\n ('Documento',''),\n ('Debe',0.0),\n ('Haber',0.0),\n ('Saldo',0.0)\n ])\n return dic\n\n @api.multi\n def _libro_mayor_sql(self,wizard=False):\n if wizard:\n wiz = self.search([('id','=',wizard)])\n else:\n wiz = self\n company = wiz.company_id.id\n #if not (wiz.fecha_inicio or wiz.fecha_term):\n # raise exceptions.Warning('Debe seleccionar al menos un periodo')\n #periodo = wiz.period_ids[0]\n fecha_inicio = wiz.fecha_inicio\n fecha_term = wiz.fecha_term\n #fiscal = periodo.fiscalyear_id.id\n #perids = set(wiz.period_ids.ids)\n #periodos= '('+\",\".join(map(str,perids))+')'\n cuentas = ''\n if wiz.acount_ids:\n cuentas = 'and aa.id in ('+\",\".join(map(str,set(wiz.acount_ids.ids)))+')'\n wiz.env.cr.execute(\"\"\"\n 
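# Stand-alone sketch of the sub-command dispatch pattern used by the pyfr-sim
# script above: each subparser registers its handler with set_defaults(process=...)
# and main() simply calls args.process(args). The handler bodies and the mesh
# filename below are placeholders invented for the demo.
from argparse import ArgumentParser

def process_run(args):
    return ('run', args.mesh)

def process_restart(args):
    return ('restart', args.mesh, args.soln)

ap = ArgumentParser(prog='demo')
sp = ap.add_subparsers()
ap_run = sp.add_parser('run')
ap_run.add_argument('mesh')
ap_run.set_defaults(process=process_run)
ap_restart = sp.add_parser('restart')
ap_restart.add_argument('mesh')
ap_restart.add_argument('soln')
ap_restart.set_defaults(process=process_restart)

args = ap.parse_args(['run', 'box.pyfrm'])
print(args.process(args))    # ('run', 'box.pyfrm')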
SELECT\n null as Fecha,\n 'Saldo Inicial' as comprobante,\n null,\n null,\n concat_ws(' - ', aa.code::text, aa.name::text) as cuenta,\n null,\n null,\n sum(aml.debit),\n sum(aml.credit)\n\n FROM\n account_move_line aml,\n account_account aa,\n account_move am\n\n WHERE\n aml.account_id=aa.id and\n aml.move_id=am.id and\n am.state='posted' and\n aml.company_id = %s and\n aml.date <= '%s'\n %s\n\n GROUP BY\n cuenta\n\n UNION ALL\n\n SELECT\n q1.fecha,\n q1.comprobante,\n q2.rut,\n q2.partner,\n q1.cuenta,\n q1.ref,\n q1.nombre,\n q1.debit,\n q1.credit\n\n FROM\n (\n SELECT\n\n aml.date as fecha,\n am.name as comprobante,\n concat_ws(' - ', aa.code::text, aa.name::text) as cuenta,\n aml.ref as ref,\n aml.name as nombre,\n aml.debit as debit,\n aml.credit as credit,\n aml.partner_id as partner_id\n\n FROM\n account_move_line aml,\n account_account aa,\n account_move am\n\n WHERE\n aml.account_id=aa.id and\n aml.move_id=am.id and\n am.state='posted' and\n aml.company_id = %s and\n aml.date >= '%s' and\n aml.date <= '%s'\n %s\n )q1\n\n LEFT JOIN\n\n (\n SELECT\n\n rp.id id,\n rp.document_number rut,\n rp.name partner\n\n FROM\n\n res_partner rp\n )q2\n\n ON\n q2.id=q1.partner_id\n\n ORDER BY\n cuenta, fecha NULLS FIRST, comprobante\n \"\"\" %(company,fecha_inicio,cuentas,company,fecha_inicio,fecha_term,cuentas))\n dic = wiz.dic_libro_mayor()\n lista = []\n docs = wiz.env.cr.fetchall()\n # cuentas = set([(record[9],record[4]) for record in docs])\n # for record in cuentas:\n # lista += wiz._libro_mayor_saldo_inicial_sql(record[0],record[1])\n for record in docs:\n dicti = OrderedDict()\n dicti.update(dic)\n dicti['Fecha']=record[0]\n dicti['Comprobante']=record[1]\n dicti['Rut']=record[2]\n dicti['Partner']=record[3]\n dicti['Cuenta']=record[4]\n dicti['Glosa']=record[5]\n dicti['Documento']=record[6]\n dicti['Debe']=float(record[7])\n dicti['Haber']=float(record[8])\n lista.append(dicti)\n tabla = pd.DataFrame(lista)\n if not tabla.empty:\n tabla['Saldo'] = (tabla['Debe']-tabla['Haber'])\n tabla['Saldo'] = tabla.groupby('Cuenta')['Saldo'].transform(pd.Series.cumsum)\n return tabla\n\n","sub_path":"models/libro_mayor.py","file_name":"libro_mayor.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"182735298","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport unittest\nimport subprocess\nimport shutil\nimport glob\n\nSIIPSIGN = os.path.join('scripts', 'siip_sign.py')\n\n\nclass TestSIIPSign(unittest.TestCase):\n '''A set of integration test cases (for now)'''\n\n def setUp(self):\n '''Create required files'''\n pass\n\n def tearDown(self):\n '''Clean up generated files'''\n\n shutil.rmtree('extract', ignore_errors=True)\n files_to_clean = glob.glob('key*.pem')\n files_to_clean.append('payload.bin')\n files_to_clean.append('signed.bin')\n files_to_clean.append('fkm.bin')\n\n for f in files_to_clean:\n try:\n os.remove(f)\n except FileNotFoundError:\n pass\n\n def test_empty_args(self):\n '''Test empty args'''\n\n cmd = ['python', SIIPSIGN]\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_call(cmd)\n self.assertEqual(cm.exception.returncode, 2)\n\n def test_invalid_args(self):\n '''Test invalid args'''\n\n cmd = ['python', SIIPSIGN, 'build']\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_call(cmd)\n self.assertEqual(cm.exception.returncode, 2)\n\n cmd = ['python', SIIPSIGN, 'sign', '-s', 'sha_foo']\n with 
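# Small, self-contained illustration of the running-balance step at the end of
# _libro_mayor_sql above: Saldo is the per-account (Cuenta) cumulative sum of
# Debe - Haber. The three rows below are invented sample data.
import pandas as pd

tabla = pd.DataFrame({
    'Cuenta': ['1101 - Caja', '1101 - Caja', '2101 - Proveedores'],
    'Debe':   [100.0, 0.0, 0.0],
    'Haber':  [0.0, 40.0, 25.0],
})
tabla['Saldo'] = tabla['Debe'] - tabla['Haber']
tabla['Saldo'] = tabla.groupby('Cuenta')['Saldo'].transform(pd.Series.cumsum)
print(tabla['Saldo'].tolist())    # [100.0, 60.0, -25.0]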
self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_call(cmd)\n self.assertEqual(cm.exception.returncode, 2)\n\n def test_help(self):\n '''Test help message'''\n\n cmd = ['python', SIIPSIGN, '-h']\n subprocess.check_call(cmd)\n\n def test_version(self):\n '''Test version info'''\n cmd = ['python', SIIPSIGN, '-V']\n subprocess.check_call(cmd)\n\n def test_signing_rsa2k(self):\n '''Test signing'''\n\n hash_options = ['sha256', 'sha384', 'sha512']\n pld_file = 'payload.bin'\n out_file = 'signed.bin'\n\n with open(pld_file, 'wb') as pld:\n pld.write(os.urandom(1024*1024))\n\n # Create a new test RSA key\n cmd = ['openssl', 'genrsa', '-out', 'key.pem', '2048']\n subprocess.check_call(cmd)\n\n # Get public key from test key\n cmd = ['openssl', 'rsa', '-pubout', '-in',\n 'key.pem', '-out', 'key.pub.pem']\n subprocess.check_call(cmd)\n\n for hash_alg in hash_options:\n cmd = ['python', SIIPSIGN, 'sign', '-i', pld_file, '-o', out_file,\n '-k', 'key.pem', '-s', hash_alg]\n subprocess.check_call(cmd)\n\n cmd = ['python', SIIPSIGN, 'verify', '-i', out_file,\n '-p', 'key.pub.pem', '-s', hash_alg]\n subprocess.check_call(cmd)\n\n cmd = ['python', SIIPSIGN, 'decompose', '-i', out_file]\n subprocess.check_call(cmd)\n\n def test_signing_rsa3k(self):\n '''Test signing'''\n\n hash_options = ['sha256', 'sha384', 'sha512']\n pld_file = 'payload.bin'\n out_file = 'signed.bin'\n\n with open(pld_file, 'wb') as pld:\n pld.write(os.urandom(1024*1024))\n\n # Create a new test RSA key\n cmd = ['openssl', 'genrsa', '-out', 'key.pem', '3072']\n subprocess.check_call(cmd)\n\n # Get public key from test key\n cmd = ['openssl', 'rsa', '-pubout', '-in',\n 'key.pem', '-out', 'key.pub.pem']\n subprocess.check_call(cmd)\n\n for hash_alg in hash_options:\n cmd = ['python', SIIPSIGN, 'sign', '-i', pld_file, '-o', out_file,\n '-k', 'key.pem', '-s', hash_alg]\n subprocess.check_call(cmd)\n\n cmd = ['python', SIIPSIGN, 'verify', '-i', out_file,\n '-p', 'key.pub.pem', '-s', hash_alg]\n subprocess.check_call(cmd)\n\n cmd = ['python', SIIPSIGN, 'decompose', '-i', out_file]\n subprocess.check_call(cmd)\n\n def test_key_size_too_small(self):\n '''Test with signing key with size smaller than 2048 bit'''\n\n pld_file = 'payload.bin'\n out_file = 'signed.bin'\n\n with open(pld_file, 'wb') as pld:\n pld.write(os.urandom(1024*1024))\n\n # Create a new test RSA key\n cmd = ['openssl', 'genrsa', '-out', 'key.pem', '1024']\n subprocess.check_call(cmd)\n\n cmd = ['python', SIIPSIGN, 'sign', '-i', pld_file, '-o', out_file,\n '-k', 'key.pem', '-s', 'sha384']\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_call(cmd)\n self.assertEqual(cm.exception.returncode, 1)\n\n def test_fkm_subcommand(self):\n '''Test FKM generation and verify subcommand'''\n\n hash_options = ['sha256', 'sha384', 'sha512']\n out_file = 'fkm_only.bin'\n fkm_key = ('key1.pem', 'key1.pub.pem')\n pld_key = ('key2.pem', 'key2.pub.pem')\n\n # Create test keys\n for priv, pub in [fkm_key, pld_key]:\n cmd = ['openssl', 'genrsa', '-out', priv, '3072']\n subprocess.check_call(cmd)\n\n cmd = ['openssl', 'rsa', '-pubout', '-in', priv, '-out', pub]\n subprocess.check_call(cmd)\n\n for hash_alg in hash_options:\n cmd = ['python', SIIPSIGN, 'fkmgen',\n '-k', fkm_key[0],\n '-p', pld_key[1], # Use public key\n '-s', hash_alg,\n '-o', out_file]\n print(\" \".join(cmd))\n subprocess.check_call(cmd)\n\n cmd = ['python', SIIPSIGN, 'fkmcheck', '-i', out_file,\n '-p', fkm_key[1],\n '-t', pld_key[1]] # pubkey\n subprocess.check_call(cmd)\n\n 
cmd = ['python', SIIPSIGN, 'decompose', '-i', out_file]\n subprocess.check_call(cmd)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"siiptool/tests/test_siip_sign.py","file_name":"test_siip_sign.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"544157156","text":"# *-* encoding: utf-8 *-*\nimport argparse,json,os,sys,csv,datetime,re\n\ndef extractFollowerNetw(filein,fileout):\n counter_sk = 0\n counter_nofr = 0\n counter_nofl = 0\n counter_notw = 0\n counter_nousr = 0\n counter_line = 0\n counter_ok = 0\n\n writer = csv.writer(fileout,delimiter=',')\n writer.writerow(['ts','uid','friends_count','followers_count'])\n\n for line in filein.readlines():\n counter_line += 1\n if counter_line % 10000 == 0:\n print(counter_line,'lines treated')\n try:\n tw = json.loads(line)\n except:\n counter_sk += 1\n continue\n if 'twitter' not in tw.keys():\n counter_notw += 1\n continue\n\n d = [int(i) for i in re.findall(r'[0-9]+',tw['twitter']['created_at'])]\n dt = datetime.datetime(*d[0:6])\n ts = dt.timestamp()\n try:\n uid = int(tw['twitter']['user']['id'])\n except:\n counter_nousr +=1\n continue\n if 'friends_count' not in tw['twitter']['user'].keys():\n nfr = 0\n counter_nofr += 1\n else:\n nfr = int(tw['twitter']['user']['friends_count'])\n\n if 'followers_count' not in tw['twitter']['user'].keys():\n nfl = 0\n else:\n nfl = tw['twitter']['user']['followers_count']\n counter_ok +=1\n writer.writerow([ts,uid,nfr,nfl])\n\n print('Stats for file',filein.name)\n print('Total lines:\\t\\t',counter_line)\n print('Parsed correctly:\\t',counter_ok)\n print('No json:\\t\\t',counter_sk)\n print('No tweet:\\t\\t',counter_notw)\n print('No users:\\t\\t',counter_nousr)\n print('No friends:\\t\\t',counter_nofr)\n print('No followers:\\t',counter_nofl)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i','--input',\n type=argparse.FileType('r'),\n default=sys.stdin,\n help=\"Input file, tweets, json format\")\n parser.add_argument('-o','--output',\n type=argparse.FileType('w'),\n default=sys.stdout,\n help=\"Output file, csv, \")\n\n args = parser.parse_args()\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n extractFollowerNetw(args.input,args.output)\n\n args.input.close()\n args.output.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"version_20160401.dir/linkprediction.dir/preliminarystudy.dir/test_sim_june_month.dir/followernetwork.dir/extract_users_friends_followers.py","file_name":"extract_users_friends_followers.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"502812787","text":"import time\nfrom flask import Flask, jsonify\nimport random\napp = Flask(__name__)\nlist0=['motor0','motor1','motor2','motor3','sensor0','sensor1','sensor2','sensor3']\nlist1=['motor4','motor5','motor6','sensor4','sensor5','sensor6','sensor7']\ndef random_s():\n dic = {}\n for i in range(8):\n dic[str(list0[i])] = str(random.randrange(2))\n return dic\n\ndef random_n():\n dic = {}\n for i in range(7):\n dic[str(list1[i])] = str(random.randrange(2))\n return dic\n\n@app.route('/s/status/')\ndef status_s():\n d=random_s()\n return jsonify(d)\n\n@app.route('/n/status/')\ndef status_n():\n d=random_n()\n return jsonify(d)\n\nif __name__ == '__main__':\n 
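# Minimal reproduction of the failure-path pattern used throughout TestSIIPSign:
# subprocess.check_call raises CalledProcessError carrying the child's exit code,
# which the tests then assert on. The child command here is a trivial stand-in.
import subprocess
import sys

try:
    subprocess.check_call([sys.executable, '-c', 'import sys; sys.exit(2)'])
except subprocess.CalledProcessError as e:
    assert e.returncode == 2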
app.run(port=5000)\n\n","sub_path":"table_roller/Lines.py","file_name":"Lines.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"260013784","text":"'''\nCreated on 23 Dec 2016\n\n@author: chrisdoherty\n'''\n\nfrom Detection import Detection\nfrom threading import Lock\n\n# Manages new detections using the MotionSensor.\nclass DetectionManager():\n # the current detection\n _detection = None\n \n # Lock to stop the detection from restarting.\n _lock = Lock()\n \n # Used when a detection finishes. Should accept 1 argument - the detection.\n new_detection = None\n \n # Initialises internal variables.\n def __init__(self):\n print(\"DetectionManager: initialised\")\n \n # Begins a new detection based on the MotionSensor object.\n def start_detection(self):\n print(\"DetectionManager: start detection\")\n self._lock.acquire()\n self._detection = Detection()\n self._detection.start()\n self._detection.stop()\n \n if self.new_detection != None:\n self.new_detection(self._detection)\n\t\n self._detection = None\n self._lock.release()\n print(\"DetectionManager: start deteciton function finished\")\n \n # Ends a detection and passes it to the specified callback\n def stop_detection(self):\n print(\"DetectionManager: detection ended\")\n","sub_path":"src/app/DetectionManager.py","file_name":"DetectionManager.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"331210762","text":"from lib.constants import Constants\nfrom lib.commonConnection.commonConnection import CommonConnection\nfrom lib.logger.logger import Logger\nfrom lib.helpers.fileHelper import FileHelper\nfrom lib.serverConnection.queueHandler import QueueHandler\nimport random\nimport os\nimport socket\n\n\nclass Connection:\n def startCommunicating(s, fs, sPath, queue, recvMsg, cont, v, q, lr):\n s.settimeout(1.0)\n while True:\n r = random.random()\n try:\n data, addr = s.recvfrom(Constants.bytesChunk())\n if r >= lr:\n mode = data[0:1]\n if mode.decode() == 'A':\n msg = data.decode()\n qMsg = msg + '-' + str(addr[0]) + '-' + str(addr[1])\n recvMsg[qMsg] = True\n Connection.process(s, fs, data, addr, sPath, queue, v, q)\n except socket.timeout:\n close = cont.get('exit', False)\n if close:\n break\n except Exception:\n break\n return\n\n def process(s, f, msg, addr, pth, queue, v, q):\n mode = msg[0:1].decode()\n data = msg[1:]\n if mode == Constants.uploadProtocol():\n Connection.startUpload(s, f, data, addr, pth, queue, v, q)\n elif mode == Constants.downloadProtocol():\n Connection.startDownload(s, f, data, addr, pth, queue, v, q)\n elif mode == Constants.errorProtocol():\n Connection.processError(s, f, data, addr, v, q)\n elif mode == Constants.endProtocol():\n Connection.processEnd(s, f, data, addr, v, q)\n elif mode == Constants.ackProtocol():\n Connection.processAck(s, f, data, addr, queue, v, q)\n elif mode == Constants.fileTransferProtocol():\n Connection.processTransfer(s, f, data, addr, queue, v, q)\n return\n\n def startUpload(s, files, msg, addr, sPath, msgQueue, verbose, quiet):\n message = msg.decode()\n try:\n file = open(sPath+message, \"wb\")\n Logger.logIfVerbose(verbose, \"File \" + message + \" opened\")\n files[message] = file\n CommonConnection.sendACK(s, addr[0], addr[1], 'U', message, 0)\n except OSError:\n Logger.log(\"Error opening file \" + message)\n msg = CommonConnection.sendError(s, message, addr[0], addr[1])\n 
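# Hypothetical client for the two Flask endpoints defined above; it assumes the
# app is running locally on port 5000 and that the third-party 'requests' package
# is installed. The endpoint paths and the '0'/'1' string states come from the app.
import requests

status_s = requests.get('http://127.0.0.1:5000/s/status/').json()
status_n = requests.get('http://127.0.0.1:5000/n/status/').json()
active = [name for name, state in {**status_s, **status_n}.items() if state == '1']
print(active)    # names of motors/sensors currently reported as 1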
msgQueue.put(QueueHandler.makeSimpleExpected(msg, addr))\n return\n return\n\n def processTransfer(s, files, data, addr, msgQueue, v, q):\n values = data[0: Constants.maxHeaderTransProtocolSize() - 1].decode()\n separatorPossition = values.find(';')\n fname = values[0:separatorPossition]\n processedData = values[separatorPossition+1:]\n separatorPossition = processedData.find(';')\n bytesRecv = int(processedData[0:separatorPossition])\n msg = data[Constants.maxHeaderTransProtocolSize() - 1:]\n try:\n f = files[fname]\n Connection.upload(s, f, fname, bytesRecv, msg, addr, v, q)\n except OSError as e:\n print(e)\n Logger.logIfNotQuiet(q, \"Error processing transfer\")\n msg = CommonConnection.sendError(s, fname, addr[0], addr[1])\n msgQueue.put(QueueHandler.makeSimpleExpected(msg, addr))\n return\n except Exception:\n return\n\n def startDownload(s, files, data, addr, sPath, msgQueue, v, q):\n filename = data.decode()\n try:\n file = open(sPath+filename, \"rb\")\n files[filename] = file\n data = file.read(Constants.getMaxReadSize())\n h = addr[0]\n p = addr[1]\n Logger.logIfVerbose(v, filename\n + \" begins to be sent to the client \" +\n str(addr))\n msg = CommonConnection.sendMessage(s, h, p, filename, data, 0)\n msgQueue.put(QueueHandler.makeMessageExpected(msg, addr))\n except Exception:\n Logger.log(\"Error opening file \" + filename)\n msg = CommonConnection.sendError(s, filename, addr[0], addr[1])\n msgQueue.put(QueueHandler.makeSimpleExpected(msg, addr))\n return\n return\n\n def processError(s, files, data, addr, v, q):\n filename = data.decode()\n try:\n files[filename].close()\n CommonConnection.sendACK(s, addr[0], addr[1], 'F', filename, 0)\n except Exception:\n Logger.log(\"Error processing error\")\n return\n return\n\n def processEnd(s, files, data, addr, v, q):\n filename = data.decode()\n try:\n files[filename].close()\n CommonConnection.sendACK(s, addr[0], addr[1], 'E', filename, 0)\n Logger.log(\"File \" + filename + \" was successfully stored\")\n except Exception:\n Logger.log(\"Error processing end file\")\n return\n return\n\n def processAck(s, files, msg, addr, queue, v, q):\n data = msg.decode()\n md = data[0]\n processedData = data[1:]\n if md == Constants.endProtocol():\n separatorPossition = processedData.find(';')\n fname = processedData[0:separatorPossition]\n Logger.log(\"File \" + fname + \" was successfully sent\")\n return\n elif md == Constants.errorProtocol():\n return\n elif md == Constants.fileTransferProtocol():\n separatorPossition = processedData.find(';')\n fname = processedData[0:separatorPossition]\n bRecv = int(processedData[separatorPossition+1:])\n f = files[fname]\n return Connection.download(s, f, fname, bRecv, addr, queue, v, q)\n return\n\n def download(s, f, fname, br, addr, msgQueue, v, q):\n f.seek(br, os.SEEK_SET)\n data = f.read(Constants.getMaxReadSize())\n if len(data) == 0:\n Logger.logIfVerbose(v, \"Sending EndFile to client: \" + str(addr))\n msg = CommonConnection.sendEndFile(s, addr[0], addr[1], fname, 0)\n msgQueue.put(QueueHandler.makeSimpleExpected(msg, addr))\n else:\n h = addr[0]\n port = addr[1]\n Logger.logIfVerbose(v, \"Sending \" + str(br)\n + \" bytes to client: \" + str(addr))\n msg = CommonConnection.sendMessage(s, h, port, fname, data, br)\n msgQueue.put(QueueHandler.makeMessageExpected(msg, addr))\n return\n\n def upload(s, f, fname, bytesRecv, msg, addr, v, q):\n filesize = FileHelper.getFileSize(f)\n if bytesRecv == filesize:\n Logger.logIfVerbose(v, \"Recieved: \" + str(bytesRecv) + \" from \" +\n str(addr))\n 
f.seek(bytesRecv, os.SEEK_SET)\n Logger.logIfVerbose(v, \"Writing file \" + fname)\n f.write(msg)\n filesize = FileHelper.getFileSize(f)\n CommonConnection.sendACK(s, addr[0], addr[1], 'T', fname, filesize)\n elif bytesRecv < filesize:\n CommonConnection.sendACK(s, addr[0], addr[1], 'T', fname,\n bytesRecv + Constants.getMaxReadSize())\n return\n","sub_path":"src/lib/serverConnection/serverConnection.py","file_name":"serverConnection.py","file_ext":"py","file_size_in_byte":7232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"50204498","text":"# metabolic_robustness/utils/normdist.py\n\nfrom scipy.stats import truncnorm\nfrom scipy.stats import norm\nfrom math import sqrt\n\n\ndef truncnorm_pdf(x, mean, var, min, max):\n a, b = _truncnorm_params_transform(mean, var, min, max)\n val = truncnorm.pdf(\n x, a, b,\n loc = mean,\n scale = sqrt(var),\n )\n return(val)\n\n\ndef truncnorm_sample(mean, var, min, max):\n a, b = _truncnorm_params_transform(mean, var, min, max)\n val = truncnorm.rvs(\n a, b,\n loc = mean,\n scale = sqrt(var),\n )\n return(val)\n\n\ndef _truncnorm_params_transform(mean, var, min, max):\n a = (min - mean) / sqrt(var)\n b = (max - mean) / sqrt(var)\n return(a, b)\n\n\ndef norm_pdf(x, mean, var):\n val = norm.pdf(x, loc=mean, scale=sqrt(var))\n return(val)\n\n\ndef norm_sample(mean, var):\n val = norm.rvs(\n loc = mean,\n scale = sqrt(var)\n )\n return(val)\n","sub_path":"utils/normdist.py","file_name":"normdist.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"635632038","text":"#!/usr/bin/env python2\n\nimport argparse\nimport os\nimport subprocess\nimport psycopg2\nimport psycopg2.extras\nimport time\nimport logging\nimport threading\nfrom Queue import Queue\nfrom multiprocessing import cpu_count\nargs = None\nworkers = []\nqueue = Queue()\nerr_count = 0\n\n\ndef executeOnDB(sql, params=None, dbname='postgres'):\n conn = psycopg2.connect(host=args.host, port=args.port, dbname=dbname, user=args.username)\n conn.autocommit = True\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(sql, params)\n if cur.statusmessage.startswith('SELECT') or cur.description:\n return cur.fetchall()\n else:\n return [{'rows_affected': str(cur.rowcount)}]\n\ndef shell_exec_with_output(commands, ok_code=0):\n process = subprocess.Popen(commands, stdout=subprocess.PIPE, shell=True, stderr=subprocess.STDOUT)\n exitcode = process.wait()\n output = process.stdout.read().strip()\n if exitcode != ok_code:\n logging.error('Error executing: %s', commands)\n logging.error(output)\n return exitcode, output\n\ndef workerProcess(id):\n \"\"\"Takes tables from the queue if any, and pg_dumps the table to /dev/null\"\"\"\n logging.info('Starting worker process %s ...', id)\n while True:\n dbname, tbl = queue.get()\n cmd = '{pg_dump} -h {host} -p {port} -U \"{user}\" -t {tbl} \"{dbname}\" >/dev/null'.format(\n pg_dump=os.path.join(args.bindir, 'pg_dump'), host=args.host, port=args.port, user=args.username, tbl=tbl, dbname=dbname)\n logging.info('Executing %s', cmd)\n retcode, output = shell_exec_with_output(cmd)\n if retcode != 0:\n if output.find(\"No matching tables were found\") >= 0:\n logging.warning(\"Table %s could not be found [%s]\", tbl, dbname)\n continue\n logging.error(\"Failed to dump contents of table %s [%s]\", tbl, dbname)\n global err_count\n err_count += 1\n return\n\n\ndef launchWorkers():\n 
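# Quick check of the truncated-normal parameterisation used in normdist.py above:
# scipy's truncnorm expects the bounds a, b in standard-deviation units relative
# to loc, which is exactly what _truncnorm_params_transform computes. The numbers
# below are arbitrary.
from math import sqrt
from scipy.stats import truncnorm

mean, var, lo, hi = 0.0, 4.0, -1.0, 3.0
a, b = (lo - mean) / sqrt(var), (hi - mean) / sqrt(var)    # -0.5, 1.5
dist = truncnorm(a, b, loc=mean, scale=sqrt(var))
assert dist.pdf(hi + 0.1) == 0.0          # no probability mass outside [lo, hi]
assert abs(dist.cdf(hi) - 1.0) < 1e-9
print(dist.rvs())                         # one draw from the truncated normal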
logging.info('Launching %s worker processes', args.jobs)\n\n for i in range(args.jobs):\n t = threading.Thread(target=workerProcess, args=(i,))\n t.daemon = True\n t.start()\n workers.append(t)\n\n\ndef verifyGlobals():\n logging.info('Dumping globals with pg_dumpall...')\n cmd = '{pg_dumpall} -g -h {host} -p {port} -U \"{user}\" >/dev/null'.format(\n pg_dumpall=os.path.join(args.bindir, 'pg_dumpall'), host=args.host, port=args.port, user=args.username)\n logging.info('Executing %s', cmd)\n retcode, output = shell_exec_with_output(cmd)\n if retcode != 0:\n raise Exception('Failed to verify globals')\n\n\ndef verifySchema(dbname):\n logging.info('Dumping schema with pg_dump --schema-only for %s', dbname)\n cmd = '{pg_dump} -h {host} -p {port} -U \"{user}\" --schema-only \"{dbname}\" >/dev/null'.format(\n pg_dump=os.path.join(args.bindir, 'pg_dump'), host=args.host, port=args.port, user=args.username, dbname=dbname)\n logging.info('Executing %s', cmd)\n retcode, output = shell_exec_with_output(cmd)\n if retcode != 0:\n raise Exception('Failed to verify schema for DB ' + dbname)\n\n\ndef addTablesFromDB(dbname):\n logging.info('Processing DB: %s', dbname)\n sql = \"\"\"select quote_ident(nspname)||'.'||quote_ident(relname) as tbl\n from pg_class c\n join pg_namespace n on n.oid = c.relnamespace \n where relkind = 'r'\n and relpersistence = 'p'\n and not nspname like any(array['information_schema', E'pg\\\\_%'])\n order by relpages desc\"\"\"\n tables = executeOnDB(sql, dbname=dbname)\n tables = [x['tbl'] for x in tables]\n if tables:\n for t in tables:\n queue.put((dbname, t))\n logging.info('Added %s tables to the queue', len(tables))\n else:\n logging.info('No tables found')\n return len(tables)\n\n\ndef allWorkersAlive():\n for w in workers:\n if not w.is_alive():\n return False\n return True\n\n\ndef main():\n argp = argparse.ArgumentParser(description='A helper to dump all dbs/tables to /dev/null in parallel.'\n 'Meant to be used on replicas where parallel pg_dump cannot be used (pre PG 10) to validate data files integrity.'\n 'NB! Integrity is still not fully guaranteed with this approach (no snapshot, no constraint/index validation)', add_help=False)\n\n argp.add_argument('--help', help='Show help', action='help')\n argp.add_argument('-b', '--bindir', help='Postgres binaries folder', required=True)\n argp.add_argument('-h', '--host', help='PG host. IP or unix socket', required=True)\n argp.add_argument('-p', '--port', help='PG port', default=5432, type=int)\n argp.add_argument('-U', '--username', help='PG user', default=os.getenv('USER')) # password is assumed to be in .pgpass\n\n argp.add_argument('-j', '--jobs', help='Max parallel processes to use. Default is count of CPUs/2',\n default=max(cpu_count()/2, 1), type=int, metavar=max(cpu_count()/2, 1))\n argp.add_argument('-d', '--dbname', help='Test only a single DB')\n argp.add_argument('-q', '--quiet', help='Only errors', action='store_true')\n\n global args\n args, unknown_args = argp.parse_known_args()\n\n logging.basicConfig(level=(logging.ERROR if args.quiet else logging.INFO), format='%(asctime)s (%(levelname)s) PID=%(process)d: %(message)s')\n logging.info('Args: %s, unknown_args: %s', args, unknown_args)\n\n if not os.path.exists(os.path.join(args.bindir, 'pg_dumpall')):\n raise Exception('Invalid BINDIR! 
Could not find pg_dumpall')\n\n dbs = executeOnDB('select datname from pg_database where not datistemplate and datallowconn order by datname', dbname='template1')\n dbs = [x['datname'] for x in dbs]\n logging.info('dbs found: %s', dbs)\n\n if args.dbname:\n if args.dbname not in dbs:\n raise Exception('DB not found: ' + args.dbname)\n dbs = [args.dbname]\n\n if not args.dbname:\n verifyGlobals()\n\n launchWorkers()\n\n tables_added = 0\n\n for db in dbs:\n verifySchema(db)\n table_count = addTablesFromDB(db)\n tables_added += table_count\n\n if tables_added == 0:\n raise Exception('No tables found to be dumped!')\n\n logging.info('Waiting for %s tables to be pg_dumped...', tables_added)\n i = 0\n while not queue.empty():\n if not allWorkersAlive():\n logging.error('Not all worker processes are alive. Exiting')\n exit(1)\n time.sleep(5)\n i += 5\n if i % 60 == 0: # progress reporting\n logging.info(\"%s tables in the queue...\", queue.qsize())\n\n if err_count == 0:\n logging.info(\"Done. No errors encountered\")\n else:\n logging.info(\"Errors encountered\")\n exit(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"quick_verify_threads.py","file_name":"quick_verify_threads.py","file_ext":"py","file_size_in_byte":6925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"537462055","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nData Provider\n\nDPED Dataset see:http://people.ee.ethz.ch/~ihnatova/\n\n\"\"\"\nimport os\nimport numpy as np\nfrom PIL import Image\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset, DataLoader\n\n\nto_tensor = transforms.Compose([\n transforms.ToTensor()\n])\n\n\nclass DPEDDataset(Dataset):\n\n def __init__(self, data_dir, phone='iphone'):\n\n self.phone_dir = data_dir + phone + '/'\n self.camera = data_dir+'canon/'\n\n self.num_images = len([name for name in os.listdir(self.phone_dir)\n if os.path.isfile(os.path.join(self.phone_dir, name))])\n\n # Load Data\n self.input_images = []\n self.target_images = []\n for i in xrange(self.num_images):\n input_ = Image.open(self.phone_dir+'%s.jpg' % i)\n target_ = Image.open(self.camera+'%s.jpg' % i)\n\n self.input_images.append(to_tensor(input_))\n self.target_images.append(to_tensor(target_))\n\n def __len__(self):\n return self.num_images\n\n def __getitem__(self, idx):\n\n return self.input_images[idx], self.target_images[idx]\n\n\n","sub_path":"data/data_provider.py","file_name":"data_provider.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"285794438","text":"import argparse\nimport json\nimport pprint\nimport sys\n\nimport numpy\nimport pandas as pd\n\nfrom pathlib import Path\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, numpy.integer):\n return int(obj)\n elif isinstance(obj, numpy.floating):\n return float(obj)\n elif isinstance(obj, numpy.ndarray):\n return obj.tolist()\n else:\n return super(MyEncoder, self).default(obj)\n\nname_map = {\n 'ACCURACY': 'accuracy',\n 'PRECISION': 'precision',\n 'RECALL': 'recall',\n 'F1': 'f1',\n 'F1_MICRO': 'f1Micro',\n 'F1_MACRO': 'f1Macro',\n 'ROC_AUC': 'rocAuc',\n 'ROC_AUC_MICRO': 'rocAucMicro',\n 'ROC_AUC_MACRO': 'rocAucMacro',\n 'MEAN_SQUARED_ERROR': 'meanSquaredError',\n 'ROOT_MEAN_SQUARED_ERROR': 'rootMeanSquaredError',\n 'MEAN_ABSOLUTE_ERROR': 'meanAbsoluteError',\n 'R_SQUARED': 'rSquared',\n 'NORMALIZED_MUTUAL_INFORMATION': 'normalizedMutualInformation',\n 
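# Reduced sketch of the producer/consumer structure in quick_verify_threads.py:
# daemon worker threads pull (dbname, table) pairs off a Queue while the main
# thread waits for it to drain. Written for Python 3 (the script itself targets
# Python 2), with the pg_dump call replaced by a print; database and table names
# are invented.
import threading
from queue import Queue

q = Queue()

def worker(worker_id):
    while True:
        dbname, tbl = q.get()
        print('worker %d dumping %s.%s' % (worker_id, dbname, tbl))
        q.task_done()

for i in range(2):
    threading.Thread(target=worker, args=(i,), daemon=True).start()

for tbl in ('public.orders', 'public.users'):
    q.put(('shopdb', tbl))
q.join()    # returns once both tables have been handled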
'JACCARD_SIMILARITY_SCORE': 'jaccardSimilarityScore',\n 'PRECISION_AT_TOP_K': 'precisionAtTopK',\n 'OBJECT_DETECTION_AVERAGE_PRECISION': 'objectDetectionAP',\n 'HAMMING_LOSS': 'hammingLoss'\n}\n\ndef to_dict(row):\n result = dict(row)\n result['metric'] = name_map[result['metric']]\n return result\n\ndef add_score(score_dir, info_dir):\n print(score_dir)\n for score_csv in score_dir.glob('*.csv'):\n print(score_csv)\n score_df = pd.read_csv(score_csv)\n score = [to_dict(score_df.iloc[i, :]) for i in range(score_df.shape[0])]\n info_file = info_dir / Path(score_csv.stem).with_suffix('.json')\n if info_file.exists():\n with open(info_file, 'r') as fp:\n info = json.load(fp)\n info['score'] = score\n with open(info_file, 'w') as fp:\n json.dump(info, fp, indent=4, cls=MyEncoder)\n else:\n print('Missing info file:', info_file)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Add score to pipeline info\",\n )\n parser.add_argument('score_dir', help=\"Directory containing *.score.csv files\")\n parser.add_argument('info_dir', help=\"Directory containing *.json info files\")\n\n args = parser.parse_args()\n score_dir = Path(args.score_dir)\n info_dir = Path(args.info_dir)\n\n if not score_dir.exists():\n print('Missing score dir:', score_dir)\n sys.exit(1)\n\n if not info_dir.exists():\n print('Missing score dir:', info_dir)\n sys.exit(1)\n\n add_score(score_dir, info_dir)\n","sub_path":"python/script/add_score.py","file_name":"add_score.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"125565990","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport gym\nimport time\nfrom spinup.algos.pytorch.ppo.ppo import PPOBuffer\nfrom spinup.utils.logx import EpochLogger\n\n\ndef mlp(sizes, activation=nn.ReLU, output_activation=nn.Identity):\n layers = []\n for i in range(len(sizes) - 1):\n act = output_activation if i == len(sizes) - 2 else activation\n layers += [nn.Linear(sizes[i], sizes[i+1]), act()]\n return nn.Sequential(*layers)\n\nclass GaussianPolicy:\n def __init__(self, action_space, observation_space, hidden_sizes):\n self.observation_space = observation_space\n self.action_space = action_space\n obs_dim = observation_space.shape[0]\n if isinstance(action_space, gym.spaces.Box):\n self.act_dim = self.action_space.shape[0]\n self.actor_net = mlp(sizes = [obs_dim] + hidden_sizes + [self.act_dim], output_activation=nn.Identity)\n elif isinstance(action_space, gym.spaces.Discrete):\n raise NotImplementedError\n log_std = -0.5 * np.ones(self.act_dim, dtype=np.float32)\n self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))\n\n def get_policy(self, o):\n mus = self.actor_net(torch.as_tensor(o, dtype=torch.float32))\n # mus = outs[:,:self.act_dim]\n # log_sigmas = outs[:,self.act_dim:]\n # sigmas = 0.1 * torch.as_tensor(np.ones(self.act_dim, dtype=np.float32))\n sigmas = torch.exp(self.log_std)\n return torch.distributions.Normal(mus, sigmas)\n\n def get_logp(self, o, a):\n return self.get_policy(o).log_prob(a).sum(axis=-1)\n \n def act(self, o):\n return self.get_policy(o).sample()\n\nclass CategoricalPolicy:\n def __init__(self, action_space, observation_space, hidden_sizes):\n self.observation_space = observation_space\n self.action_space = action_space\n obs_dim = observation_space.shape[0]\n if isinstance(action_space, gym.spaces.Box):\n raise NotImplementedError\n elif isinstance(action_space, gym.spaces.Discrete):\n act_dim = 
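# Tiny demonstration of why add_score.py defines a custom encoder: the stdlib
# json module raises TypeError on numpy int64 scalars and ndarrays, so they are
# converted in default(). The record below is invented sample data.
import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

record = {'metric': 'f1Macro', 'fold': np.int64(0), 'scores': np.array([0.8, 0.9])}
print(json.dumps(record, cls=NumpyEncoder))
# {"metric": "f1Macro", "fold": 0, "scores": [0.8, 0.9]}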
self.action_space.n\n self.actor_net = mlp(sizes = [obs_dim] + hidden_sizes + [act_dim], output_activation=nn.Softmax)\n\n def get_policy(self, o):\n probs = self.actor_net(torch.as_tensor(o, dtype=torch.float32))\n return torch.distributions.Categorical(probs=probs)\n\n def get_logp(self, o, a):\n return self.get_policy(o).log_prob(a)\n \n def act(self, o):\n return self.get_policy(o).sample()\n\nclass ValueNet(nn.Module):\n def __init__(self, obs_dim, hidden_sizes, activation=nn.Tanh):\n super().__init__()\n assert len(obs_dim) == 1 # Only support 1D input\n obs_len = obs_dim[0]\n self.v_net = mlp(sizes = [obs_len] + hidden_sizes + [1], activation=activation)\n\n def forward(self, obs):\n return torch.squeeze(self.v_net(obs), -1)\n\ndef my_ppo(env_fn, seed=0, steps_per_epoch=4000, epochs=50, max_ep_len=1000,\n hidden_sizes=[64,64], clip_ratio=0.2, train_pi_iters=80, train_v_iters=80,\n logger_kwargs=dict(), save_freq=10,\n pi_lr=3e-4, vf_lr=1e-3,\n gamma=0.99, lam=0.97, target_kl=0.01):\n \"\"\"\n My PPO implementation\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n seed (int): Seed for random number generators.\n\n steps_per_epoch (int): Number of steps of interaction (state-action pairs) \n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs of interaction (equivalent to\n number of policy updates) to perform.\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n gamma (float): Discount factor. (Always between 0 and 1.)\n\n lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,\n close to 1.)\n\n target_kl (float): Roughly what KL divergence we think is appropriate\n between new and old policies after an update. This will get used \n for early stopping. 
(Usually small, 0.01 or 0.05.)\n\n \"\"\"\n\n # Set up logger and save configuration\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n # Random seed\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n # Instantiate environment\n env = env_fn()\n obs_dim = env.observation_space.shape\n act_dim = env.action_space.shape\n print(\"env.observation_space\", env.observation_space)\n print(\"env.observation_space.shape\", env.observation_space.shape)\n print(\"env.action_space\", env.action_space)\n\n # Set up experience buffer\n # buf = Buffer()\n buf = PPOBuffer(obs_dim, act_dim, steps_per_epoch, gamma, lam)\n\n # Instantiate policy\n if isinstance(env.action_space, gym.spaces.Box):\n policy = GaussianPolicy(env.action_space, env.observation_space, hidden_sizes)\n elif isinstance(env.action_space, gym.spaces.Discrete):\n policy = CategoricalPolicy(env.action_space, env.observation_space, hidden_sizes)\n pi_optimizer = torch.optim.Adam(policy.actor_net.parameters(), lr=pi_lr)\n\n value_net = ValueNet(obs_dim, hidden_sizes)\n vf_optimizer = torch.optim.Adam(value_net.v_net.parameters(), lr=vf_lr)\n\n # Set up model saving\n logger.setup_pytorch_saver(policy)\n # TODO: Save value network as well\n\n # Prepare for interaction with environment\n start_time = time.time()\n o, ep_ret, ep_len = env.reset(), 0, 0\n\n # Main loop: collect experience in env and update/log each epoch\n for epoch in range(epochs):\n with torch.no_grad():\n for t in range(steps_per_epoch):\n a = policy.act(torch.tensor(o, dtype=torch.float32).unsqueeze(0))\n logp = policy.get_logp(o, a)\n a = a.numpy()[0] # Remove batch dimension\n v = value_net(torch.tensor(o, dtype=torch.float32).unsqueeze(0))\n\n next_o, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # buf.append(o, a, r, next_o)\n buf.store(o, a, r, v, logp)\n logger.store(VVals=v)\n\n # Update obs (critical!)\n o = next_o\n\n timeout = ep_len == max_ep_len\n terminal = d or timeout\n epoch_ended = t==steps_per_epoch-1\n\n if terminal or epoch_ended:\n if epoch_ended and not(terminal):\n print('Warning: trajectory cut off by epoch at %d steps.'%ep_len, flush=True)\n if timeout or epoch_ended:\n v = value_net(torch.tensor(o, dtype=torch.float32).unsqueeze(0))\n # _, v, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))\n else:\n v = 0\n # buf.fill_episode_returns(ep_ret)\n buf.finish_path(v)\n if terminal:\n # only save EpRet / EpLen if trajectory finished\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, ep_ret, ep_len = env.reset(), 0, 0\n\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs-1):\n logger.save_state({'env': env}, None)\n\n # Perform PPO update!\n def update():\n # o, a, r, next_o, R = buf.get()\n data = buf.get()\n\n # Policy function update\n o, a, advantage, logp_a_prev = data['obs'], data['act'], data['adv'], data['logp']\n # advantage = R\n for i in range(train_pi_iters):\n pi_optimizer.zero_grad()\n\n # First term: Ratio * A\n logp_a = policy.get_logp(o, a)\n ratio = torch.exp(logp_a - logp_a_prev)\n term_1 = ratio * advantage\n # Second term: clip(Ratio, 1 - e, 1 + e) * A\n term_2 = torch.clamp(ratio, 1 - clip_ratio, 1 + clip_ratio) * advantage\n # Maximize expectation of min\n policy_loss = -(torch.min(term_1, term_2)).mean()\n\n # Calculate KL-div\n kl = (logp_a_prev - logp_a).mean().item()\n # Maybe early stop, if KL is large\n if kl > 1.5 * target_kl:\n logger.log('Early stopping at step %d due to reaching max kl.'%i)\n break\n\n policy_loss.backward()\n pi_optimizer.step()\n \n # Value function 
update\n o, R = data['obs'], data['ret']\n for i in range(train_v_iters):\n vf_optimizer.zero_grad()\n R_pred = value_net(torch.tensor(o, dtype=torch.float32))\n assert R_pred.shape == R.shape\n value_loss = ((R_pred - R)**2).mean()\n value_loss.backward()\n vf_optimizer.step()\n\n update()\n\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)\n logger.log_tabular('Time', time.time()-start_time)\n logger.dump_tabular()\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='HalfCheetah-v2')\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--steps', type=int, default=4000)\n parser.add_argument('--epochs', type=int, default=50)\n parser.add_argument('--train-pi-iters', type=int, default=80)\n parser.add_argument('--train-v-iters', type=int, default=80)\n parser.add_argument('--exp_name', type=str, default='my_ppo')\n args = parser.parse_args()\n\n from spinup.utils.run_utils import setup_logger_kwargs\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n my_ppo(lambda : gym.make(args.env), seed=args.seed, \n steps_per_epoch=args.steps, epochs=args.epochs,\n train_pi_iters=args.train_pi_iters,\n train_v_iters=args.train_v_iters,\n logger_kwargs=logger_kwargs)","sub_path":"spinup/algos/pytorch/my_ppo/my_ppo.py","file_name":"my_ppo.py","file_ext":"py","file_size_in_byte":10280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"283815581","text":"import numpy as np\n\n\nclass DFS8:\n \"\"\"\n #Clase para la obtencion de vectores y condiciones necesarias para la implementaciond de \n #el algoritmo de ruta DFS. Los vectores se implementan de la sigujiente manera:\n #Vector n(i,j) vector de posibles posiciones (Se ignoran valores con -1)\n #VectorA a(i,j) vector de adyancencia, posibles vecinos que se pueden ocupar, los visitados no cuadran\n #VectorV v(i,j) vector de posiciones visitadas. Cuando posiciones visitadas = vector n se recorrio toda la matriz\n #VectorP p(i,j) vector padre. Vector que guarda las ultimas pociciones visitadas, se usa apra recorrer a la \n #inversar la matriz en caso de querer buscar una semilla\n #Vector semilla s(i,j) cuando |a(i,j)|>1 quiere decir que es un punto de desicion, es decir una rama de el arbol de expansion\n #VectorR ruta(i,j) guarda toda la ruta. 
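# Numeric illustration of the clipped surrogate objective computed in update()
# above: the probability ratio is clamped to [1 - eps, 1 + eps] and the
# element-wise minimum with the unclipped term is taken, so overly large policy
# updates stop receiving extra gradient. All numbers are made up.
import torch

clip_ratio = 0.2
logp_old = torch.tensor([-1.0, -0.5])
logp_new = torch.tensor([-0.4, -0.9])
advantage = torch.tensor([1.0, -1.0])

ratio = torch.exp(logp_new - logp_old)                                   # ~[1.82, 0.67]
unclipped = ratio * advantage
clipped = torch.clamp(ratio, 1 - clip_ratio, 1 + clip_ratio) * advantage
loss = -(torch.min(unclipped, clipped)).mean()
print(loss.item())    # ~ -0.2: the clipped terms 1.2 and -0.8 survive the min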
Este es el resultado es decir la ruta.\n Se recorre en contra de las manecillas del reloj.\n \"\"\"\n matriz = []\n n = []\n v = []\n s = []\n columns = 0\n rows = 0\n\n \n\n def __init__(self,matriz,colums,rows):\n self.matriz = matriz\n self.columns = colums\n self.rows = rows\n self.n = []\n self.v = []\n self.s = []\n \n \n def getVectorN(self):\n \"\"\"\n Obtiene el vector N: El vector N son todas las posiciones que puede ocupar el robot.\n Solo se llama una sola vez \n \"\"\"\n\n\n for i in range(self.rows):\n for j in range(self.columns):\n if (self.matriz[i,j] != -1):\n self.n.append([i,j])\n \n \n def isCurrenPositionSeed(self,i,j):\n a =[]\n for element in self.n:\n if(element == [i -1,j]):\n a.append([i - 1 ,j])\n if(element == [i -1,j - 1]):\n a.append([i - 1 ,j - 1])\n if(element == [i , j - 1]):\n a.append([i, j - 1])\n if(element == [i + 1, j - 1]):\n a.append([i + 1, j - 1])\n if (element == [i + 1 , j]):\n a.append([i + 1,j])\n if (element == [i + 1 , j + 1]):\n a.append([i + 1,j + 1])\n if (element == [i, j + 1]):\n a.append([i,j + 1]) \n if (element == [i - 1, j + 1]):\n a.append([i - 1,j + 1]) \n\n for visited in self.v:\n try:\n a.remove(visited)\n except:\n pass\n if(len(a)>1):\n return True\n else:\n return False\n\n\n def getVectorA(self,i,j):\n \"\"\"\n Obtiene el vector A: El vector A se denomina vector Adyacencia, es decir, las posiciones a las cuales\n se puede mover el robot. Aqui se tiene en cuenta el vector de lugares ya visitados.\n @param i: Posicion i donde se desea obtener el vector adyacencia.\n @param j: Posicion j donde se desea obtener el vector adyacencia\n @return a: Vector adyecencia.\n \"\"\"\n a =[]\n for element in self.n:\n if(element == [i -1,j]):\n a.append([i - 1 ,j])\n if(element == [i -1,j - 1]):\n a.append([i - 1 ,j - 1])\n if(element == [i , j - 1]):\n a.append([i, j - 1])\n if(element == [i + 1, j - 1]):\n a.append([i + 1, j - 1])\n if (element == [i + 1 , j]):\n a.append([i + 1,j])\n if (element == [i + 1 , j + 1]):\n a.append([i + 1,j + 1])\n if (element == [i, j + 1]):\n a.append([i,j + 1]) \n if (element == [i - 1, j + 1]):\n a.append([i - 1,j + 1]) \n \n for visited in self.v:\n try:\n a.remove(visited)\n except:\n pass\n maximunValue = 0\n countOfSeed = 0\n for element in a:\n if self.matriz[element[0]][element[1]] > maximunValue:\n maximunValue = self.matriz[element[0]][element[1]]\n \n removeElement = []\n for elementA in a:\n prueba = self.matriz[elementA[0]][elementA[1]]\n if self.matriz[elementA[0]][elementA[1]] < maximunValue:\n removeElement.append(elementA)\n for element in removeElement:\n a.remove(element)\n try:\n a.remove(self.findStartAndGoal()[1])\n except:\n pass \n return a\n\n \n def addVisitedPosition(self,visited):\n \"\"\"\n Agrega una nueva posicion al vector de posiciones visitadas. Esto es para evaluar si una posicion ya esta\n visitada antes de llegar. La idea es agregar una posicion visitade en el lugar que se llega para mantener\n el vector de vistas actualizado a cada momento.\n @param visited: Posicion visitada para agregar al vector de lugares visitadas. \n \"\"\"\n for element in self.v:\n if (element == visited):\n return\n self.v.append(visited)\n \n \n def addNewSeed(self, seed):\n \"\"\"\n Agrega una nueva semilla. 
Es decir un lugar donde se ha hecho una desicion\n @param seed: La dupla de valores donde la posicion es una semilla\n \"\"\"\n self.s.append(seed) \n def getGetVectorS(self):\n \"\"\"\n Retorna el vectot de semillas\n \"\"\"\n return self.s\n \n def removeSeedInS(self):\n \"\"\"\n Eliminar la ultima semilla en el vector. Esto se hace a medidac que vamos hacia atras en caso de no tener mas \n posiciones que vistar.\n \"\"\"\n try:\n del self.s[-1]\n except:\n pass\n \n\n def findStartAndGoal(self):\n a = []\n for i in range(self.rows):\n for j in range(self.columns):\n if self.matriz[i,j] == 1:\n a.insert(0,[i,j])\n if self.matriz[i,j] == 2:\n a.insert(1,[i,j])\n return a\n \n\n def getNextPosition(self,a,currentPosition):\n \"\"\"\n Esta funcion retorna la siguiente posicion dandole un vector de adyacencia. Esto es para asegurar\n que se cumpla la prioridad anti horaria.\n @param a: Vector adyacencia.\n \"\"\"\n if len(a) > 1:\n for position in a:\n if (position[0] == (currentPosition[0] - 1) and position[1] == currentPosition[1]):\n return position\n for position in a:\n if (position[0] == (currentPosition[0] - 1) and position[1] == currentPosition[1] - 1):\n return position\n for position in a:\n if (position[0] == currentPosition[0] and position[1] == currentPosition[1] - 1):\n return position\n \n for position in a:\n if (position[0] == (currentPosition[0] + 1) and position[1] == currentPosition[1] - 1):\n return position\n for position in a:\n if (position[0] == currentPosition[0] + 1 and position[1] == currentPosition[1]):\n return position\n for position in a:\n if (position[0] == currentPosition[0] + 1 and position[1] == currentPosition[1] + 1):\n return position\n for position in a:\n if (position[0] == currentPosition[0] and position[1] == currentPosition[1] + 1):\n return position\n for position in a:\n if (position[0] == currentPosition[0] - 1 and position[1] == currentPosition[1] + 1):\n return position\n else:\n return a[0]\n \n \n\n def getCoverRouteWitSeed(self):\n currentPosition = self.findStartAndGoal()[0]\n parentVector = []\n covertura = []\n covertura.append(currentPosition)\n self.getVectorN()\n while (len(self.v) < (len(self.n) - 1)):\n self.addVisitedPosition(currentPosition)\n parentVector.append(currentPosition)\n if(self.isCurrenPositionSeed(currentPosition[0],currentPosition[1])):\n #Es una semilla\n self.addNewSeed(currentPosition)\n vectorA = self.getVectorA(currentPosition[0],currentPosition[1])\n nextPosition = self.getNextPosition(vectorA,currentPosition)\n print(nextPosition)\n covertura.append(nextPosition)\n currentPosition = nextPosition \n \n else:\n #no es una semilla\n if (len(self.getVectorA(currentPosition[0],currentPosition[1])) == 0):\n try:\n if not(len(self.v) < (len(self.n) - 1)):\n break\n removeElement = []\n for element in reversed(parentVector):\n if (not(element == [0,0])):\n print(element)\n removeElement.append(element)\n covertura.append(element)\n if element == self.getGetVectorS()[-1]: \n currentPosition = self.getGetVectorS()[-1]\n break\n except:\n pass\n self.removeSeedInS()\n for element in removeElement:\n parentVector.remove(element)\n else:\n vectorA = self.getVectorA(currentPosition[0],currentPosition[1])\n currentPosition = self.getNextPosition(vectorA,currentPosition)\n print(currentPosition)\n covertura.append(currentPosition)\n print(self.findStartAndGoal()[1])\n covertura.append(self.findStartAndGoal()[1])\n print(\"Ruta encontrada\")\n return 
covertura\n","sub_path":"DFS8.py","file_name":"DFS8.py","file_ext":"py","file_size_in_byte":10995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"408269668","text":"import time\nimport serial\nfrom serial.tools import list_ports\nimport traceback\n\n\n#Valve control module\n#Uses either PI GPIO or an Arduino (serial)\n\ngpioExists = False\n\n#Allow this module to be used on non-raspberry pi systems\ntry:\n\timport RPi.GPIO as gpio\n\tgpioExists = True\nexcept:\n\tprint(\"Couldn't import RPi.GPIO\")\n\n_serial = None\n_serialBaud = 115200\n\n# _pinMap = [8, 10, 12, 16, 18, 22, 24, 26]\t#Pump number (index) maps to pin number (value at index)\nnumValves = 12\n_states = [False] * numValves\t#Boolean array stores states of valves\nvalveKeys = \"`1234567890-\"\t#Keys to use to control valves\n\ndef init(port = \"\", useGPIO = False):\t#Initialize GPIO\n\tglobal _serial, gpioExists\n\n\tif not useGPIO:\n\t\tgpioExists = False\n\n\tif gpioExists:\t#If this is a PI and we want to use GPIO\n\t\tgpio.setwarnings(False)\n\n\t\tgpio.setmode(gpio.BOARD)\n\n\t\tfor i in _pinMap:\n\t\t\tprint(\"Setting output mode for pin \" + str(i))\n\t\t\tgpio.setup(i, gpio.OUT)\n\n\tprint(\"Opening Serial Port...\")\n\ttry:\n\t\t_serial = serial.Serial(port = port, baudrate = _serialBaud)\n\t\ttime.sleep(3)\n\t\tprint(\"Serial port opened!\")\n\texcept:\n\t\ttraceback.print_exc()\n\t\tprint(\">>>>>>>> Couldn't initialize serial port. Does it exist?\")\n\n\ndef on(index):\t#Turn on pin\n\tif gpioExists:\n\t\tgpio.output(_pinMap[index], gpio.LOW)\n\telif _serial:\n\t\tcommand = \"P\" + str(index) + \"S1\\n\"\n\t\t#print(\"Sending\", command)\n\t\t_serial.write(bytes(command, \"utf-8\"))\n\t\t#print(\"Sent\")\n\telse:\n\t\tprint(\">>>>>>>> Couldn't set valve state\")\n\t_states[index] = True\n\ndef off(index):\t#Turn off pin\n\tif gpioExists:\n\t\tgpio.output(_pinMap[index], gpio.HIGH)\n\telif _serial:\n\t\tcommand = \"P\" + str(index) + \"S0\\n\"\n\t\t#print(\"Sending\", command)\n\t\t_serial.write(bytes(command, \"utf-8\"))\n\t\t#print(\"Sent\")\n\telse:\n\t\tprint(\"[ValveManager] Couldn't set valve\")\n\t_states[index] = False\n\ndef toggle(index):\n\tif _states[index]:\n\t\toff(index)\n\telse:\n\t\ton(index)\n\n\ndef demo():\t#Just test all the pins\n\tfor i in range(numValves):\n\t\ton(i)\n\t\ttime.sleep(.2)\n\n\tfor i in range(numValves):\n\t\toff(i)\n\t\ttime.sleep(.2)\n\n\tfor a in range(3):\n\t\tallOn()\n\t\ttime.sleep(.2)\n\t\tallOff()\n\t\ttime.sleep(.2)\n\n\ndef allOn():\t#Turn everything on\n\tfor i in range(numValves):\n\t\ton(i)\n\ndef allOff():\t#Turn everything off\n\tfor i in range(numValves):\n\t\toff(i)\n\n\n\ndef getState(index):\t#Fetch the state of an output\n\treturn _states[index]\n","sub_path":"valvemanager.py","file_name":"valvemanager.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"181187167","text":"from django.forms import ModelForm, TextInput, Textarea\nfrom .models import Message\n\n\nclass MessageForm(ModelForm):\n class Meta:\n model = Message\n fields = ['name', 'email', 'message']\n widgets = {\n 'name': TextInput(attrs={'type': 'text', 'name': 'name', 'placeholder': 'Name'}),\n 'email': TextInput(attrs={'type': 'email', 'name': 'email', 'placeholder': 'Email'}),\n 'message': Textarea(attrs={'rows': '5', 'name': 'message', 'placeholder': 'Say something...'})\n 
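# The Arduino path in valvemanager.py above frames every valve change as the
# ASCII command "P<index>S<state>\n" written to the serial port. valve_command is
# a hypothetical helper that only shows the exact bytes that would be sent.
def valve_command(index, on):
    return bytes('P%dS%d\n' % (index, 1 if on else 0), 'utf-8')

assert valve_command(3, True) == b'P3S1\n'
assert valve_command(11, False) == b'P11S0\n'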
}\n","sub_path":"design/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"644625403","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# template matching\ndef Template_matching(img, template):\n # get original image shape\n H, W, C = img.shape\n\n # get template image shape\n Ht, Wt, Ct = template.shape\n\n # Templete matching\n # prepare x, y index\n i, j = -1, -1\n # prepare evaluate value\n v = 255 * H * W * C\n\n for y in range(H - Ht):\n for x in range(W - Wt):\n # get SSD value\n _v = np.sum((img[y : y + Ht, x : x + Wt] - template) ** 2)\n\n # if SSD is min\n if _v < v:\n v = _v\n i, j = x, y\n\n out = img.copy()\n # draw rectangle\n cv2.rectangle(out, pt1=(i, j), pt2=(i+Wt, j+Ht), color=(0,0,255), thickness=1)\n out = out.astype(np.uint8)\n\n return out\n\n# Read image\nimg = cv2.imread(\"imori.jpg\").astype(np.float32)\n\n# Read templete image\ntemplate = cv2.imread(\"imori_part.jpg\").astype(np.float32)\n\n# Template matching\nout = Template_matching(img, template)\n\n \n# Save result\ncv2.imwrite(\"out.jpg\", out)\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"Question_51_60/answers/answer_54.py","file_name":"answer_54.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"4379090","text":"import tensorflow as tf\n\n\n# This signifies the max integer that the controller RNN could predict for the\n# augmentation scheme.\nMAX_LEVEL = 10.\n\n\n# Represents an invalid bounding box that is used for checking for padding\n# lists of bounding box coordinates for a few augmentation operations\nINVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]]\n\n\nCUTOUT_MAX_PAD_FRACTION = 0.75\nCUTOUT_BOX_REPLACE_WITH_MEAN = False\nCUTOUT_CONST = 100\nTRANSLATE_CONST = 250\nCUTOUT_BOX_CONST = 50\nTRANSLATE_BOX_CONST = 120\nREPLACE_VALUE = 128\n\n\ndef blend(image1, image2, factor):\n \"\"\"Blend image1 and image2 using \"factor\".\n\n Factor can be above 0.0. A value of 0.0 means only image1 is used.\n A value of 1.0 means only image2 is used. A value between 0.0 and\n 1.0 means we linearly interpolate the pixel values between the two\n images. A value greater than 1.0 \"extrapolates\" the difference\n between the two pixel values, and we clip the results to values\n between 0 and 255.\n\n Args:\n image1: An image Tensor of type uint8.\n image2: An image Tensor of type uint8.\n factor: A floating point value above 0.0.\n Returns:\n A blended image Tensor of type uint8.\n \"\"\"\n if tf.equal(factor, 0.0):\n return tf.cast(image1, tf.uint8)\n if tf.equal(factor, 1.0):\n return tf.cast(image2, tf.uint8)\n\n image2 = tf.cast(image2, tf.float32)\n image1 = tf.cast(image1, tf.float32)\n difference = image2 - image1\n scaled = difference * factor\n\n # Do addition in float.\n tmp = image1 + scaled\n\n # Interpolate\n if tf.greater(factor, 0.) 
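# Compact NumPy-only version of the SSD search performed by Template_matching
# above: slide the template over the image, score every window with the sum of
# squared differences and keep the argmin. The 4x4 image and 2x2 template are toy
# arrays chosen so the best match sits at (x=2, y=1).
import numpy as np

img = np.arange(16, dtype=np.float32).reshape(4, 4)
tmpl = img[1:3, 2:4].copy()
th, tw = tmpl.shape

best, best_xy = np.inf, (-1, -1)
for y in range(img.shape[0] - th + 1):
    for x in range(img.shape[1] - tw + 1):
        ssd = np.sum((img[y:y+th, x:x+tw] - tmpl) ** 2)
        if ssd < best:
            best, best_xy = ssd, (x, y)
print(best_xy)    # (2, 1)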
and tf.less(factor, 1.):\n return tf.cast(tmp, tf.uint8)\n\n # Extrapolation\n return tf.cast(tf.clip_by_value(tmp, 0, 255), tf.uint8)\n\n\ndef clip_box(min_y, min_x, max_y, max_x):\n \"\"\"Clip bounding box coordinates between 0 and 1.\n\n Args:\n min_y: Normalized box coordinate of type float between 0 and 1.\n min_x: Normalized box coordinate of type float between 0 and 1.\n max_y: Normalized box coordinate of type float between 0 and 1.\n max_x: Normalized box coordinate of type float between 0 and 1.\n\n Returns:\n Clipped coordinate values between 0 and 1.\n \"\"\"\n min_y = tf.clip_by_value(min_y, 0.0, 1.0)\n min_x = tf.clip_by_value(min_x, 0.0, 1.0)\n max_y = tf.clip_by_value(max_y, 0.0, 1.0)\n max_x = tf.clip_by_value(max_x, 0.0, 1.0)\n\n return min_y, min_x, max_y, max_x\n\n\ndef check_box_area(min_y, min_x, max_y, max_x, delta=0.05):\n \"\"\"Adjusts box coordinates to make sure the area is > 0.\n\n Args:\n min_y: Normalized box coordinate of type float between 0 and 1.\n min_x: Normalized box coordinate of type float between 0 and 1.\n max_y: Normalized box coordinate of type float between 0 and 1.\n max_x: Normalized box coordinate of type float between 0 and 1.\n delta: Float, this is used to create a gap of size 2 * delta between\n box min/max coordinates that are the same on the boundary.\n This prevents the box from having an area of zero.\n\n Returns:\n Tuple of new box coordinates between 0 and 1 that will now have a\n guaranteed area > 0.\n \"\"\"\n height = max_y - min_y\n width = max_x - min_x\n\n def _adjust_box_boundaries(min_coord, max_coord):\n # Make sure max is never 0 and min is never 1.\n max_coord = tf.maximum(max_coord, 0.0+delta)\n min_coord = tf.minimum(min_coord, 1.0-delta)\n\n return min_coord, max_coord\n\n min_y, max_y = tf.cond(tf.equal(height, 0.0),\n lambda: _adjust_box_boundaries(min_y, max_y),\n lambda: (min_y, max_y))\n min_x, max_x = tf.cond(tf.equal(width, 0.0),\n lambda: _adjust_box_boundaries(min_x, max_x),\n lambda: (min_x, max_x))\n\n return min_y, min_x, max_y, max_x\n\n\ndef scale_box_only_op_probability(prob):\n \"\"\"Reduce the probability of the box-only operation.\n\n Probability is reduced so that we do not distort the content of too many\n bounding boxes that are close to each other. 
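# Plain-number check of what blend() above computes: out = image1 + factor *
# (image2 - image1), with clipping to [0, 255] once the factor extrapolates past
# 1.0. Scalar pixel values stand in for whole images here.
import numpy as np

def blend_value(p1, p2, factor):
    return float(np.clip(p1 + factor * (p2 - p1), 0, 255))

assert blend_value(100, 200, 0.0) == 100.0    # only image1
assert blend_value(100, 200, 0.5) == 150.0    # linear interpolation
assert blend_value(100, 200, 1.5) == 250.0    # extrapolation, still in range
assert blend_value(100, 200, 2.0) == 255.0    # extrapolation, clipped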
The value of 3.0 was a chosen\n hyper parameter when designing the autoaugment algorithm that we found\n empirically to work well.\n\n Args:\n prob: Float that is the probability of applying the box-only operation.\n Returns:\n Reduced probability.\n \"\"\"\n return prob / 3.0\n\n\ndef apply_box_augmentation(image, box, augmentation_func, *args):\n \"\"\"Applies augmentation_func to the subsection of image indicated by box.\n\n Args:\n image: 3D uint8 Tensor.\n box: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)\n of type float that represents the normalized coordinates between 0 and 1.\n augmentation_func: Augmentation function that will be applied to the\n subsection of image.\n *args: Additional parameters that will be passed into augmentation_func\n when it is called.\n\n Returns:\n A modified version of image, where the box location in the image will\n have `augmentation_func applied to it.\n \"\"\"\n image_height = tf.cast(tf.shape(image)[0], tf.float32)\n image_width = tf.cast(tf.shape(image)[1], tf.float32)\n\n min_y = tf.cast(image_height * box[0], tf.int32)\n min_x = tf.cast(image_width * box[1], tf.int32)\n max_y = tf.cast(image_height * box[2], tf.int32)\n max_x = tf.cast(image_width * box[3], tf.int32)\n\n image_height = tf.cast(image_height, tf.int32)\n image_width = tf.cast(image_width, tf.int32)\n\n # Clip to be sure the mas values do not fall out of range.\n max_y = tf.minimum(max_y, image_height - 1)\n max_x = tf.minimum(max_x, image_width - 1)\n\n # Get the sub-tensor that is the image within the bounding box region.\n box_content = image[min_y:max_y + 1, min_x:max_x + 1, :]\n\n # Apply the augmentation function to the box portion of the image.\n augmented_box_content = augmentation_func(box_content, *args)\n\n # Pad the augmented_box_content and the mask to match the shape of original image\n augmented_box_content = tf.pad(augmented_box_content,\n [[min_y, (image_height - 1) - max_y],\n [min_x, (image_width - 1) - max_x],\n [0, 0]])\n # Create a mask that will be used to zero out a part of the original image.\n mask_tensor = tf.zeros_like(box_content)\n\n mask_tensor = tf.pad(mask_tensor,\n [[min_y, (image_height-1)-max_y],\n [min_x, (image_width-1)-max_x],\n [0, 0]],\n constant_values=1)\n\n # Replace the old box content with the new augmented content.\n image = image * mask_tensor + augmented_box_content\n\n return image\n\n\ndef concat_box(box, boxes):\n \"\"\"Helper function that concates box to boxes along the first dimension.\"\"\"\n\n # Note if all elements in boxes are -1 (_INVALID_BOX), then this means\n # we discard boxes and start the boxes Tensor with the current box.\n boxes_sum_check = tf.reduce_sum(boxes)\n box = tf.expand_dims(box, 0)\n # This check will be true when it is an _INVALID_BOX\n boxes = tf.cond(tf.equal(boxes_sum_check, -4.0),\n lambda: box,\n lambda: tf.concat([boxes, box], 0))\n\n return boxes\n\n\ndef apply_box_augmentation_wrapper(image, box, new_boxes, prob, augmentation_func, func_changes_box, *args):\n \"\"\"Applies _apply_box_augmentation with probability prob.\n\n Args:\n image: 3D uint8 Tensor.\n box: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)\n of type float that represents the normalized coordinates between 0 and 1.\n new_boxes: 2D Tensor that is a list of the boxes in the image after they\n have been altered by aug_func. These will only be changed when\n func_changes_box is set to true. 
Each box has 4 elements\n (min_y, min_x, max_y, max_x) of type float that are the normalized\n box coordinates between 0 and 1.\n prob: Float that is the probability of applying _apply_box_augmentation.\n augmentation_func: Augmentation function that will be applied to the\n subsection of image.\n func_changes_box: Boolean. Does augmentation_func return box in addition\n to image.\n *args: Additional parameters that will be passed into augmentation_func\n when it is called.\n\n Returns:\n A tuple. Fist element is a modified version of image, where the box\n location in the image will have augmentation_func applied to it if it is\n chosen to be called with probability `prob`. The second element is a\n Tensor of Tensors of length 4 that will contain the altered box after\n applying augmentation_func.\n \"\"\"\n should_apply_op = tf.cast(tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)\n\n if func_changes_box:\n augmented_image, box = tf.cond(should_apply_op,\n lambda: augmentation_func(image, box, *args),\n lambda: (image, box))\n else:\n augmented_image = tf.cond(should_apply_op,\n lambda: apply_box_augmentation(image, box, augmentation_func, *args),\n lambda: image)\n\n new_boxes = concat_box(box, new_boxes)\n\n return augmented_image, new_boxes\n\n\ndef apply_multi_box_augmentation(image, boxes, prob, aug_func, func_changes_box, *args):\n \"\"\"Applies aug_func to the image for each box in boxes.\n\n Args:\n image: 3D uint8 Tensor.\n boxes: 2D Tensor that is a list of the boxes in the image. Each box\n has 4 elements (min_y, min_x, max_y, max_x) of type float.\n prob: Float that is the probability of applying aug_func to a specific\n bounding box within the image.\n aug_func: Augmentation function that will be applied to the\n subsections of image indicated by the box values in boxes.\n func_changes_box: Boolean. Does augmentation_func return box in addition\n to image.\n *args: Additional parameters that will be passed into augmentation_func\n when it is called.\n\n Returns:\n A modified version of image, where each box location in the image will\n have augmentation_func applied to it if it is chosen to be called with\n probability prob independently across all boxes. Also the final\n boxes are returned that will be unchanged if func_changes_box is set to\n false and if true, the new altered ones will be returned.\n \"\"\"\n # Will keep track of the new altered boxes after aug_func is repeatedly\n # applied. The -1 values are a dummy value and this first Tensor will be\n # removed upon appending the first real box.\n new_boxes = tf.constant(INVALID_BOX)\n\n # If the boxes are empty, then just give it _INVALID_BOX. 
The result\n # will be thrown away.\n boxes = tf.cond(tf.equal(tf.size(boxes), 0),\n lambda: tf.constant(INVALID_BOX),\n lambda: boxes)\n boxes = tf.ensure_shape(boxes, [None, 4])\n\n # pylint: disable=g-long-lambda\n # pylint: disable=line-too-long\n wrapped_aug_func = lambda _img, _box, _new_boxes: apply_box_augmentation_wrapper(\n _img, _box, _new_boxes, prob, aug_func, func_changes_box, *args)\n # pylint:enable=g-long-lambda\n # pylint:enable=line-too-long\n\n # Setup the while_loop.\n num_boxes = tf.shape(boxes)[0] # We loop until we go over all boxes.\n idx = tf.constant(0) # Counter for the while loop.\n\n # Conditional function when to end the loop once we go over all boxes\n # images_and_boxes contain (_image, _new_boxes)\n cond = lambda _idx, _image_and_boxes: tf.less(_idx, num_boxes)\n\n # Shuffle the boxes so that the augmentation order is not deterministic\n # if we are not changing the boxes with aug_func.\n if not func_changes_box:\n loop_boxes = tf.random.shuffle(boxes)\n else:\n loop_boxes = boxes\n\n # Main function of while_loop where we repeatedly apply augmentation on\n # the boxes in the image.\n # pylint: disable=g-long-lambda\n body = lambda _idx, _image_and_boxes: [_idx + 1, wrapped_aug_func(\n _image_and_boxes[0], loop_boxes[idx], _image_and_boxes[1])]\n # pylint: enable=g-long-lambda\n _, (image, new_boxes) = tf.while_loop(cond, body, [idx, (image, new_boxes)],\n shape_invariants=[idx.get_shape(),\n (image.get_shape(), tf.TensorShape([None, 4]))])\n\n # Either return the altered boxes or the original ones depending on if\n # we altered them in anyway.\n if func_changes_box:\n final_boxes = new_boxes\n else:\n final_boxes = boxes\n\n return image, final_boxes\n\n\ndef apply_multi_box_augmentation_wrapper(image, boxes, prob, aug_func, func_changes_box, *args):\n \"\"\"Checks to be sure num boxes > 0 before calling inner function.\"\"\"\n num_boxes = tf.shape(boxes)[0]\n image, boxes = tf.cond(\n tf.equal(num_boxes, 0),\n lambda: (image, boxes),\n # pylint:disable=g-long-lambda\n lambda: apply_multi_box_augmentation(\n image, boxes, prob, aug_func, func_changes_box, *args))\n # pylint:enable=g-long-lambda\n\n return image, boxes\n\n\ndef wrap(image):\n \"\"\"Returns `image` with an extra channel set to all ones.\"\"\"\n shape = tf.shape(image)\n\n extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)\n extended = tf.concat([image, extended_channel], 2)\n\n return extended\n\n\ndef unwrap(image, replace):\n \"\"\"Unwraps an image produced by wrap.\n\n Where there is a 0 in the last channel for every spatial position,\n the rest of the three channels in that spatial dimension are grayed\n (set ot 128). Operations like translate and shear on a wrapped Tensor\n will leave 0s in empty locations. 
Some transformations look at the\n intensity of values to do preprocessing, and we want these empty pixels\n to assume the `average` value, rather than pure black.\n\n Args:\n image: A 3D image Tensor with 4 channels.\n replace: A one or three value 1D tensor to fill empty pixels.\n Returns:\n image: A 3D image Tensor with 3 channels.\n \"\"\"\n image_shape = tf.shape(image)\n\n # Flatten the spatial dimensions.\n flattened_image = tf.reshape(image, [-1, image_shape[2]])\n\n # Find all pixels where the last channel is zero.\n alpha_channel = flattened_image[:, 3:]\n\n replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)\n\n # Where the are zero, fill them in with `replace`.\n flattened_image = tf.where(tf.equal(alpha_channel, 0),\n tf.ones_like(flattened_image, dtype=image.dtype) * replace,\n flattened_image)\n image = tf.reshape(flattened_image, image_shape)\n image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])\n\n return image\n","sub_path":"data/augmentations/auto_augmentation/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":15158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"254309423","text":"# http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19SidoInfStateJson\r\n# 보건복지부 코로나19 시·도발생 현황\r\n\r\n# import requests\r\n#\r\n# import requests, bs4\r\n# import pandas as pd\r\n# from lxml import html\r\n# from urllib.request import Request, urlopen\r\n\r\nfrom urllib.parse import urlencode, quote_plus, unquote\r\nimport pandas\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nfrom openpyxl.workbook import Workbook\r\n\r\n\r\ndef comment_print(comment):\r\n print()\r\n print('-------------- [' + comment + '] --------------')\r\n\r\n\r\n\r\n# Service URL\r\nbase_url = 'http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19SidoInfStateJson'\r\nservice_key = '?' 
+ 'serviceKey=CoFlabV4zDnMX82ZFxmARJcUcZ2ostZA4PPRBQVG28PSJSipJWs%2FE5taPSm8tek1SuvgC00Zeochd0jisLseaQ%3D%3D&'\r\n\r\nst_date = '20200401'\r\nen_date = '20200410'\r\n\r\nqueryParams = urlencode(\r\n {\r\n quote_plus('pageNo') : '1',\r\n quote_plus('numOfRows') : '10',\r\n quote_plus('startCreateDt') : st_date,\r\n quote_plus('endCreateDt') : en_date\r\n }\r\n)\r\n\r\nurl = base_url + service_key + queryParams\r\n\r\nprint(url)\r\n\r\nreq = requests.get(url)\r\nhtml = req.text\r\nsoup = BeautifulSoup(html, 'html.parser')\r\n\r\ncomment_print('soup length') # 아래 내용 제목 출력 //\r\nprint(len(soup))\r\n# print(soup)\r\n\r\n\r\nrows = soup.findAll('item')\r\n\r\ncomment_print('len(rows)') # 아래 내용 제목 출력 //\r\nprint(len(rows))\r\n\r\ncomment_print('rows') # 아래 내용 제목 출력 //\r\n# print(rows)\r\n\r\n# _________ [sample] ____________________\r\n# 132\r\n# 1.25\r\n# 2020-04-14 10:24:23.23\r\n# 0\r\n# 0\r\n# 0.00\r\n# 0-9\r\n# 145\r\n# null\r\n# _____________________________\r\n\r\n# i = 0\r\n\r\n# areanm = soup.findAll('areanm')\r\n# comment_print('areanm') # 아래 내용 제목 출력 //\r\n# print(areanm)\r\n# 기타, 기타, 기타\r\n\r\n# ____________ 항목별 데이터 저장 _________________\r\n\r\nstdday = soup.findAll('stdday') # 기준일시\r\ngubun = soup.findAll('gubun') # 시도명(한글)\r\ngubunen = soup.findAll('gubunen') # 시도명(영문)\r\ndefcnt = soup.findAll('defcnt') # 확진자 수\r\nincdec = soup.findAll('incdec') #전일대비 증감 수\r\n# localocccnt = soup.findAll('localocccnt') # 지역발생 수\r\n# overflowcnt = soup.findAll('overflowcnt') # 해외유입 수\r\n# isolingcnt = soup.findAll('isolingcnt') # 격리중 환자 수\r\nisolclearcnt = soup.findAll('isolclearcnt') # 격리 해제 수\r\ndeathcnt = soup.findAll('deathcnt') # 사망자 수\r\n\r\n\r\n\r\n\r\n# _________ 저장용 배열 선언 ____________________\r\nstdday_list = [] # 배열 선언\r\ngubun_list = [] # 배열 선언\r\ngubunen_list = [] # 배열 선언\r\ndefcnt_list = [] # 배열 선언\r\nincdec_list = [] # 배열 선언\r\n# localocccnt_list = [] # 배열 선언\r\n# overflowcnt_list = [] # 배열 선언\r\n# isolingcnt_list = [] # 배열 선언\r\nisolclearcnt_list = [] # 배열 선언\r\ndeathcnt_list = [] # 배열 선언\r\n\r\n\r\n\r\n# ___________ 배열에 데이터 저장 __________\r\nfor code in stdday: stdday_list.append(code.text)\r\nfor code in gubun: gubun_list.append(code.text)\r\nfor code in gubunen: gubunen_list.append(code.text)\r\n\r\nfor code in defcnt: defcnt_list.append(code.text)\r\nfor code in incdec: incdec_list.append(code.text)\r\n# for code in localocccnt: localocccnt_list.append(code.text)\r\n#\r\n# for code in overflowcnt: overflowcnt_list.append(code.text)\r\n# for code in isolingcnt: isolingcnt_list.append(code.text)\r\nfor code in isolclearcnt: isolclearcnt_list.append(code.text)\r\nfor code in deathcnt: deathcnt_list.append(code.text)\r\n\r\n\r\n\r\n# ___ 배열별 건수 인쇄 ____________\r\nprint('stdday_list: ', len(stdday_list))\r\nprint('gubun_list: ', len(gubun_list))\r\nprint('gubunen_list: ', len(gubunen_list))\r\n\r\nprint('defcnt_list: ', len(defcnt_list))\r\nprint('incdec_list: ', len(incdec_list))\r\n# print('localocccnt_list: ', len(localocccnt_list))\r\n#\r\n# print('overflowcnt_list: ', len(overflowcnt_list))\r\n# print('isolingcnt_list: ', len(isolingcnt_list))\r\nprint('isolclearcnt_list: ', len(isolclearcnt_list))\r\nprint('deathcnt_list: ', len(deathcnt_list))\r\n\r\n# _______ data frame 항목에 데이터 배정 __________________________\r\n\r\ncommerce_infor = {}\r\ncommerce_infor['stdday'] = stdday_list\r\ncommerce_infor['gubun'] = gubun_list\r\ncommerce_infor['gubunen'] = gubunen_list\r\n\r\ncommerce_infor['defcnt'] = defcnt_list\r\ncommerce_infor['incdec'] = incdec_list\r\n# commerce_infor['localocccnt'] = 
localocccnt_list\r\n#\r\n# commerce_infor['overflowcnt'] = overflowcnt_list\r\n# commerce_infor['isolingcnt'] = isolingcnt_list\r\ncommerce_infor['isolclearcnt'] = isolclearcnt_list\r\ncommerce_infor['deathcnt'] = deathcnt_list\r\n\r\n\r\n# _______ data frame 정의 __________________________\r\n\r\ndf = pandas.DataFrame(commerce_infor)\r\ncomment_print('df')\r\nprint(df.head(10))\r\n\r\n\r\n# _______ 파일로 저장 __________________________\r\n\r\ncomment_print('write to csv:covid19 by province and city')\r\ndf.to_csv('covid19 by province and city' + st_date + '_' + en_date + '.csv')\r\n\r\ncomment_print('write to excel:covid19 by province and city')\r\ndf.to_excel('covid19 by province and city' + st_date + '_' + en_date + '.xlsx')\r\n\r\n\r\n# __________ 주의 사항 ____________________\r\n# 확진자, 사망자는 누적 수치 임\r\n# 영문명에 결측이 있음\r\n# 기준일시가 03월, 3월 같이 0이 없는 경우도 있음.\r\n# ________________________________________\r\n\r\n# 항목명(국문) 항목명(영문) 항목크기 항목구분 샘플데이터 항목설명\r\n# 결과코드 resultCode 2 필수 00 결과코드\r\n# 결과메시지 resultMsg 50 필수 OK 결과메시지\r\n# 한 페이지 결과 수 numOfRows 4 필수 10 한 페이지 결과 수\r\n# 페이지 번호 pageNo 4 필수 1 페이지번호\r\n# 전체 결과 수 totalCount 4 필수 3 전체 결과 수\r\n# 게시글번호(국외발생현황 고유값) SEQ 30 필수 96 게시글번호(국외발생현황 고유값)\r\n# 기준일시 STD_DAY 30 필수 2020년 3월 09일 00시 기준일시\r\n# 지역명 AREA_NM 30 필수 아프리카 지역명\r\n# 지역명(영문) AREA_NM_EN 30 필수 null 지역명(영문)\r\n# 지멱명(중문) AREA_NM_CN 30 필수 null 지멱명(중문)\r\n# 국가명 NATION_NM 30 필수 토고 국가명\r\n# 국가명(영문) NATION_NM_EN 30 필수 null 국가명(영문)\r\n# 국가명(중문) NATION_NM_CN 30 필수 null 국가명(중문)\r\n# 국가별 확진자 수 NAT_DEF_CNT 15 필수 0 국가별 확진자 수\r\n# 국가별 사망자 수 NAT_DEATH_CNT 15 필수 0 국가별 사망자 수\r\n# 확진률 대비 사망률 NAT_DEATH_RATE 30 필수 0 확진률 대비 사망률\r\n# 등록일시분초 CREATE_DT 30 필수 2020-03-16 20:51:43.000 등록일시분초\r\n# 수정일시분초 UPDATE_DT 30 필수 null 수정일시분초","sub_path":"호텔산업 분석 프로젝트/API 파이썬 코드/공공데이터활용지원센터_보건복지부 코로나19 시·도발생 현황.py","file_name":"공공데이터활용지원센터_보건복지부 코로나19 시·도발생 현황.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"621590666","text":"#Raul Garcia\n\nimport turtle\nsides = int(input(\"Please enter number of sides for a regular polygon: \"))\nlength = int(input(\"Please enter the length of each side: \"))\nperim_color = input(\"Please enter the stroke color of the perimeter: \")\nfill_color = input(\"Please enter the fill color of the polygon: \")\nwn = turtle.Screen()\nalex = turtle.Turtle()\nalex.color(perim_color)\nalex.pensize(4)\nwn.bgcolor(fill_color)\n\n\nfor i in range(sides):\n alex.forward(length)\n alex.left(360 / sides)\n\nwn.exitonclick()\n\n","sub_path":"Wk2_28_May-3_June/HW3/raul_garcia/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"182006407","text":"from django.db.models import Model, CharField, TextField, SmallIntegerField, DateTimeField\n\n\nclass User(Model):\n GROUP_CHOICES = (\n (1, '第一小隊'),\n (2, '第二小隊'),\n (3, '第三小隊'),\n (4, '第四小隊'),\n (5, '第五小隊'),\n (6, '第六小隊'),\n (7, '第七小隊'),\n (8, '第八小隊'),\n )\n\n TRAFFIC_CHOICES = (\n (1, '客運'),\n (2, '高鐵'),\n (3, '台鐵'),\n (4, '其他'),\n )\n\n name = CharField(max_length=20)\n group = SmallIntegerField(choices=GROUP_CHOICES)\n traffic_way = SmallIntegerField(choices=TRAFFIC_CHOICES)\n description = TextField(max_length=150, default='')\n sign_time = DateTimeField(null=True)\n\n def __str__(self):\n return 
self.name\n","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"41686021","text":"\"\"\"\n(big image -> resize->hog_features->then sliding windows)\n\"\"\"\nimport os\nimport time\n\nfrom scipy.ndimage.measurements import label\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\n\nimport config\nfrom lessons import *\n\n\nclass DetectVehicles(object):\n def __init__(self, color_space='YCrCb', spatial_size=(32, 32),\n hist_bins=32, orient=9, single_image=True,\n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=False, hog_feat=True,\n fit_model=None, X_scaler=None, scale_img=False):\n self.color_space = color_space\n self.spatial_size = spatial_size\n self.hist_bins = hist_bins\n self.orient = orient\n self.single_image = single_image\n self.pix_per_cell = pix_per_cell\n self.cell_per_block = cell_per_block\n self.hog_channel = hog_channel\n self.spatial_feat = spatial_feat\n self.hist_feat = hist_feat\n self.hog_feat = hog_feat\n self.fit_model = fit_model\n self.X_scaler = X_scaler\n self.frame_num = 0\n self.scale_img = scale_img\n\n def draw_on_image(self, img):\n # cv2.imwrite('video_input_images/final_{0}.png'.format(self.frame_num), img)\n # Uncomment the following line if you extracted training\n # data from .png images (scaled 0 to 1 by mpimg) and the\n # image you are searching is a .jpg (scaled 0 to 255)\n draw_image = np.copy(img)\n if self.scale_img:\n draw_image = draw_image.astype(np.float32) / 255\n # img = img.astype(np.float32) / 255\n windows = self.slide_window(draw_image, x_start_stop=[690, None], y_start_stop=[375, 430],\n xy_window=(110, 90), xy_overlap=config.xy_overlap)\n windows += self.slide_window(draw_image, x_start_stop=[760, None], y_start_stop=[375, 560],\n xy_window=(110, 90), xy_overlap=config.xy_overlap)\n hot_windows = self.search_windows(draw_image, windows, self.fit_model, self.X_scaler)\n\n draw_img = self.draw_boxes(img, hot_windows, color=(0, 0, 200), thick=6)\n heat = np.zeros_like(img[:, :, 0]).astype(np.float)\n heat = self.add_heat(heat, hot_windows)\n heat2 = self.apply_threshold(heat, 7, img)\n cv2.imwrite('video_output_images/final_{0}.png'.format(self.frame_num), draw_img)\n self.frame_num += 1\n return heat2\n\n def compute_features(self, feature_image):\n file_features = []\n if self.spatial_feat:\n spatial_features = bin_spatial(feature_image, size=self.spatial_size)\n file_features.append(spatial_features)\n if self.hist_feat:\n # Apply color_hist()\n hist_features = color_hist(feature_image, nbins=self.hist_bins)\n file_features.append(hist_features)\n if self.hog_feat:\n # Call get_hog_features() with vis=False, feature_vec=True\n if self.hog_channel == 'ALL':\n hog_features = []\n self.hog_images = []\n for channel in range(feature_image.shape[2]):\n hog_feature = get_hog_features(feature_image[:, :, channel],\n self.orient, self.pix_per_cell, self.cell_per_block,\n vis=False, feature_vec=True)\n hog_features.append(hog_feature)\n # self.hog_images.append(hog_image)\n hog_features = np.ravel(hog_features)\n else:\n hog_features = get_hog_features(feature_image[:, :, self.hog_channel], self.orient,\n self.pix_per_cell, self.cell_per_block, vis=False, feature_vec=True)\n # Append the new feature vector to the features list\n file_features.append(hog_features)\n return 
file_features\n\n def apply_threshold(self, heatmap, threshold, img):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n cv2.imwrite('heatmap.png', heatmap)\n # Return thresholded map\n labels = label(heatmap)\n print(labels[1])\n for car_number in range(1, labels[1] + 1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n # cv2.imwrite('heat{0}.png'.format(pd.datetime.now().microsecond), img)\n return img\n\n def add_heat(self, heatmap, bbox_list):\n # Iterate through list of bboxes\n for box in bbox_list:\n # Add += 1 for all pixels inside each bbox\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n\n # Return updated heatmap\n return heatmap\n\n # Define a function to extract features from a list of images\n # Have this function call bin_spatial() and color_hist()\n def extract_features(self, img, single_image=None):\n # Allows override\n if single_image:\n self.single_image = single_image\n # List of images\n if not self.single_image:\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in img:\n # Read in each one by one\n image = cv2.imread(file)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n feature_image = self.cvt_color(image)\n file_features = self.compute_features(feature_image)\n features.append(np.concatenate(file_features))\n # Return list of feature vectors\n return features\n else:\n # 2) Apply color conversion if other than 'RGB'\n feature_image = self.cvt_color(img)\n img_features = self.compute_features(feature_image)\n # 9) Return concatenated array of features\n return np.concatenate(img_features)\n\n # Define a function you will pass an image\n # and the list of windows to be searched (output of slide_windows())\n def search_windows(self, img, windows, clf, scaler):\n # 1) Create an empty list to receive positive detection windows\n on_windows = []\n # 2) Iterate over all windows in the list\n for window in windows:\n # 3) Extract the test window from original image\n test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))\n # 4) Extract features for that window using single_img_features()\n # HACKY\n saved_single = self.single_image\n features = self.extract_features(test_img, True)\n self.single_image = saved_single\n # 5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n # 6) Predict using your classifier\n prediction = clf.predict(test_features)\n # 7) If positive (prediction == 1) then save the window\n if prediction == 1:\n on_windows.append(window)\n # 8) Return windows for positive detections\n return on_windows\n\n def cvt_color(self, image):\n # apply color conversion if other than 'RGB'\n color_space = self.color_space\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n elif color_space == 
'YUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)\n else:\n feature_image = np.copy(image)\n return feature_image\n\n # Define a function that takes an image,\n # start and stop positions in both x and y,\n # window size (x and y dimensions),\n # and overlap fraction (for both x and y)\n def slide_window(self, img, x_start_stop=[None, None], y_start_stop=[None, None],\n xy_window=(64, 64), xy_overlap=(0.5, 0.5)):\n # If x and/or y start/stop positions not defined, set to image size\n if x_start_stop[0] is None:\n x_start_stop[0] = 0\n if x_start_stop[1] is None:\n x_start_stop[1] = img.shape[1]\n if y_start_stop[0] is None:\n y_start_stop[0] = 0\n if y_start_stop[1] is None:\n y_start_stop[1] = img.shape[0]\n # Compute the span of the region to be searched\n xspan = x_start_stop[1] - x_start_stop[0]\n yspan = y_start_stop[1] - y_start_stop[0]\n # Compute the number of pixels per step in x/y\n nx_pix_per_step = np.int(xy_window[0] * (1 - xy_overlap[0]))\n ny_pix_per_step = np.int(xy_window[1] * (1 - xy_overlap[1]))\n # Compute the number of windows in x/y\n nx_windows = np.int(xspan / nx_pix_per_step) - 1\n ny_windows = np.int(yspan / ny_pix_per_step) - 1\n # Initialize a list to append window positions to\n window_list = []\n # Loop through finding x and y window positions\n # Note: you could vectorize this step, but in practice\n # you'll be considering windows one by one with your\n # classifier, so looping makes sense\n for ys in range(ny_windows):\n for xs in range(nx_windows):\n # Calculate window position\n startx = xs * nx_pix_per_step + x_start_stop[0]\n endx = startx + xy_window[0]\n starty = ys * ny_pix_per_step + y_start_stop[0]\n endy = starty + xy_window[1]\n\n # Append window position to list\n window_list.append(((startx, starty), (endx, endy)))\n # Return the list of windows\n return window_list\n\n # Define a function to draw bounding boxes\n def draw_boxes(self, img, bboxes, color=(0, 0, 255), thick=6):\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # IN HERE, WANT TO COMBINE BOXES\n # Draw a rectangle given bbox coordinates\n # x, y = zip(*bbox)\n # center = (max(x) + min(x)) / 2., (max(y) + min(y)) / 2.\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy\n\n\ndef get_training_examples():\n cars = []\n notcars = []\n for root, dirs, files in os.walk('.'):\n for name in files:\n if name.endswith((\".png\", \".jpg\")):\n if 'non-vehicles' in root:\n notcars.append(os.path.join(root, name))\n elif 'vehicles' in root:\n cars.append(os.path.join(root, name))\n return cars, notcars\n\n\ndef get_fit_model():\n cars, notcars = get_training_examples()\n\n ### TODO: Tweak these parameters and see how the results change.\n detect = DetectVehicles(config.color_space, config.spatial_size, config.hist_bins, config.orient, False,\n config.pix_per_cell, config.cell_per_block, config.hog_channel,\n config.spatial_feat, config.hist_feat, config.hog_feat)\n car_features = detect.extract_features(cars)\n notcar_features = detect.extract_features(notcars)\n X = np.vstack((car_features, notcar_features)).astype(np.float64)\n # Fit a per-column scaler\n X_scaler = StandardScaler().fit(X)\n # Apply the scaler to X\n scaled_X = X_scaler.transform(X)\n\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n # 
Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\n print('Using:', config.orient, 'orientations', config.pix_per_cell,\n 'pixels per cell and', config.cell_per_block, 'cells per block')\n print('Feature vector length:', len(X_train[0]))\n # Use a linear SVC\n svc = LinearSVC()\n # Check the training time for the SVC\n t = time.time()\n svc.fit(X_train, y_train)\n t2 = time.time()\n print(round(t2 - t, 2), 'Seconds to train SVC...')\n # Check the score of the SVC\n print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n # Check the prediction time for a single sample\n return svc, X_scaler\n\n\ndef vid_pipe(path='project_video.mp4', svc=None, X_scaler=None):\n from moviepy.editor import VideoFileClip\n clip = VideoFileClip(path)\n if svc is None:\n svc, X_scaler = get_fit_model()\n det = DetectVehicles(config.color_space, config.spatial_size, config.hist_bins, config.orient, False,\n config.pix_per_cell, config.cell_per_block, config.hog_channel,\n config.spatial_feat, config.hist_feat, config.hog_feat, fit_model=svc, X_scaler=X_scaler,\n scale_img=False)\n output = clip.fl_image(det.draw_on_image)\n output.write_videofile('project_video_annotated.mp4', audio=False)\n return svc, X_scaler, det\n\n\ndef test_pipe(svc, X_scaler):\n import imageio\n det = DetectVehicles(config.color_space, config.spatial_size, config.hist_bins, config.orient, False,\n config.pix_per_cell, config.cell_per_block, config.hog_channel,\n config.spatial_feat, config.hist_feat, config.hog_feat, fit_model=svc, X_scaler=X_scaler)\n import glob\n for img in glob.glob('test_images/*'):\n print(img)\n image = imageio.imread(img)\n new = det.draw_on_image(image)\n # cv2.imwrite('final_{0}.png'.format(img.split('/')[-1].split('.')[0]), new)\n break\n return det\n\n\ndef centroid_function(detections, img):\n centroid_rectangles = []\n\n heat_map = np.zeros_like(img)[:, :, 0]\n\n for (x1, y1, x2, y2) in detections:\n heat_map[y1:y2, x1:x2] += 10\n\n heat_map.astype(\"uint8\")\n\n _, binary = cv2.threshold(heat_map, 11, 255, cv2.THRESH_BINARY);\n\n _, contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n for contour in contours:\n rectangle = cv2.boundingRect(contour)\n if rectangle[2] < 50 or rectangle[3] < 50: continue\n x, y, w, h = rectangle\n centroid_rectangles.append([x, y, x + w, y + h])\n\n return centroid_rectangles\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"487757728","text":"import torch\n\n\nclass Interpolation(object):\n\n def _cubic_interpolation_kernel(self, scaled_grid_dist):\n \"\"\"\n Computes the interpolation kernel u() for points X given the scaled\n grid distances:\n (X-x_{t})/s\n where s is the distance between neighboring grid points. Note that,\n in this context, the word \"kernel\" is not used to mean a covariance\n function as in the rest of the package. 
For more details, see the\n original paper Keys et al., 1989, equation (4).\n\n scaled_grid_dist should be an n-by-g matrix of distances, where the\n (ij)th element is the distance between the ith data point in X and the\n jth element in the grid.\n\n Note that, although this method ultimately expects a scaled distance matrix,\n it is only intended to be used on single dimensional data.\n \"\"\"\n U = scaled_grid_dist.abs()\n\n first_case = U[U <= 1]\n # u(s) = 1.5|s|^3 - 2.5|s|^2 + 1 when 0 < |s| < 1\n U[U <= 1] = ((1.5 * first_case - 2.5).mul(first_case)).mul(first_case) + 1\n\n # u(s) = -0.5|s|^3 + 2.5|s|^2 - 4|s| + 2 when 1 < |s| < 2\n second_case = U[(1 < U) & (U <= 2)]\n U[(1 < U) & (U <= 2)] = ((-0.5 * second_case + 2.5).mul(second_case) - 4).mul(second_case) + 2\n return U\n\n def interpolate(self, x_grid, x_target, interp_points=range(-2, 2)):\n interp_points_flip = x_grid.new(interp_points[::-1])\n interp_points = x_grid.new(interp_points)\n\n num_grid_points = x_grid.size(1)\n num_target_points = x_target.size(0)\n num_dim = x_target.size(-1)\n num_coefficients = len(interp_points)\n\n interp_values = x_target.new(num_target_points, num_coefficients ** num_dim).fill_(1)\n interp_indices = x_grid.new(num_target_points, num_coefficients ** num_dim).long().zero_()\n\n for i in range(num_dim):\n grid_delta = x_grid[i, 1] - x_grid[i, 0]\n lower_grid_pt_idxs = torch.floor((x_target[:, i] - x_grid[i, 0]) / grid_delta).squeeze()\n lower_pt_rel_dists = (x_target[:, i] - x_grid[i, 0]) / grid_delta - lower_grid_pt_idxs\n lower_grid_pt_idxs = lower_grid_pt_idxs - interp_points.max()\n\n scaled_dist = lower_pt_rel_dists.unsqueeze(-1) + interp_points_flip.unsqueeze(-2)\n dim_interp_values = self._cubic_interpolation_kernel(scaled_dist)\n\n # Find points who's closest lower grid point is the first grid point\n # This corresponds to a boundary condition that we must fix manually.\n left_boundary_pts = torch.nonzero(lower_grid_pt_idxs < 1)\n num_left = len(left_boundary_pts)\n\n if num_left > 0:\n left_boundary_pts.squeeze_(1)\n x_grid_first = x_grid[i, :num_coefficients].unsqueeze(1).t().expand(num_left, num_coefficients)\n\n grid_targets = x_target.select(1, i)[left_boundary_pts].unsqueeze(1).expand(num_left, num_coefficients)\n dists = torch.abs(x_grid_first - grid_targets)\n closest_from_first = torch.min(dists, 1)[1]\n\n for j in range(num_left):\n dim_interp_values[left_boundary_pts[j], :] = 0\n dim_interp_values[left_boundary_pts[j], closest_from_first[j]] = 1\n lower_grid_pt_idxs[left_boundary_pts[j]] = 0\n\n right_boundary_pts = torch.nonzero(lower_grid_pt_idxs > num_grid_points - num_coefficients)\n num_right = len(right_boundary_pts)\n\n if num_right > 0:\n right_boundary_pts.squeeze_(1)\n x_grid_last = x_grid[i, -num_coefficients:].unsqueeze(1).t().expand(num_right, num_coefficients)\n\n grid_targets = x_target.select(1, i)[right_boundary_pts].unsqueeze(1)\n grid_targets = grid_targets.expand(num_right, num_coefficients)\n dists = torch.abs(x_grid_last - grid_targets)\n closest_from_last = torch.min(dists, 1)[1]\n\n for j in range(num_right):\n dim_interp_values[right_boundary_pts[j], :] = 0\n dim_interp_values[right_boundary_pts[j], closest_from_last[j]] = 1\n lower_grid_pt_idxs[right_boundary_pts[j]] = num_grid_points - num_coefficients\n\n offset = (interp_points - interp_points.min()).long().unsqueeze(-2)\n dim_interp_indices = lower_grid_pt_idxs.long().unsqueeze(-1) + offset\n\n n_inner_repeat = num_coefficients ** i\n n_outer_repeat = num_coefficients ** (num_dim - i - 1)\n 
index_coeff = num_grid_points ** (num_dim - i - 1)\n dim_interp_indices = dim_interp_indices.unsqueeze(-1).repeat(1, n_inner_repeat, n_outer_repeat)\n dim_interp_values = dim_interp_values.unsqueeze(-1).repeat(1, n_inner_repeat, n_outer_repeat)\n interp_indices.add_(dim_interp_indices.view(num_target_points, -1).mul_(index_coeff))\n interp_values.mul_(dim_interp_values.view(num_target_points, -1))\n\n return interp_indices, interp_values\n","sub_path":"gpytorch/utils/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"82243","text":"import os\nimport sys\nimport multiprocessing as mp\n_PATH_ = os.path.dirname(os.path.dirname(__file__))\n\nif _PATH_ not in sys.path:\n sys.path.append(_PATH_)\n\n\nfrom src.utils.logger import setup_logger\nfrom src.config import Config, PlayWithHumanConfig\nfrom src.human import play\n\n\ndef setup_parameters(config):\n num_cores = mp.cpu_count()\n search_threads = 10 if num_cores < 10 else 20\n print(f\"search_threads = {search_threads}\")\n config.play.search_threads = search_threads\n\nif __name__ == \"__main__\":\n # mp.freeze_support()\n # sys.setrecursionlimit(10000)\n # config_type = 'distribute'\n\n config = Config()\n print(config)\n config.resource.create_directories()\n setup_logger(config.resource.play_log_path)\n config.opts.new = False\n config.opts.light = False\n pwhc = PlayWithHumanConfig()\n pwhc.update_play_config(config.play)\n config.opts.bg_style = 'WOOD'\n setup_parameters(config)\n config.play.simulation_num_per_move = 10\n play.start(config, True)\n input('按任意键退出...')\n","sub_path":"src/play_games.py","file_name":"play_games.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"115459792","text":"#!.\\chrome77\\tools\\python\n\nfrom pykeyboard import *\nfrom pymouse import *\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport xlrd\n\nimport time\nimport random\nimport os\nimport sys\nimport threading\nimport enum\nimport tempfile\n\nstrops = '''\n#begin\nlabel: do_open\njust: clear\njust: refresh\nopen: https://ah.2-class.com/\n\n#step0\nlabel: do_first\nclick: selector=a[href=\"/\"]; goto=do_open; timeout=3; attribute=else\n\n#step1, enter\nlabel: do_login\nclick: selector=button.ant-btn.ant-btn-primary; text=学生登录; timeout=10\n\n#step2, login\ninput: selector=input#account; default=testuser; timeout=5\nwait: 0.3\ninput: selector=input#password; default=123456\nwait: 0.3\nclick: selector=input#login.login-btn\nwait: 1.5\n\n# step3, prepare\nclick: selector=a[href=\"/competition\"]; timeout=5\nwait: 1\nclick: selector=p.competitionScore; goto=do_exit; timeout=1; attribute=optional\nclick: selector=button.ant-btn.exam-box-start.ant-btn-primary; timeout=5\n\n# step4, doing\nlabel: do_answer\n#wait: 5\nclick: selectors=input.ant-radio-input; result=random; timeout=5\nclick: selector=button.ant-btn.ant-btn-primary.ant-btn-lg; goto=do_answer; timeout=0.1; attribute=optional\nclick: selector=button.ant-btn.ant-btn-primary; text=下一题; goto=do_answer; attribute=optional\nclick: selector=button.ant-btn.ant-btn-primary; text=提 交\nwait: 0.5\nclick: selector=p.competitionScore; timeout=3; attribute=optional\n\n\n# step5, quit\nlabel: do_exit\nwait: 0.5\njust: clear\njust: refresh\njust: exit\n#click: goto=do_login\n'''\n\nCUR_PATH = os.path.abspath(os.curdir)\nTMP_PATH = 
tempfile.gettempdir()\n\nLOG_FILE = os.path.join(TMP_PATH, \"zen_log.txt\")\nprint(\"PATH=\", CUR_PATH, TMP_PATH)\n\nclass CAutoKM:\n def __init__(self):\n self.m = None # mouse\n self.k = None # keyboard\n self.driver = None # browser\n pass\n\n def init(self):\n self.init_device()\n self.init_browser()\n\n def close(self):\n self.driver.quit()\n\n def init_device(self):\n self.m = PyMouse()\n self.k = PyKeyboard()\n\n def init_browser(self, timeout=2.0):\n exec_path = \"chromedriver\"\n options = webdriver.ChromeOptions()\n if os.name != \"posix\":\n exec_path = \".\\\\chrome77\\\\tools\\\\chromedriver.exe\"\n options.binary_location = \".\\\\chrome77\\\\chrome.exe\"\n options.add_argument('lang=zh_CN.UTF-8')\n options.add_argument(\"disable-extensions\")\n options.add_argument(\"disable-infobars\")\n options.add_experimental_option('useAutomationExtension', False)\n options.add_experimental_option('excludeSwitches', ['enable-automation'])\n prefs = {\n 'credentials_enable_service': False,\n 'profile.password_manager_enabled': False,\n 'profile.default_content_setting_values' : {\n 'notifications' : 2\n }\n }\n options.add_experimental_option('prefs', prefs)\n self.driver = webdriver.Chrome(executable_path = exec_path, options = options)\n self.driver.maximize_window()\n #self.driver.implicitly_wait(10)\n time.sleep(timeout)\n\n def open(self, url, timeout=1.0):\n #refresh/forward/back/close/quit\n self.driver.get(url)\n time.sleep(timeout)\n def get_screen_size(self):\n width, height = self.m.screen_size()\n #print (\"screen size:\", width, height)\n return width, height\n def get_position(self):\n x, y = self.m.position()\n #print (\"mouse position\", x, y)\n return x, y\n\n def move_raw(self, x1, y1, x2, y2, sec, times):\n wait = sec / times\n d1 = (x2 - x1) / times\n d2 = (y2 - y1) / times\n for i in range(times):\n x = int(x1 + d1 * i)\n y = int(y1 + d2 * i)\n self.m.move(x, y)\n time.sleep(wait)\n self.m.move(int(x2), int(y2))\n\n def move(self, x1, y1, x2, y2):\n #self.move_raw(x1, y1, x2, y2, 0.5, 5)\n t1 = 0.1\n t2 = 0.2\n xm = x1 + (x2-x1) * 0.75\n ym = y1 + (y2-y1) * 0.75\n if abs(xm) >= 150 or abs(ym) >= 150:\n t1 = 0.2\n t2 = 0.3\n self.move_raw(x1, y1, xm, ym, t1, 2)\n self.move_raw(xm, ym, x2, y2, t2, 5)\n\n def input(self, text):\n times = len(text)\n dur = 1.0 * (times/3+1)\n wait = dur / times\n for ch in text:\n self.k.type_string(ch)\n time.sleep(wait)\n\n def input_text(self, elem, value):\n for ch in value:\n elem.send_keys(ch)\n time.sleep(0.07)\n\n def print_pos(self):\n while True:\n self.get_position()\n time.sleep(1)\n\n def get_screen_xy(self):\n # getBoundingClientRect\n jsx = \"return window.screenX;\"\n jsy = \"return (window.screenY+(window.screen.availHeight-window.innerHeight));\"\n dx = self.driver.execute_script(jsx)\n dy = self.driver.execute_script(jsy)\n #print (\"screen_xy:\", dx, dy)\n return dx, dy\n\n def clear_all(self):\n self.driver.delete_all_cookies()\n def refresh(self):\n self.driver.refresh()\n def check_css(self, css, timeout):\n sec = 0.5\n try:\n sec = float(timeout)\n if sec <= 0.09: sec = 0.5\n except:\n sec = 0.5\n try:\n WebDriverWait(self.driver, sec).until(lambda x: x.find_elements_by_css_selector(css))\n except:\n return False\n return True\n\n def move_elem(self, elem):\n if not elem: return\n x, y = self.get_position()\n sx, sy = self.get_screen_xy()\n #location = elem.location\n location = elem.location_once_scrolled_into_view\n width = elem.size[\"width\"]\n height = elem.size[\"height\"]\n #print(\"elem:\", location, 
height)\n dx = location['x']+sx+(width/3)\n dy = location['y']+sy+(height/2)\n #print (\"dest:\", dx, dy)\n self.move(x, y, dx, dy)\n def move_elem_name(self, name):\n elem = self.driver.find_element_by_name(name)\n self.move_elem(elem)\n def move_elem_id(self, eid):\n elem = self.driver.find_element_by_id(eid)\n self.move_elem(elem)\n def move_elem_css(self, css, text, timeout, random, value):\n if not self.check_css(css, timeout):\n return False\n elems = self.driver.find_elements_by_css_selector(css)\n #print(\"test\", css, elems)\n target = None\n if random and len(elems) > 0:\n idx = -1\n qelem = None\n if value == 1:\n #TODO: search\n qcss = \"p.exam-content-question\"\n qelem = self.driver.find_element_by_css_selector(qcss)\n if qelem != None:\n idx = do_get_answer(qelem.text)\n pass\n if idx < 0 or idx >= len(elems):\n idx = gen_random(len(elems))\n try:\n fp = open(LOG_FILE, \"a+\")\n if qelem != None:\n fp.write(\"%s___fail_random=%d\\n\" % (qelem.text, idx))\n else:\n fp.write(\"no___fail_random=%d\\n\" % idx)\n fp.close()\n except:\n pass\n else:\n pass\n target = elems[idx]\n else:\n for elem in elems:\n if text is None or elem.text.find(text) != -1:\n target = elem\n break\n if target != None:\n #print(\"move-click\", css, target)\n self.move_elem(target)\n time.sleep(0.3)\n try:\n target.click()\n except:\n pass\n return True\n return False\n def input_elem_css(self, css, text, timeout, value):\n if not self.check_css(css, timeout):\n return False\n elems = self.driver.find_elements_by_css_selector(css)\n #print(\"test\", css, elems)\n for elem in elems:\n if text is None or elem.text.find(text) != -1:\n #print(\"move-input\", css, elem)\n self.move_elem(elem)\n #elem.send_keys(value)\n time.sleep(0.1)\n self.input_text(elem, value)\n return True\n return False\n\n\ndef do_open_xls(name):\n data = None\n try:\n data = xlrd.open_workbook(name)\n except:\n data = None\n if data != None:\n table = data.sheets()[0] \n print (table.nrows)\n #rv = table.row_values(0)\n #print (rv);\n #rv = table.row_values(1)\n #print (rv);\n #rv = table.row_values(2)\n #print (rv);\n return table\n return None\n\n\ng_quit = False\n# key(text):value(int)\ng_answer_raw = {}\ng_answer_opt = {}\n\nRAW_ANSWERS_FILE = os.path.join(CUR_PATH, \"res\", \"zen_raw_answers.xls\")\nOPT_ANSWERS_FILE = os.path.join(TMP_PATH, \"zen_opt_answers.txt\")\ndef inline_strip(val):\n ret = val\n try:\n ret = val.strip()\n except:\n ret = val\n return ret\ndef inline_value(char):\n val = -1\n if char == \"A\": val = 0\n elif char == \"B\": val = 1\n elif char == \"C\": val = 2\n elif char == \"D\": val = 3\n elif char == \"E\": val = 4\n return val\ndef do_load_answer():\n try: \n data = do_open_xls(RAW_ANSWERS_FILE)\n #print(data)\n table = data\n #print (\"load_answer, nrows=\", table.nrows)\n for i in range(table.nrows):\n item = table.row_values(i)\n if len(item) < 2: \n print(\">load_answer, invalid\", i, item)\n continue\n key = inline_strip(item[0])\n char = inline_strip(item[1])\n val = inline_value(char)\n if val == -1:\n print(i, key, char, val)\n #pre = g_answer_raw.get(key)\n pre = None\n #print (key, val)\n if not pre:\n g_answer_raw[\"%s\" % key] = int(val)\n else:\n print(i, key, val, g_answer_raw.get(key))\n except Exception as e:\n print(\">load_answer, raw error=\", e)\n pass\n print (\">load_answer, raw size=\", len(g_answer_raw), RAW_ANSWERS_FILE)\n\n try:\n fp = open(OPT_ANSWERS_FILE, \"r\")\n for line in fp.readlines():\n item = line.split(\",,,\", 1)\n if len(item) >= 2:\n key = 
inline_strip(item[0])\n val = inline_strip(item[1])\n g_answer_opt[key] = int(val)\n fp.close()\n except:\n print(\">load_answer, opt error\")\n pass\n print(\">load_answer, opt size=\", len(g_answer_opt), OPT_ANSWERS_FILE)\n pass\ndef do_get_answer(text):\n if not text or len(text) < 7:\n return -1\n val = -1\n size = len(text)\n p1 = 1\n p2 = min(20, size-2)\n keyword = text[p1:p2] # max 20\n #tmp = g_answer_opt.get(keyword)\n tmp = None\n if not tmp:\n for key in g_answer_raw:\n if text.find(key) != -1:\n val = g_answer_raw[key]\n g_answer_opt[keyword] = val\n fp = open(OPT_ANSWERS_FILE, \"a+\")\n if fp != None:\n fp.write(\"%s,,,%d\\n\" % (keyword, val))\n fp.close()\n break\n pass\n pass\n else:\n val = -1\n return val\n\nRESULT_FILE = os.path.join(TMP_PATH, \"zen_done_result.txt\")\ng_done_results = {}\ndef do_load_results():\n try:\n fp = open(RESULT_FILE, \"r\")\n for line in fp.readlines():\n g_done_results[inline_strip(line)] = 1\n fp.close()\n except:\n print(\">load_results, error\")\n pass\n print(\">load_results, size=\", len(g_done_results), RESULT_FILE)\ndef do_update_results(acts):\n try:\n fp = open(RESULT_FILE, \"a+\")\n for act in acts:\n name = inline_strip(act[0])\n exist = g_done_results.get(name)\n if act[2] == 3 and not exist:\n fp.write(\"%s\\n\" % name)\n g_done_results[name] = True\n fp.close()\n except Exception as e:\n print(\">update_results, error\", e)\n pass\n print(\">update_results, size=\", len(g_done_results), RESULT_FILE)\n\n\n### Mark\nclass Mark(enum.Enum):\n efew = 50\n e6x = 60\n e7x = 70\n e8x = 80\n e9x = 90\n efull = 100\n# 0-60(0), 60-70(5), 70-80(10), 80-90(15), 90-100(20), 100(50)\ndef gen_mark():\n return Mark.efull\n rand = random.random()\n val = int(rand * 1000) % 100\n if val < 0: return Mark.efew\n elif val < 5: return Mark.e6x\n elif val < 15: return Mark.e7x\n elif val < 30: return Mark.e8x\n elif val < 50: return Mark.e9x\n return Mark.efull\ndef gen_answers():\n total = 20\n answers = []\n mark = gen_mark()\n value = 0\n if mark == Mark.efull:\n value = 1\n for i in range(total):\n answers.append(value)\n if mark != Mark.efull:\n ratio = mark.value / 100.0\n num = 20 * (ratio + random.random()*0.1)\n if mark == Mark.efew:\n num = 20 * (0.3 + random.random()*0.3)\n count = 0\n while count < num:\n idx = int(random.random()*1000) % total\n if answers[idx] == 0:\n answers[idx] = 1\n count += 1\n pass\n return answers\n\n\n### Task\nclass CTaskProcess:\n def __init__(self):\n self.km = None\n self.actions = []\n self.accounts = {}\n def to_open(self, idx, url):\n print(\">[%d] open: %s\" % (idx, url))\n self.km.open(url)\n def to_wait(self, idx, sec):\n print(\">[%d] wait: %s\" % (idx, sec))\n time.sleep(sec)\n def to_label(self, idx, name):\n print(\">[%d] label: %s\" % (idx, name))\n def to_click(self, idx, props, acc):\n print(\">[%d] click:%s\" % (idx, props))\n css = props.get(\"selector\")\n text = props.get(\"text\")\n goto = props.get(\"goto\")\n timeout = props.get(\"timeout\")\n if css != None:\n ret = self.km.move_elem_css(css, text, timeout, False, -1)\n if ret:\n if css.find(\"competitionScore\") != -1 and acc != None:\n if acc[2] == 2:\n acc[2] = 3\n print(\"click, goto\", goto)\n return True, goto\n else:\n css = props.get(\"selectors\")\n if css != None:\n random = False\n value = -1\n result = props.get(\"result\")\n if result != None and result.strip() == \"random\":\n random = True\n value = self.get_one_answer()\n ret = self.km.move_elem_css(css, text, timeout, random, value)\n if ret:\n self.answer_idx += 1\n 
print(\"click, goto\", goto)\n return True, goto\n pass\n return False, goto \n def to_input(self, idx, props, acc):\n print(\">[%d] input: %s\" % (idx, props))\n css = props.get(\"selector\")\n text = props.get(\"text\")\n goto = props.get(\"goto\")\n timeout = props.get(\"timeout\")\n if css != None:\n value = None\n if acc != None:\n if css.find(\"#account\") != -1:\n value = acc[0]\n acc[2] = 1\n elif css.find(\"#password\") != -1:\n value = acc[1]\n acc[2] = 2\n if not value:\n value = props.get(\"default\").strip()\n ret = self.km.input_elem_css(css, text, timeout, value)\n if ret:\n print(\"input, goto\", goto)\n return True, goto\n return False, goto\n def to_just(self, idx, just):\n print(\">[%d] just: %s\" % (idx, just))\n if just == \"clear\":\n self.km.clear_all()\n elif just == \"refresh\":\n self.km.refresh()\n elif just == \"exit\":\n return False\n else:\n print(\"unsupport just=\", just)\n return True\n # @return -1, exit, @return 0, nop, @return 1, maybe goto\n def to_do_one(self, idx, item, acc):\n key = item[0]\n if key == \"open\": self.to_open(idx, item[1])\n elif key == \"wait\": self.to_wait(idx, float(item[1])) \n elif key == \"just\": \n if not self.to_just(idx, item[1]):\n return -1, None # exit\n elif key == \"click\":\n ret, goto = self.to_click(idx, item[1], acc)\n attr = item[1].get(\"attribute\")\n # -- => if success and goto, if success, <-1> if fail\n # attribute=else => if fail and goto, if success\n # attribute=optional => if fail;\n if ret:\n if attr == \"else\":\n return 0, None\n if goto != None:\n return 1, goto.strip()\n else:\n if attr == \"else\" and goto != None:\n return 1, goto.strip()\n if attr != \"optional\":\n return -1, None\n return 0, None\n elif key == \"input\":\n ret, goto = self.to_input(idx, item[1], acc)\n attr = item[1].get(\"attribute\")\n if ret:\n if attr == \"else\":\n return 0, None\n if goto != None:\n return 1, goto.strip()\n else:\n if attr == \"else\" and goto != None:\n return 1, goto.strip()\n if attr != \"optional\":\n return -1, None\n return 0, None\n elif key == \"label\": self.to_label(idx, item[1])\n else: print(\"unsupported action=\", key)\n return 0, None\n def get_label_index(self, label):\n if not label: return -1\n index = -1\n for item in self.actions:\n index += 1\n key = item[0]\n if key == \"label\":\n val = item[1].strip()\n if val == label:\n return index\n return index\n def get_one_account(self):\n if len(self.accounts) == 0:\n return None\n self.acc_idx += 1\n if self.acc_idx >= len(self.accounts):\n return None\n return self.accounts[self.acc_idx]\n def get_one_answer(self):\n return 1\n if self.answer_idx >= len(self.answers):\n return 1\n if self.answer_idx < 0:\n return -1\n return self.answers[self.answer_idx]\n\n def go(self, km, acts):\n if not km or not acts:\n return\n self.km = km\n self.accounts = acts\n self.acc_idx = -1\n self.answer_idx = -1\n self.answers = []\n\n acc = None\n start = -1\n end = len(self.actions)\n quit = False\n isLoop = False\n while not quit:\n if not acc or acc[2] == 3:\n if not acc or not isLoop:\n self.answer_idx = 0\n self.answers = gen_answers()\n acc = self.get_one_account()\n if start == -1:\n start = 0\n else:\n start = self.get_label_index(\"do_first\")\n isLoop = False\n print(\"===go===, new account=\", acc, self.acc_idx, start)\n if not acc or g_quit:\n print(\"=go=, no account for idx=\", self.acc_idx);\n break\n if start < 0 or start >= end:\n print(\"=go=, invalid for acc=\", acc)\n acc = None\n continue\n for idx in range(start, end):\n item = 
self.actions[idx]\n ret, val = self.to_do_one(idx, item, acc)\n if g_quit:\n quit = True\n break\n if ret == -1: # exit\n print(\"=go=, error for acc=\", acc)\n acc = None\n break\n elif ret == 1: # maybe loop\n isLoop = True\n start = self.get_label_index(val)\n print(\"=go=, will new start=\", start, acc)\n break\n elif ret == 0: # nop\n pass\n if idx+1 == end:\n print(\"=go=, skip for acc=\", acc)\n acc = None\n break\n pass\n pass\n pass\n\n def parse(self, strops):\n self.actions = []\n for line in strops.splitlines():\n line = line.strip()\n if len(line) == 0 or line[0] == '#':\n continue\n fds = line.split(':', 1)\n #print(fds)\n if len(fds) <= 1:\n continue\n tag = fds[0].strip()\n body = fds[1].strip()\n #print(tag, body)\n action = []\n if tag in [\"open\", \"wait\", \"label\", \"just\"]:\n action += [tag, body]\n elif tag in [\"click\", \"input\"]:\n maps = {}\n items = body.split(';')\n for item in items:\n item = item.strip()\n if len(item) == 0:\n continue\n props = item.split('=', 1)\n if len(props) <= 1:\n print(\"wrong\", tag, prop)\n continue\n maps[props[0]] = props[1] \n if tag == \"input\" and not maps.get(\"default\"):\n maps[\"default\"] = \"testing\"\n action += [tag, maps]\n else:\n continue\n self.actions.append(action)\n\ndef gen_random(num):\n rand = random.random()\n val = int(rand * 1000) % num\n return val\n\ndef go_test1():\n km = CAutoKM()\n km.init()\n km.open('https://www.baidu.com')\n km.move_elem_name(\"wd\")\n time.sleep(5)\n km.close()\n sys.exit(0)\n\nimport tkinter as tk\nimport tkinter.filedialog as tkfd\n\nclass Application(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack()\n self.create_widgets()\n self.master.title(\"ZEN\")\n\n width = 480\n height = 320\n self.master.minsize(width, height)\n self.master.maxsize(width, height)\n self.master.lift()\n\n self.km = None\n self.task = None\n self.acctable = None\n self.accindex = 0\n\n def create_widgets(self):\n row = 0\n text = \"===== Control Panel =====\"\n self.top = tk.Label(self, text=text)\n self.top.grid(row=row)\n\n self.v = tk.IntVar()\n self.v.set(1)\n row += 1\n text1 = \"按行输入账号: name,password\"\n self.radio1 = tk.Radiobutton(self, text=text1, variable=self.v, value=1,\n command=self.do_radio)\n self.radio1.grid(row=row, sticky=\"W\")\n self.text = tk.Text(self, width=30, height=10, highlightbackground=\"grey\")\n self.text.grid(row=row, column=1)\n row += 1\n text2 = \"从文件导入账号\"\n self.radio2 = tk.Radiobutton(self, text=text2, variable=self.v, value=2,\n command=self.do_radio)\n self.radio2.grid(row=row, sticky=\"W\")\n self.select = tk.Button(self, text=\"浏览...\", fg=\"blue\", borderwidth=10,\n command=self.do_select)\n self.select.grid(row=row, column=1)\n self.do_radio()\n\n row += 1\n self.sp = tk.Label(self, height=1)\n self.sp.grid(row=row)\n\n row += 1\n self.start = tk.Button(self, text=\"开始\", fg=\"blue\",\n command=self.do_start)\n self.start.grid(row=row, sticky=\"E\")\n self.state = tk.Label(self, text=\"开始后的运行状态 ...\")\n self.state.grid(row=row, column=1)\n\n row += 1\n self.quit = tk.Button(self, text=\"退出\", fg=\"red\",\n command=self.do_quit)\n self.quit.grid(row=row, sticky=\"E\")\n\n def do_radio(self):\n val = self.v.get()\n if val == 1:\n self.text.config(state=\"normal\")\n self.select.config(state=\"disabled\")\n elif val == 2:\n self.text.config(state=\"disabled\")\n self.select.config(state=\"normal\")\n pass\n\n def do_select(self):\n filename = tkfd.askopenfilename(\n title = \"Select File\",\n 
filetypes = ((\"xls files\",\"*.xls\"),(\"all files\",\"*.*\")))\n print(filename)\n self.accindex = 2\n self.acctable = do_open_xls(filename)\n #print(self.acctable)\n # debug and recover\n parts = self.get_accounts(3)\n print(\"do_select, parts=\", self.accindex, parts)\n self.accindex -= len(parts)\n\n def get_accounts(self, maxsize=10):\n parts = []\n default_pwd = \"123456\"\n if self.v.get() == 1:\n szval = self.text.get(1.0, tk.END)\n lines = szval.splitlines()\n #print(lines)\n for line in lines:\n line = line.strip()\n if len(line) == 0: continue\n items = line.split(\",\")\n name = items[0].strip()\n print(\">add new\", name, len(g_done_results))\n if len(items) == 2:\n passwd = items[1].strip()\n if len(passwd) == 0: passwd = default_pwd\n parts.append([name, passwd, 0])\n elif len(items) == 1:\n parts.append([name, default_pwd, 0])\n pass\n else:\n if self.acctable != None:\n num = 0\n while num < maxsize:\n cur = self.accindex\n self.accindex += 1\n if cur >= self.acctable.nrows:\n print(\"=== xls data ended\")\n break\n item = self.acctable.row_values(cur)\n if len(item) < 5:\n print(\"=== xls data-format not support!\")\n continue\n name = inline_strip(item[4])\n exist = g_done_results.get(name)\n if exist != None:\n print(\"=== xls exist and skip=%s\" % name)\n continue\n else:\n print(\"=== xls new name=%s,\" % name, len(g_done_results))\n parts.append([name, default_pwd, 0])\n num += 1\n pass\n else:\n print(\"=== select one xls file first\")\n pass\n return parts\n\n def do_set(self, st):\n self.radio1.config(state=st)\n self.radio2.config(state=st)\n self.text.config(state=st)\n self.select.config(state=st)\n self.start.config(state=st)\n if st == \"normal\":\n self.do_radio()\n\n def do_start(self):\n g_quit = False\n self.do_set(\"disabled\")\n self.master.wm_iconify()\n th = threading.Thread(target=do_backend,args=(self,)) \n th.setDaemon(True)\n th.start()\n\n def do_quit(self):\n g_quit = True\n self.do_close()\n self.master.destroy()\n\n def do_running(self):\n while True:\n acts = self.get_accounts(1)\n if len(acts) == 0:\n print(\"=== no more accounts and quit\")\n break\n #print(\">accounts:\", acts)\n self.do_init()\n self.do_task(acts)\n self.do_post(acts)\n #print(\">removed when using xlrs\")\n if self.v.get() == 1:\n break\n if g_quit:\n break # TODO\n #time.sleep(3)\n self.do_close()\n self.do_set(\"normal\")\n\n def do_init(self):\n if not self.km:\n self.km = CAutoKM()\n self.km.init()\n if not self.task:\n self.task = CTaskProcess()\n self.task.parse(strops)\n pass\n def do_task(self, acts):\n if self.km != None and self.task != None:\n self.task.go(self.km, acts)\n pass\n def do_post(self, acts):\n print(\"post:\", acts)\n do_update_results(acts)\n\n def do_close(self):\n if self.km != None:\n self.km.close()\n self.km = None\n pass\n\ndef do_backend(app):\n app.do_running()\n pass\n\nif __name__ == '__main__':\n #go_test1()\n do_load_answer()\n do_load_results()\n #sys.exit(0)\n\n aUI = True\n if not aUI:\n km = CAutoKM()\n km.init()\n task = CTaskProcess()\n task.parse(strops)\n task.go(km)\n time.sleep(3)\n else:\n root = tk.Tk()\n app = Application(master=root)\n #root.call('wm', 'attributes', '.', '-topmost', '1')\n #root.lift()\n root.attributes('-topmost', True)\n root.update()\n root.attributes('-topmost', False)\n app.mainloop()\n sys.exit(0)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":29015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} 
{"seq_id":"608893778","text":"if __name__ == \"__main__\":\r\n #Первое задание\r\n def func1(num):\r\n n = num * 5\r\n print(n)\r\n\r\n global_var = 42\r\n\r\n func1(global_var)\r\n func1(10)\r\n func1('bla')\r\n\r\n\r\n\r\n #Второе задание\r\n def func(n):\r\n if n < 3:\r\n n = n * 10\r\n\r\n return n\r\n ","sub_path":"Основы Python/12. параметры и аргументы функций, локальные и глобальные переменные/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"496608862","text":"class Solution(object):\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n\n i, j = 0, 1\n\n while j < len(nums):\n if nums[j] == nums[i]:\n # move j forward when duplicate found\n j += 1\n else:\n # j iterates faster than i.\n # in case there is duplicates, i+1 is safe to reused for the next unique number\n nums[i+1] = nums[j]\n i += 1\n j += 1\n\n return i + 1\n","sub_path":"0026-remove-duplicates-from-sorted-array.py","file_name":"0026-remove-duplicates-from-sorted-array.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"618466597","text":"from django.urls import path\n\nfrom .views.adminsek import PilihWilayahView\nfrom .views.base import (DaftarAduanView, TambahAduanView, DetailAduanView, CetakDetailAduanView, PilihAduanView,\n DilimpahkanView, DiteruskanView, DipindahkanView)\nfrom .views.korwil import CatatanView, KorwilUploadView, IdleView\nfrom .views.utils import DaftarSpamView, SpamView, get_auditee, unread_message\nfrom .views.verifikator import PesanView, DitolakView, ProsesView\n\napp_name = 'pengaduanApp'\n\nurlpatterns = [\n path('ajax/get_auditee/', get_auditee, name='getAuditeeUrl'),\n\n path('/aduan/', PilihAduanView.as_view(), name='pilihAduanUrl'),\n path('/catatan/', CatatanView.as_view(), name='catatanUrl'),\n path('/cetak/', CetakDetailAduanView.as_view(), name='cetakDetailUrl'),\n path('/dilimpahkan/', DilimpahkanView.as_view(), name='dilimpahkanUrl'),\n path('/diteruskan/', DiteruskanView.as_view(), name='diteruskanUrl'),\n path('/dipindahkan/', DipindahkanView.as_view(), name='dipindahkanUrl'),\n path('/diproses/', ProsesView.as_view(), name='diprosesUrl'),\n path('/ditolak/', DitolakView.as_view(), name='ditolakUrl'),\n path('/pesan/', PesanView.as_view(), name='pesanUrl'),\n path('/pilih_wilayah/', PilihWilayahView.as_view(), name='pilihWilayahUrl'),\n path('/spam/', SpamView.as_view(), name='spamUrl'),\n path('/idle/', IdleView.as_view(), name='idleUrl'),\n path('/upload/', KorwilUploadView.as_view(), name='korwilUploadUrl'),\n path('/', DetailAduanView.as_view(), name='detailAduanUrl'),\n path('spam/', DaftarSpamView.as_view(), name='daftarSpamUrl'),\n path('tambah/', TambahAduanView.as_view(), name='tambahAduanUrl'),\n path('unread/', unread_message, name='unreadPesanUrl'),\n path('', DaftarAduanView.as_view(), name='daftarAduanUrl'),\n]\n","sub_path":"simadu/pengaduan/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"474893787","text":"cases = int(input())\r\n\r\nfor i in range(cases):\r\n NSA = input().split()\r\n num = list(NSA[0])\r\n sourceLanguage = list(NSA[1])\r\n alienLanguage = list(NSA[2])\r\n conv = []\r\n sourceBase = len(sourceLanguage)\r\n tBase = len(alienLanguage)\r\n\r\n sConv = 0\r\n for j, n in 
enumerate(num[::-1]):\r\n sConv += sourceLanguage.index(n) * sourceBase**j\r\n \r\n while sConv > 0:\r\n conv.append(alienLanguage[sConv % tBase])\r\n sConv = sConv // tBase\r\n\r\n print(f\"Case #{i + 1}: \" + ''.join(conv[::-1]))\r\n","sub_path":"Python/Alien_Numbers.py","file_name":"Alien_Numbers.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"172917747","text":"import math\n\np = open(\"Primes\")\ncurr_line = p.readline()\nlist_of_primes = map(int,curr_line.split(\", \"))\n\ndef isPrime(num):\n\tlimit = int(math.sqrt(num))+1\n\tcurr = 0\n\tval = list_of_primes[curr]\n\twhile val < limit and val < 1000000:\n\t\tif num%val == 0:\n\t\t\treturn val\n\t\tcurr += 1\n\t\tval = list_of_primes[curr]\n\treturn 1\n\ndef toBinary(num,digits):\n\tif num == 0:\n\t\tresult = \"\"\n\t\tfor i in range(0,digits):\n\t\t\tresult += '0'\n\t\treturn result\n\tresult = toBinary(num/2,digits-1)\n\tif num%2 ==0:\n\t\tresult += '0'\n\telse:\n\t\tresult += '1'\n\treturn result\n\ndef fromBase(string,base):\n\tif (string == \"\"):\n\t\treturn 0\n\ttotal = base*fromBase(string[:len(string)-1],base)\n\ttotal += int(string[len(string)-1])\n\treturn total\n\ndef isJamcoin(string):\n\tvals = []\n\tfor i in range(2,11):\n\t\tvals.append(fromBase(string,i))\n\tdivisors = [True]\n\tfor v in vals:\n\t\tn = isPrime(v)\n\t\tif n == 1:\n\t\t\treturn [False,v,n]\n\t\tdivisors.append(n)\n\treturn divisors\n\n\nf = open('C-large.in')\noutput = open('C-large.out','w')\ncurr_line = f.readline()\nnumCases = int(curr_line)\nfor i in range(0,numCases):\n\tcurr_line = f.readline()\n\toutput.write(\"Case #%d:\" %(i+1))\n\tstrs = curr_line.split(' ')\n\tN = int(strs[0])\n\tJ = int(strs[1])\n\tnumFound = 0\n\tn = 0\n\tinDigs = N-2\n\twhile numFound < J and n < 2**(inDigs):\n\t\tcurrString = \"1\"\n\t\tcurrString += toBinary(n,inDigs)\n\t\tcurrString += \"1\"\n\t\tresult = isJamcoin(currString)\n\t\tif result[0]:\n\t\t\tfor i in range(1,len(result)):\n\t\t\t\tcurrString += \" \" + str(result[i])\n\t\t\toutput.write(\"\\n\" + currString)\n\t\t\tnumFound += 1\n\t\tn += 1\n\n\n","sub_path":"solutions_5738606668808192_1/Python/benshare/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"622702721","text":"from django.urls import path\nfrom . 
import views\nfrom .views import EventsList\n\nurlpatterns = [\n path('', views.home, name='index'),\n path('accounts/signup', views.signup, name='signup'),\n path('events/list', EventsList.as_view(), name='events_list'),\n path('events/create', views.show_event_create, name='show_event_create'),\n path('events/create/new', views.event_create, name='event_create'),\n path('events/', views.event_detail, name='detail'),\n path('events//update', views.UpdateEvent.as_view(), name='event_update'),\n path('events//delete', views.DeleteEvent.as_view(), name='event_delete'),\n path('events//attend', views.event_attend, name='event_attend'),\n path('events//add_photo/', views.add_photo, name='add_photo'),\n path('user/my_events/', views.user_events, name='user_events_list'),\n]","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"457131564","text":"from backend.main.deck import Deck\nfrom backend.main.card import Card\n\n\ndef test_deck_creation():\n \"\"\"\"\n Tests that the deck is created completely with 52 unique card objects.\n \"\"\"\n new_deck = Deck()\n assert isinstance(new_deck, Deck)\n assert isinstance(new_deck.deck_pile[0], Card)\n assert len(new_deck.deck_pile) == 52\n assert len(set(new_deck.deck_pile)) == 52\n\n\ndef test_deck_shuffle():\n \"\"\"\n Tests that the top card in the deck pile before a shuffle is different to the top card in the deck pile\n post the shuffle method.\n \"\"\"\n new_deck = Deck()\n\n top_card = new_deck.deck_pile[0]\n Deck.deck_shuffle(new_deck)\n new_top_card = new_deck.deck_pile[0]\n\n assert top_card != new_top_card\n\n\ndef test_deal_a_card():\n \"\"\"\n Checks that when the deal_a_card() is called the deck pile reduces by one and the dealt card is removed from the\n deck pile.\n \"\"\"\n new_deck = Deck()\n dealt_card = Deck.deal_a_card(new_deck)\n assert len(new_deck.deck_pile) == 51\n assert dealt_card not in new_deck.deck_pile\n","sub_path":"backend/tests/test_deck.py","file_name":"test_deck.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"257066012","text":"from socket import *\n\nserverPort = '''numero da porta do servidor'''\n\nserverSocket = socket(AF_INET, SOCK_STREAM)\n\n#atribui a porta ao socket criado\nserverSocket.bind(('', serverPort))\n\n#aceita conexões com no máximo um cliente na fila\nserverSocket.listen(1)\n\nprint('The server is ready to receive')\n\nwhile True:\n connectionSocket, addr = serverSocket.accept()\n \n #recebe a mensagem do cliente em bytes\n mensagem = connectionSocket.recv(1024).decode()\n \n #envio tbm deve ser em bytes\n connectionSocket.send(mensagem)\n \n connectionSocket.close()","sub_path":"TCPServer.py","file_name":"TCPServer.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"251728563","text":"import sys\nimport json\nimport pandas as pd\nimport numpy as np\nimport haversine as hs\nfrom datetime import datetime, timedelta\nimport plotly_express as px\nimport plotly\n\nfrom gmplot import *\n\nfrom classes import LoadingPlace, MixerTruck, Order, Delivery\n\nclass Trip: \n def __init__(self, MixerTruck, LoadigPlaceId, DeliveryId, ServiceHour,\n CustomerFlowRate, Cost, TravelTime, DatetimeNow, CodLoadingPlace,\n CodOrder, CodDelivery): \n self.MixerTruck = int(MixerTruck)\n 
self.LoadigPlaceId = int(LoadigPlaceId)\n self.DeliveryId = int(DeliveryId) \n self.ServiceHour = int(ServiceHour) \n self.CustomerFlowRate = int(CustomerFlowRate) \n self.DurationOfService = self.CustomerFlowRate * 8\n self.Cost = int(Cost) \n self.TravelTime = int(TravelTime)\n self.DatetimeNow = DatetimeNow\n self.ServiceTime = DatetimeNow + timedelta(minutes=self.ServiceHour)\n self.LoadBeginTime = self.ServiceTime - timedelta(minutes=self.TravelTime) - timedelta(minutes=10)\n self.ReturnTime = self.ServiceTime + timedelta(minutes=(self.CustomerFlowRate*8)) + timedelta(minutes=self.TravelTime)\n self.CodLoadingPlace = int(CodLoadingPlace)\n self.CodOrder = int(CodOrder)\n self.CodDelivery = int(CodDelivery)\n\ndef cplexBianchessi4RealResults(basePath, dataFolder, googleMapsApiKey):\n tripsJson = 0\n with open(basePath + '\\\\BianchessiResult4.json') as data_file: \n tripsJson = json.load(data_file)\n \n today = datetime.utcnow().date()\n startTime = datetime(today.year, today.month, today.day, 0, 0, 0, 0) \n\n np = tripsJson['numberOfLoadingPlaces']\n nc = tripsJson['numberOfDeliveries']\n nv = tripsJson['numberOfMixerTrucks']\n\n df = pd.DataFrame(tripsJson['routes'])\n\n trips = []\n for index, row in df.iterrows():\n if row['Delivery1'] != row['Delivery2'] and row['Delivery1'] != row['Delivery3'] and row['Delivery2'] != row['Delivery3']: \n trip1 = Trip(MixerTruck=row['MixerTruck'], LoadigPlaceId=row['LoadigPlaceId'], \n DeliveryId=row['Delivery1'], ServiceHour=row['ServiceTime1'], \n CustomerFlowRate=row['CustomerFlowRate1'], Cost=row['Cost1'], \n TravelTime=row['TravelTime1'], DatetimeNow=startTime, \n CodLoadingPlace=row['CodLoadingPlace'],\n CodOrder=row['CodOrder1'], CodDelivery=row['CodDelivery1'])\n trip2 = Trip(MixerTruck=row['MixerTruck'], LoadigPlaceId=row['LoadigPlaceId'], \n DeliveryId=row['Delivery2'], ServiceHour=row['ServiceTime2'], \n CustomerFlowRate=row['CustomerFlowRate2'], Cost=row['Cost2'], \n TravelTime=row['TravelTime2'], DatetimeNow=startTime,\n CodLoadingPlace=row['CodLoadingPlace'],\n CodOrder=row['CodOrder2'], CodDelivery=row['CodDelivery2'])\n trip3 = Trip(MixerTruck=row['MixerTruck'], LoadigPlaceId=row['LoadigPlaceId'], \n DeliveryId=row['Delivery3'], ServiceHour=row['ServiceTime3'], \n CustomerFlowRate=row['CustomerFlowRate3'], Cost=row['Cost3'], \n TravelTime=row['TravelTime3'], DatetimeNow=startTime,\n CodLoadingPlace=row['CodLoadingPlace'],\n CodOrder=row['CodOrder3'], CodDelivery=row['CodDelivery3'])\n trips.append(trip1)\n trips.append(trip2)\n trips.append(trip3)\n elif row['Delivery1'] == row['Delivery2'] and row['Delivery1'] != row['Delivery3']: \n trip1 = Trip(MixerTruck=row['MixerTruck'], LoadigPlaceId=row['LoadigPlaceId'], \n DeliveryId=row['Delivery1'], ServiceHour=row['ServiceTime1'], \n CustomerFlowRate=row['CustomerFlowRate1'], Cost=row['Cost1'], \n TravelTime=row['TravelTime1'], DatetimeNow=startTime,\n CodLoadingPlace=row['CodLoadingPlace'],\n CodOrder=row['CodOrder1'], CodDelivery=row['CodDelivery1'])\n trip3 = Trip(MixerTruck=row['MixerTruck'], LoadigPlaceId=row['LoadigPlaceId'], \n DeliveryId=row['Delivery3'], ServiceHour=row['ServiceTime3'], \n CustomerFlowRate=row['CustomerFlowRate3'], Cost=row['Cost3'], \n TravelTime=row['TravelTime3'], DatetimeNow=startTime,\n CodLoadingPlace=row['CodLoadingPlace'],\n CodOrder=row['CodOrder3'], CodDelivery=row['CodDelivery3'])\n trips.append(trip1)\n trips.append(trip3)\n elif row['Delivery1'] != row['Delivery2'] and row['Delivery2'] == row['Delivery3']: \n trip1 = 
Trip(MixerTruck=row['MixerTruck'], LoadigPlaceId=row['LoadigPlaceId'], \n DeliveryId=row['Delivery1'], ServiceHour=row['ServiceTime1'], \n CustomerFlowRate=row['CustomerFlowRate1'], Cost=row['Cost1'], \n TravelTime=row['TravelTime1'], DatetimeNow=startTime,\n CodLoadingPlace=row['CodLoadingPlace'],\n CodOrder=row['CodOrder1'], CodDelivery=row['CodDelivery1'])\n trip2 = Trip(MixerTruck=row['MixerTruck'], LoadigPlaceId=row['LoadigPlaceId'], \n DeliveryId=row['Delivery2'], ServiceHour=row['ServiceTime2'], \n CustomerFlowRate=row['CustomerFlowRate2'], Cost=row['Cost2'], \n TravelTime=row['TravelTime2'], DatetimeNow=startTime,\n CodLoadingPlace=row['CodLoadingPlace'],\n CodOrder=row['CodOrder2'], CodDelivery=row['CodDelivery2'])\n trips.append(trip1)\n trips.append(trip2)\n elif row['Delivery1'] == row['Delivery2'] and row['Delivery1'] == row['Delivery3'] and row['Delivery2'] == row['Delivery3']: \n trip1 = Trip(MixerTruck=row['MixerTruck'], LoadigPlaceId=row['LoadigPlaceId'], \n DeliveryId=row['Delivery1'], ServiceHour=row['ServiceTime1'], \n CustomerFlowRate=row['CustomerFlowRate1'], Cost=row['Cost1'], \n TravelTime=row['TravelTime1'], DatetimeNow=startTime,\n CodLoadingPlace=row['CodLoadingPlace'],\n CodOrder=row['CodOrder1'], CodDelivery=row['CodDelivery1'])\n trips.append(trip1)\n else:\n print('Delivery1: ' + str(row['Delivery1']) + \n ' Delivery2: ' + str(row['Delivery2']) + \n ' Delivery3: ' + str(row['Delivery3']))\n\n df = pd.DataFrame([vars(t) for t in trips])\n\n df['LoadBeginTime'] = pd.to_datetime(df['LoadBeginTime'])\n df['ReturnTime'] = pd.to_datetime(df['ReturnTime'])\n df['ServiceTime'] = pd.to_datetime(df['ServiceTime'])\n\n df['FINAL'] = ''\n df['BEGIN'] = ''\n df['Arrival'] = ''\n df['FINAL'] = df['ReturnTime'].dt.strftime(\"%A, %d. %B %Y %I:%M%p\")\n df['BEGIN'] = df['LoadBeginTime'].dt.strftime(\"%A, %d. %B %Y %I:%M%p\")\n df['Arrival'] = df['ServiceTime'].dt.strftime(\"%A, %d. 
%B %Y %I:%M%p\")\n\n fig = px.timeline(df, \n x_start=df['LoadBeginTime'], \n x_end=df['ReturnTime'], \n y=df['MixerTruck'], \n color=df['CodOrder'], \n hover_data={ 'BEGIN': True, 'FINAL': True, \n 'LoadBeginTime': False, 'ReturnTime': False, \n 'ServiceTime': True, \n 'MixerTruck': True,\n 'DurationOfService': True, 'Cost': True, 'TravelTime': True, \n 'LoadigPlaceId': True, 'CodLoadingPlace': True,\n 'CodOrder': True, 'CodDelivery': True },\n title='BianchessiRealData')\n fig.update_yaxes(autorange='reversed')\n fig.update_layout(title_font_size=42, font_size=18, title_font_family='Arial')\n plotly.offline.plot(fig, filename=basePath + '\\\\Bianchessi4RealDataGant.html')","sub_path":"RealResultsGenerator/cplexBianchessi4RealResults.py","file_name":"cplexBianchessi4RealResults.py","file_ext":"py","file_size_in_byte":7638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"636061911","text":"import cv2\nfrom base_camera import BaseCamera\nimport os\nimport time\nimport random\nfrom datetime import datetime\n\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\n#print(a)\nclass Camera(BaseCamera):\n video_source = 0\n\n @staticmethod\n def set_video_source(source):\n Camera.video_source = source\n \n \n \n #==================================================\n # Flush buffer in camera.\n def flushCamera(camera):\n delay = 0\n\n framesWithDelayCount = 0\n flushed_frames = 0\n\n while (framesWithDelayCount <= 1):\n timer_start = time.time()\n\n camera.grab()\n flushed_frames += 1\n\n delay = time.time() - timer_start\n\n if (delay > 0):\n framesWithDelayCount += 1\n\n @staticmethod\n def frames():\n camera = cv2.VideoCapture(Camera.video_source)\n flushCamera(camera)\n #option 1\n while True: \n try:\n ret, frame = camera.read()\n flushCamera(camera)\n if frame is None:\n camera = cv2.VideoCapture(Camera.video_source)\n time.sleep(2)\n continue\n #logic\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n except:\n pass\n \n \n #option 2\n while True:\n if camera.isOpened():\n for i in range(5):\n ret, image = camera.read()\n if ret:\n if image == 'None':\n continue\n try:\n #logic\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n except:\n pass\n \n \n #option 3\n while True:\n try:\n for i in range(5):\n ret, frame = camera.read()\n \n if frame is None:\n # print(\"frame None\")\n camera = cv2.VideoCapture(Camera.video_source)\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n continue\n \n #logic\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n except:\n pass\n","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"555952467","text":"from collections import deque\n\n\ndef Lagged_Fibonacci_Generator():\n A = deque()\n k = 1\n while k < 56:\n s = (100003 - 200003 * k + 300007 * k ** 3) % 1000000\n A.append(s)\n yield (s)\n k += 1\n\n while True:\n s = (A[- 24] + A.popleft()) % 1000000\n A.append(s)\n yield (s)\n\n\ndef Call_Generator():\n LFG = Lagged_Fibonacci_Generator()\n while True:\n a = next(LFG)\n b = next(LFG)\n if a != b:\n yield (a, b)\n\n\nminister = 524287\ncitizens = 10 ** 6\ntarget = 0.99\n\nParent = list(range(citizens))\nSize = [1 for x in Parent]\n\n\ndef root(Parent, n):\n while Parent[n] != n:\n n = Parent[n]\n return (n)\n\n\ndef union(Parent, Size, n1, n2):\n r1 = root(Parent, n1)\n r2 = root(Parent, n2)\n s1 = Size[r1]\n s2 = Size[r2]\n 
if r1 == r2:\n return ()\n\n if s1 > s2:\n major = r1\n minor = r2\n else:\n major = r2\n minor = r1\n\n Parent[minor] = major\n Size[major] = s1 + s2\n\n\nprop = Size[minister] / citizens\nn = 0\nCG = Call_Generator()\nwhile prop < target:\n a, b = next(CG)\n union(Parent, Size, a, b)\n n += 1\n minister = root(Parent, minister)\n prop = Size[minister] / citizens\n if n % 100000 == 0:\n print(n, prop)\n\nprint(n, prop)\n","sub_path":"problems/old/pb186_2.py","file_name":"pb186_2.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"514514904","text":"# Input Specification\n# The user will be prompted to enter two integers. First, the user will be prompted to enter the speed\n# limit. Second, the user will be prompted to enter the recorded speed of the car.\n\n# Output Specification\n# If the driver is not speeding, the output should be:\n# Congratulations, you are within the speed limit!\n# If the driver is speeding, the output should be:\n# You are speeding and your fine is $F .\n# where F is the amount of the fine as described in the table above.\n\n'''\n1 to 20 -> 100\n21 to 30 -> 270\n31 and above -> 500\n'''\nspeed_limit = int(input('Enter the speed limit: '))\nspeed = int(input('Enter the recorded speed of the car: '))\n\nif speed <= speed_limit: \n print('Congratulations, you are within the speed limit!')\n\nelse:\n if speed - speed_limit <= 20:\n fine = 100\n elif speed - speed_limit <= 30:\n fine = 270\n else:\n fine = 500\n print('You are speeding and your fine is $' + str(fine) + '.')\n","sub_path":"competition-questions/2012/J1-2012.py","file_name":"J1-2012.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"443670630","text":"class Animal:\n # количество лап\n _legs_number = 0\n\n # количество пар глаз\n _eyes_pair = 0\n\n def __init__(self, age=None):\n if age is None:\n age = 1\n\n if not (isinstance(age, int) or isinstance(age, float)):\n raise TypeError('Возвраст должен быть числом')\n\n if age <= 0:\n raise ValueError('Возраст должен быть неотрицательным числом')\n\n self._age = age\n\n @property\n def age(self):\n return self._age\n\n @age.setter\n def age(self, age):\n if not (isinstance(age, int) or isinstance(age, float)):\n raise TypeError('Возраст должен быть числом')\n\n if age <= 0:\n raise ValueError('Возвраст должен быть неотрицательным числом')\n\n self._age = age\n\n def __str__(self):\n return str.format(\n 'Количество лап: {}\\nКоличество пар глаз: {}',\n self._legs_number, self._eyes_pair\n )\n\n def show_age(self):\n print(str.format('Возраст животного: {} месяца', self._age))\n\n def show_legs_and_eyes(self):\n print(self.__str__())\n\n\nclass Mammal(Animal):\n # количество пар глаз (переопределяем атрибут класса)\n _eyes_pair = 1\n\n def __init__(self, age=None, is_pregnant=False):\n # raise NotImplementedError(\"Абстрактный класс\")\n # вызываем конструктор родительского класса\n super().__init__(age)\n\n if not isinstance(is_pregnant, bool):\n raise TypeError(\"Аргумент is_pregnant должен быть булевого типа\")\n\n self._is_pregnant = is_pregnant\n\n @property\n def is_pregnant(self):\n return self._is_pregnant\n\n @is_pregnant.setter\n def is_pregnant(self, is_pregnant):\n if not isinstance(is_pregnant, bool):\n raise TypeError('Аргумент is_pregnant должен быть булевого типа')\n\n self._is_pregnant = is_pregnant\n\n\nclass DomesticMammal(Mammal):\n def 
__init__(self, name, favorite_toy, age=None, is_pregnant=False):\n if not isinstance(name, str):\n raise TypeError('Аргумент name должен быть строкой')\n\n self._name = name\n\n if not isinstance(favorite_toy, str):\n raise TypeError('Аргумент favorite_toy должен быть строкой')\n\n self._favorite_toy = favorite_toy\n\n # вызываем конструктор родительского класса Mammal\n super().__init__(age, is_pregnant)\n\n @property\n def name(self):\n return self._name\n\n @property\n def favorite_toy(self):\n return self._favorite_toy\n\n @favorite_toy.setter\n def favorite_toy(self, favorite_toy):\n self._favorite_toy = favorite_toy\n\n def talks(self):\n print(str.format('{}: говорит', self._name))\n\n\nclass Dog(DomesticMammal):\n # количество лап (переопределяем атрибут класса)\n _legs_number = 4\n\n _breed = 'Dog'\n _breed_family = 'Simple Dog'\n\n def __init__(self, name, favorite_toy, age=None, is_pregnant=False):\n super().__init__(name, favorite_toy, age, is_pregnant=is_pregnant)\n\n\ndef main():\n print('*' * 80)\n a = Animal(3)\n print(a)\n a.show_age()\n\n print('-' * 80)\n m = Mammal(is_pregnant=True)\n m.show_legs_and_eyes()\n\n print('*' * 80)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"core/oop/app14.py","file_name":"app14.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"141475923","text":"# import\n\n# -*- coding:utf-8 -*-\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# データ読み込み\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n# 計算グラフ\nx = tf.placeholder(tf.float32, shape=(None, 784))\nimg = tf.reshape(x,[-1,28,28,1])\n\n### 畳み込み層1\n# 使わない場合\nw_conv = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))\nb_conv = tf.Variable(tf.zeros(shape=[32]))\nconv = tf.nn.conv2d(img, w_conv, strides=[1, 1, 1, 1], padding= \"SAME\")\nconv1 = tf.nn.relu(conv + b_conv)\n\n# 使った場合\n#conv1 = tf.layers.conv2d(\n# inputs=img, # 入力するテンソル\n# filters=32, # 畳み込み後のチャンネル数\n# strides=(1, 1), # ストライド [縦方向,横方向] \n# kernel_size=[5, 5], # フィルタのサイズ [高さ,幅] \n# padding=\"same\", # パディング\n# activation=tf.nn.relu # 活性化関数Relu \n#)\n\n#プーリング層1\n# 使わない場合\npool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n# 使った場合\n#pool1 = tf.layers.max_pooling2d(conv1, pool_size=[2, 2], strides=2, padding=\"SAME\")\n\n### 畳み込み層2\"\n# 畳み込み層2 プーリング層2\nconv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\npool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n# 全結合層\n#畳み込まれているものをフラットな形に変換\npool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n# 使った場合\n#dense = tf.layers.dense(inputs=pool2_flat,units=1024,activation=tf.nn.relu)\n# 使わない場合\nw_hidden = tf.Variable(tf.truncated_normal([7*7*64, 1024], stddev=0.1))\nb_hidden = tf.Variable(tf.zeros(shape=[1024]))\ndense = tf.nn.relu(tf.matmul(pool2_flat, w_hidden) + b_hidden)\n\n#出力層\nout = tf.layers.dense(\n inputs=dense,\n units=10,\n activation=tf.nn.softmax)\n\n#正解データの型を定義\ny = tf.placeholder(tf.float32, [None, 10])\n\n#損失関数\nloss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(out), reduction_indices=[1]))\n\n#訓練\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)\n\n#評価\ncorrect = tf.equal(tf.argmax(out,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\n# 訓練\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# 
学習\nfor i in range(1000):\n train_images, train_labels = mnist.train.next_batch(50)\n sess.run( train_step, feed_dict={x: train_images, y: train_labels})\n\n step = i+1 \n if step % 10 == 0:\n loss_val, acc_val = sess.run([loss, accuracy], feed_dict={x: mnist.validation.images, y: mnist.validation.labels})\n print('Step %d: accuracy = %.2f\\tloss = %.2f' % (step, acc_val, loss_val))\n\n\n# テストデータで評価\nprint(\"正解率 : \", sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))\n","sub_path":"code/mnist_cnn/mnist_cnn.py","file_name":"mnist_cnn.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"384034164","text":"import sys\n\n# Given: At most 50 DNA strings of approximately equal length, not exceeding 1 kbp, in FASTA format (which represent reads deriving from the same strand of a single linear chromosome).\n#\n# The dataset is guaranteed to satisfy the following condition: there exists a unique way to reconstruct the entire chromosome from these reads by gluing together pairs of reads that overlap by more than half their length.\n#\n# Return: A shortest superstring containing all the given strings (thus corresponding to a reconstructed chromosome).\n\nfile_path = sys.argv[1]\n\nseq_list = []\n#creates list with all given sequences in it\nwith open(file_path,'r') as file_object:\n current_string = \"\"\n for line in file_object:\n if not line.startswith(\">\"):\n current_string += line.rstrip()\n else:\n seq_list.append(current_string)\n current_string = \"\"\n seq_list.append(current_string)\n# I am still looking for a better alternative to read multiple lines in a file into one\n# string than this, but it works for now.\ndel seq_list[0]\n\n#function to calculate the shared overlap between 2 strings\nall_overlaps = []\ndef common_suffix_prefix(string1, string2):\n csp_list = [string1,string2]\n longest_sub = \"\"\n max_length = 0\n for index in range(1, len(string1)):\n if string1[-index:] == string2[:index]:\n if len(string1[-index:]) > max_length:\n longest_sub = string1[-index:]\n max_length = len(string1[-index:])\n csp_list.append(longest_sub)\n csp_list.append(max_length)\n all_overlaps.append(csp_list)\n# now you have all your overlaps in the list 'all_overlapps'\n\n# Lastly, pick the greatest overlap between 2 sequences in the sequence list, concatenate them,\n# remove their initial string values from the sequence list and append the concatenated version.\n# Repeat this step while there is still more than one sequence in the list of sequences.\nwhile(len(seq_list)>1):\n for item in seq_list:\n for index in range(0,len(seq_list)):\n common_suffix_prefix(item,seq_list[index]) #calculates the overlap between any two sequences of seq_list\n max_overlap = 0\n max_overlap_seqs = [] #[0] and [1] are the sequences, [2] the overlap score\n for item in all_overlaps: # generates all the overlaps between two sequences and saves all important values in a list\n for index in range(0,len(all_overlaps)):\n if item[3] > max_overlap:\n max_overlap = item[3]\n max_overlap_seqs.clear()\n max_overlap_seqs.append(item[0])\n max_overlap_seqs.append(item[1])\n max_overlap_seqs.append(item[2])\n\n seq_list.remove(max_overlap_seqs[0])\n seq_list.remove(max_overlap_seqs[1])\n\n #note: for the second sequence, the overlap is cut out, only concatenating the rest of the sequence to the new string\n new_string = max_overlap_seqs[0] + (max_overlap_seqs[1])[max_overlap:] #new_string = greatest overlap 
of 2 sequences\n max_overlap_seqs.clear()\n all_overlaps.clear()\n seq_list.append(new_string)\nprint(seq_list) #Only element left in the sequence list is the Shortest Superstring\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Bioinformatics_Stronghold/Genome_Assembly_as_Shortest_Superstring.py","file_name":"Genome_Assembly_as_Shortest_Superstring.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"75112555","text":"import discord\nimport random\nfrom management.kicking import *\nstarted = False\nusers = []\ndef StartPurge():\n\tusers = []\n\tglobal started\n\tif not started:\n\t\tstarted = True\n\telse:\n\t\treturn False\ndef AddPurgeUser(user):\n\tglobal started\n\tif started:\n\t\texists = False\n\t\tfor u in users:\n\t\t\tif u.name == user.name:\n\t\t\t\texists = True\n\t\tif not exists:\n\t\t\tusers.append(user)\n\telse:\n\t\treturn False\nasync def EndPurge(client):\n\tstarted = False\n\tfor u in users:\n\t\tif random.randrange(0,2) == 1:\n\t\t\tawait KickUser(client, u)\n\t\t\tusers.remove(u)\n\treturn users\n\n","sub_path":"games/purge.py","file_name":"purge.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"194600190","text":"# -*- coding: utf-8 -*-\n# 1:引入单元测试\n# 2:引入ddt\n# 3:测试用例里面引入引入try...except..finally,并写回测试结果\n# 4:引入日志\n# 5:完成用例的可配置化:想跑哪条用例,就在配置文件里面写好\n# 6:搞定全局变量(path变量,数据与文件分离)\nimport unittest\nfrom ddt import ddt,data\nfrom API_Program.API_03_1.common.do_excel import DoExcel\nfrom API_Program.API_03_1.common.log_test import MyLog\nfrom API_Program.API_03_1.common.http_request import HttpRequest\nfrom API_Program.API_03_1.common import project_path\nfrom API_Program.API_03_1.common.get_data import GetData\n# 测试数据\nbidloan_data=DoExcel(project_path.case_path,'Bidloan').read_data('BidloanCase')\n\n@ddt\nclass RunCase(unittest.TestCase):\n def setUp(self):\n '''准备测试数据,测试前的准备工作'''\n self.do_exl=DoExcel(project_path.case_path,'Bidloan')\n self.my_log=MyLog()\n self.http=HttpRequest()\n @data(*bidloan_data)\n def test_case(self,case):\n global result\n # 取到request里需要的参数\n url=case['Url']\n param=eval(case['Params'])\n method=case['Method']\n expected=eval(case['ExpectedResult'])\n # 准备测试\n self.my_log.info('开始执行{}模块第{}用例:{}'.format(case['Module'],case['CaseId'],case['Title']))\n self.my_log.info('参数是:{}'.format(param))\n\n res=self.http.http_request(method,url,param,cookies=getattr(GetData,'cookies'))\n print('实际结果是:{}'.format(res.json()))\n # 判断是否有cookies,有就将cookies重新根据反射赋值\n if res.cookies:\n setattr(GetData,'cookies',res.cookies)\n try:\n self.assertEqual(expected['code'],res.json()['code'])\n result='pass'\n self.my_log.info('该条测试用例通过')\n except AssertionError as e:\n result='failed'\n self.my_log.error('该条用例不通过:{}'.format(e))\n finally:\n final_result=result\n self.my_log.info('******开始写入数据******')\n self.do_exl.write_back(case['CaseId']+1,8,res.text) #写入实际结果\n self.do_exl.write_back(case['CaseId']+1,9,final_result) #写入测试结果\n self.my_log.info('******写入数据完毕******')\n","sub_path":"API_Program/API_03_1/common/run_05_bidloan.py","file_name":"run_05_bidloan.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"20280271","text":"from .digest_enzyme import DigestEnzyme\n\nclass Trypsin(DigestEnzyme):\n NAME = \"Trypsin\"\n SHORTCUT = \"try\"\n REGEX = r\"(?<=[KR])(?!P)\"\n\n\n def __init__(self, 
max_number_of_missed_cleavages = 0, minimum_peptide_length = 0, maximum_peptide_length = 1):\n super().__init__(\n self.NAME,\n self.SHORTCUT,\n self.REGEX,\n max_number_of_missed_cleavages,\n minimum_peptide_length,\n maximum_peptide_length\n )","sub_path":"trypperdb/proteomics/enzymes/trypsin.py","file_name":"trypsin.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"300290222","text":"# -*- encoding: utf-8 -*-\n# @File: 226.py \n# @Time: 2020-08-03 00:22\n# @Author: ZHANG\n# @Description: 226\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n \"\"\"Mine\"\"\"\n def invertTree(self, root: TreeNode) -> TreeNode:\n if not root:\n return None\n root.left, root.right = root.right, root.left\n self.invertTree(root.left)\n self.invertTree(root.right)\n return root\n\n \"\"\"Official\"\"\"\n # def invertTree(self, root):\n # if not root:\n # return None\n # left = self.invertTree(root.left)\n # right = self.invertTree(root.right)\n # root.left = right\n # root.right = left\n # return root\n","sub_path":"lc/226.py","file_name":"226.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"168399812","text":"import math\n\ndef count(num):\n letters = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\"]\n\n letterDigits = [0] #Count from nothing\n for _ in range(num):\n #Increment\n letterDigits[0] = letterDigits[0] + 1\n\n for j in range(len(letterDigits)):\n if letterDigits[j] > len(letters):\n letterDigits[j] = 1\n\n if j+1 < len(letterDigits): #check If there is a digit ahead based on length of letterDigits +1, since 1 based, increment it, otherwise create one\n letterDigits[j+1] = letterDigits[j+1] + 1\n else:\n letterDigits.append(1)\n\n #Convert to letter\n lettersText = \"\"\n for i in range(len(letterDigits)):\n lettersText += (letters[letterDigits[i] - 1])\n\n print(lettersText)\n\nfor i in range(10000):\n count(i+ 1)","sub_path":"NumberToLetter.py","file_name":"NumberToLetter.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"396278624","text":"# testServo.py - test servo on channel using pca9685\n# PWM and servo featherwing\n# 2017_0205 CircuitPython version\n# source: https://learn.adafruit.com/micropython-hardware-pca9685-pwm-and-servo-driver/software\n\nimport time\n\n# Adafruit CircuitPython I2C Initialization\nfrom board import *\nimport bitbangio as io # Huzzah ESP8266 uses soft I2C\ni2c = io.I2C(SCL, SDA)\n\n# Import Servo Module\nfrom adafruit_pca9685 import servo\nservos = servo.Servos(i2c)\n\n# There are a few ways to control the position of the servo\n# using the position function. One way is to specify the\n# pulse length in microseconds. 
Most servos will go to their\n# center position at a pulse length of 1500 microseconds,\n# a 90 degree extreme at 2000 microseconds,\n# and the opposite 90 degree extreme at 1000 microseconds.\n\n# Try setting the servo to its center position with a\n# pulse length of 1500 microseconds.\n# Servo is connected to channel 5\nCHANNEL = 5\nservos.position(CHANNEL, us=1500)\ntime.sleep(1.0)\n\n# Try other extremes like 2000 and 1000 microseconds\n# to see how the servo moves:\nservos.position(CHANNEL, us=2000)\ntime.sleep(1.0)\nservos.position(CHANNEL, us=1000)\ntime.sleep(1.0)\n\n# You can also specify a position as an angle.\n# This is a little trickier to use since you'll need\n# to know the total angle that your servo can sweep between.\n# The default is 180 degrees but your servo might have a\n# smaller sweep--change the total angle by specifying the\n# degrees parameter in the Servos class initializer above.\nservos.position(CHANNEL, degrees=180)\ntime.sleep(1.0)\nservos.position(CHANNEL, degrees=0)\ntime.sleep(1.0)\n\n# sweep servo between 0 and 180 degrees\ndef sweep(channel, delay):\n while True:\n for i in range(180):\n servos.position(channel, degrees=i)\n time.sleep(delay)\n for i in range(180, 0, -1):\n servos.position(channel, degrees=i)\n time.sleep(delay)\nsweep(5, 0.005)\n","sub_path":"PCA9685_FeatherWing/testServo.py","file_name":"testServo.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"62578234","text":"#pylint: disable=E0611, w0614, R0903\n#-*-coding: utf-8 -*-\nimport sys\nimport os.path\nimport logging\n#from optparse import OptionParser\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox\nfrom plus.web import MainView\n\nclass Window(QMainWindow):\n def __init__(self,file=\"index.html\"):\n super().__init__()\n self.view = MainView()\n self._initUI()\n #self.view.load(QUrl.fromLocalFile(os.path.join(os.path.dirname( os.path.abspath( __file__ ) ), file)))\n self.view.load(QUrl('http:/localhost:8080'))\n \n def _initUI(self):\n self.setCentralWidget(self.view)\n self.setMinimumSize(1300, 800)\n self.setWindowTitle(\"QWebview-plus for Kiwoom\")\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message', \"Are you sure to quit?\",\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n #API 문제로 잠시 이용.\n self.view.kiwoom.quit()\n event.accept()\n else:\n event.ignore() \n #event.accept() if reply == QMessageBox.Yes else event.ignore()\n\ndef main():\n #setup logger config\n logging.basicConfig(level=logging.DEBUG)\n\n #parsing command line arguments\n #parser = OptionParser()\n #parser.add_option(\"-p\", \"--port\", action=\"store\", type=\"string\", dest=\"port\", help=\"크롬 원격 디버깅 포트\")\n #parser.add_option(\"-f\", \"--file\", action=\"store\", type=\"string\", dest=\"file\", help=\"시작 파일 경로\", default=\"index.html\")\n #(opt, args) = parser.parse_args()\n\n #if os.path.isfile(opt.file):\n os.environ[\"QTWEBENGINE_REMOTE_DEBUGGING\"] = \"0.0.0.0: 8888\"\n app = QApplication(sys.argv)\n window = Window(\"view_source/dist/index.html\")\n window.show()\n sys.exit(app.exec_())\n #else:\n # parser.print_help()\n # sys.exit()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"500700153","text":"from 
django.contrib.auth import authenticate, login\nfrom django.contrib.auth import logout\nfrom django.shortcuts import render, get_object_or_404\nfrom .forms import UserForm, DatasetForm\nfrom .models import Dataset, Competition\nfrom pandas import read_csv\nfrom django.contrib.auth.models import User\n\nACCEPTED_FILE_TYPES = ['csv']\n\n\ndef create_dataset(request):\n if not request.user.is_authenticated():\n return render(request, 'mlearn/login.html')\n else:\n form = DatasetForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n dataset = form.save(commit=False)\n dataset.user = request.user\n file_type = dataset.data_file.url.split('.')[-1].lower()\n if file_type not in ACCEPTED_FILE_TYPES:\n context = {\n 'dataset': dataset,\n 'form': form,\n 'error_message': 'File must be CSV or TXT format',\n }\n return render(request, 'mlearn/create_dataset.html', context)\n dataset.data_file = request.FILES['data_file']\n dataset.file_path = dataset.data_file.url\n dataset.file_name = dataset.file_path.split('/')[-1].split('.')[0]\n dataset.file_type = file_type\n dataset.save()\n name = dataset.file_name\n url = 'http://127.0.0.1:8000' + dataset.file_path\n dframe = read_csv(url, header=0, parse_dates=True) \\\n .to_html(bold_rows=True, classes=[\"table table-striped\", \"table table-bordered\", \"table table-hover\"])\n return render(request, 'mlearn/show_dataset.html', {'dframe': dframe, 'name': name})\n context = {\n \"form\": form,\n }\n return render(request, 'mlearn/create_dataset.html', context)\n\n\ndef delete_dataset(request, dataset_id):\n dataset = Dataset.objects.get(pk=dataset_id)\n dataset.delete()\n datasets = Dataset.objects.filter(user=request.user)\n return render(request, 'mlearn/index.html', {'datasets': datasets})\n\n\ndef show_dataset(request, dataset_id):\n if not request.user.is_authenticated():\n return render(request, 'mlearn/login.html')\n else:\n dataset = Dataset.objects.get(pk=dataset_id)\n name = dataset.file_name\n url = 'http://127.0.0.1:8000' + dataset.file_path\n dframe = read_csv(url, header=0, parse_dates=True)\\\n .to_html(bold_rows=True, classes=[\"table table-striped\", \"table table-bordered\", \"table table-hover\"])\n return render(request, 'mlearn/show_dataset.html', {'dframe': dframe, 'name': name})\n\n\ndef index(request):\n if not request.user.is_authenticated():\n return render(request, 'mlearn/login.html')\n else:\n users = User.objects.all()\n competitions = Competition.objects.all()\n\t\t#albums = Album.objects.all()\n #song_results = Song.objects.all()\n query = request.GET.get(\"q\")\n if query:\n competitions = competitions.filter(\n Q(comp_name__icontains=query) |\n Q(comp_desc__icontains=query)\n ).distinct()\n return render(request, 'mlearn/index.html', {\n 'competitions': competitions,\n })\n else:\n return render(request, 'mlearn/index.html', {'competitions': competitions})\n\n\ndef login_user(request):\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n datasets = Dataset.objects.filter(user=request.user)\n return render(request, 'mlearn/index.html', {'datasets': datasets})\n else:\n return render(request, 'mlearn/login.html', {'error_message': 'Your account has been disabled'})\n else:\n return render(request, 'mlearn/login.html', {'error_message': 'Invalid login'})\n return render(request, 'mlearn/login.html')\n\n\ndef 
logout_user(request):\n logout(request)\n form = UserForm(request.POST or None)\n context = {\n \"form\": form,\n }\n return render(request, 'mlearn/login.html', context)\n\n\ndef register(request):\n form = UserForm(request.POST or None)\n if form.is_valid():\n user = form.save(commit=False)\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n datasets = Dataset.objects.filter(user=request.user)\n return render(request, 'mlearn/index.html', {'datasets': datasets})\n context = {\n \"form\": form,\n }\n return render(request, 'mlearn/register.html', context)\n\n\ndef detail(request, competition_id):\n if not request.user.is_authenticated():\n return render(request, 'mlearn/login.html')\n else:\n competition = get_object_or_404(Competition, pk=competition_id)\n return render(request, 'mlearn/detail.html', {'competition': competition,})\n","sub_path":"mlearn/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"4840637","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, SUPERUSER_ID\nfrom odoo.exceptions import AccessError, ValidationError, UserError\n\n\n# class HrAppraisalComment(models.Model):\n# _name = \"hr.appraisal.comment\"\n#\n# name = fields.Char('Comment')\n# user_id = fields.Many2one('res.users', default=lambda self: self.env.user.id)\n# related_objective = fields.Many2one('hr.appraisal.objective', 'Related Objective')\n# related_appraisal = fields.Many2one('hr.appraisal', 'Related Appraisal')\n#\n# def write(self, vals):\n# if self.user_id.id != self.env.user.id:\n# if len(vals) == 1 and not vals.get('related_appraisal') and not vals.get('related_objective'):\n# raise UserError('Only comment owner can modified!')\n# elif len(vals) > 1:\n# raise UserError('Only comment owner can modified!')\n# return super(HrAppraisalComment, self).write(vals)\n\n\nclass HrAppraisalPeriod(models.Model):\n _name = 'hr.appraisal.period'\n\n name = fields.Char('Period')\n\n from_date = fields.Date('From Date')\n to_date = fields.Date('To Date')\n year = fields.Char(\n string='Year',\n required=False)\n\n @api.onchange('from_date', 'to_date')\n def _onchange_date(self):\n for rec in self:\n if rec.from_date and rec.to_date:\n rec.name = \"Appraisal %s / %s\" % (rec.from_date, rec.to_date)\n\n\nclass HrAppraisal(models.Model):\n _inherit = \"hr.appraisal\"\n\n def _default_stage_id(self):\n return self.env['hr.appraisal.stage'].search([], order='sequence asc', limit=1).id\n\n appraisal_objectives = fields.One2many('hr.appraisal.objective', 'related_appraisal',\n domain=[('state', '!=', 'deleted')], string='Appraisal Objectives')\n appraisal_edit_objectives = fields.One2many('hr.appraisal.objective', 'related_appraisal',\n domain=[('state', '!=', 'deleted')], string='Appraisal Edit Objectives')\n deleted_appraisal_objectives = fields.One2many('hr.appraisal.objective', 'related_appraisal',\n domain=[('state', '=', 'deleted')],\n string='Deleted Appraisal Objectives')\n appraisal_training = fields.One2many('hr.training', 'related_appraisal', string='Appraisal Training')\n\n appraisal_form = fields.Many2one('hr.appraisal.form', 'Appraisal Form', required=False)\n\n related_period = fields.Many2one(related='appraisal_form.period_id')\n\n employee_grade = 
fields.Integer(related='employee_id.job_id.job_grade.level', string=\"Employee Grade\", store=True)\n contract_subgroup = fields.Many2one(related='employee_id.contract_id.contract_subgroup', string=\"Employee Type\")\n\n # comments = fields.One2many('hr.appraisal.comment', 'related_appraisal', 'Comments')\n\n current_manager = fields.Many2one('hr.employee', 'Current Manager', related='employee_id.parent_id')\n\n stage_id = fields.Many2one('hr.appraisal.stage', 'Stage', ondelete='restrict', tracking=True,\n copy=False, index=True,\n group_expand='_read_group_stage_ids',\n default=_default_stage_id)\n\n appraisal_manager = fields.Many2one('hr.employee', 'Appraisal Manager',\n domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\")\n\n total_sum_weight = fields.Float('Total Weight Sum', compute=\"_calculate_total_weight\")\n\n employee_overall_rating = fields.Float('Employee Rating', compute=\"calculate_employee_overall_rating\", store=True)\n\n employee_overall_rating_rounded = fields.Float('Employee Rating Rounded', compute=\"calculate_employee_overall_rating\", store=True)\n\n overall_rating = fields.Float('Manager Rating', compute=\"calculate_overall_rating\", store=True)\n\n overall_rating_rounded = fields.Float('Manager Rating Rounded', compute=\"calculate_overall_rating\", store=True)\n\n hr_overall_rating = fields.Float('HR Final Rating', default=0.0)\n\n hr_overall_rating_final = fields.Float('HR Overall Rating Final', compute=\"calculate_hr_overall_rating\", store=True)\n\n manager_comment = fields.Text('Manager Comment')\n\n employee_comment = fields.Text('Employee Comment')\n\n hr_comment = fields.Text('HR Comment')\n\n survey_id = fields.Many2one('survey.survey', string=\"Survey\",\n domain=[('category', '=', 'hr_appraisal')], readonly=True\n )\n response_id = fields.Many2one('survey.user_input', \"Response\", ondelete=\"set null\", readonly=True,\n )\n\n employee_oc_id = fields.Many2one(related='employee_id.department_id')\n employee_system_id = fields.Char(related='employee_id.system_id')\n employee_strata_id = fields.Char(related='employee_id.strata_id')\n employee_job_title = fields.Char(related='employee_id.contract_id.job_title.name', string=\"Job Title\", store=True)\n group = fields.Many2one(related='employee_id.contract_id.group', store=True)\n department = fields.Many2one(related='employee_id.contract_id.department', store=True)\n section = fields.Many2one(related='employee_id.contract_id.section', store=True)\n subsection = fields.Many2one(related='employee_id.contract_id.subsection', store=True)\n\n total_count_objectives = fields.Float('Total Count Objectives', compute=\"_calculate_total_count\")\n\n previous_stage_id = fields.Many2one(\n comodel_name='hr.appraisal.stage',\n string='Previous Stage',\n required=False, compute='get_previous_stage')\n next_stage_id = fields.Many2one(\n comodel_name='hr.appraisal.stage',\n string='Next Stage',\n required=False, compute='get_next_stage')\n year = fields.Char(related='related_period.year', store=True)\n can_complete = fields.Boolean(related='stage_id.can_complete')\n is_completed = fields.Boolean(\n string='Is Completed',\n required=False, default=False)\n\n # pip fields\n area_development = fields.Text('Employee Areas of Development')\n\n first_manager_review = fields.Text('1st Month Review By Manager Comments')\n first_employee_review = fields.Text('1st Month Employee Review')\n\n second_manager_review = fields.Text('2nd Month Review By Manager Comments')\n second_employee_review = fields.Text('2nd 
Month Employee Review')\n\n third_manager_review = fields.Text('3rd Month Review By Manager Comments')\n third_employee_review = fields.Text('3rd Month Employee Review')\n\n manager_final_decision = fields.Selection(string='Manager Final Decision',\n selection=[('pass', 'Pass PIP'),\n ('fail', 'Fail PIP')])\n manager_final_comments = fields.Text('Manager Final Comments')\n\n def action_complete_appraisal(self):\n # todo\n pass\n\n def get_previous_stage(self):\n self = self.sudo()\n for rec in self:\n stage = self.env['hr.appraisal.stage'].search(\n [('sequence', '>', rec.stage_id.sequence), ('id', 'in', rec.appraisal_form.all_stages.ids)],\n order='sequence asc',\n limit=1)\n if stage:\n rec.previous_stage_id = stage.id\n else:\n rec.previous_stage_id = False\n\n def get_next_stage(self):\n self = self.sudo()\n for rec in self:\n stage = self.env['hr.appraisal.stage'].search(\n [('sequence', '<', rec.stage_id.sequence), ('id', 'in', rec.appraisal_form.all_stages.ids)],\n order='sequence desc', limit=1)\n if stage:\n rec.next_stage_id = stage.id\n else:\n rec.next_stage_id = False\n\n @api.onchange('appraisal_form')\n def _onchange_appraisal_form(self):\n for rec in self:\n if rec.appraisal_form:\n if rec.appraisal_form.period_id:\n rec.date_close = rec.appraisal_form.period_id.to_date\n if rec.appraisal_form.default_survey_id:\n rec.survey_id = rec.appraisal_form.default_survey_id.id\n if rec.appraisal_form.starting_stage:\n rec.stage_id = rec.appraisal_form.starting_stage.id\n if rec.appraisal_form.default_related_objective:\n rec.appraisal_objectives.unlink()\n for line in rec.appraisal_form.default_related_objective:\n linecopy = line.copy()\n linecopy.related_appraisal_forms = None\n linecopy.fixed = True\n rec.appraisal_objectives |= linecopy\n\n @api.onchange('employee_id')\n def _onchange_employee_id(self):\n for rec in self:\n rec = rec.sudo()\n if rec.employee_id:\n rec.current_manager = rec.employee_id.parent_id.id\n rec.appraisal_manager = rec.employee_id.parent_id.id\n\n @api.onchange('current_manager')\n def _onchange_manager_id(self):\n for rec in self:\n rec = rec.sudo()\n if not rec.stage_id.can_complete:\n rec.appraisal_manager = rec.current_manager.id\n # return {'domain': {\n # 'appraisal_form': [('from_grade_num', '<=', rec.employee_grade),\n # ('to_grade_num', '>=', rec.employee_grade),\n # ('related_contract_subgroup.id', '=', (rec.contract_subgroup.id)),\n # ]}}\n # return {'domain': {\n # 'appraisal_form': [('grade_ids.id', '=', (rec.employee_grade.id)),\n # ('related_contract_subgroup.id', '=', (rec.contract_subgroup.id)),\n # ]}}\n\n def action_start_survey(self):\n self.ensure_one()\n # create a response and link it to this applicant\n if not self.response_id:\n response = self.survey_id._create_answer(partner=self.env.user.partner_id)\n self.response_id = response.id\n else:\n response = self.response_id\n # grab the token of the response and start surveying\n action = self.survey_id.with_context(survey_token=response.token).action_start_survey()\n action.update({'target': 'new'})\n return action\n\n def action_print_survey(self):\n \"\"\" If response is available then print this response otherwise print survey form (print template of the survey) \"\"\"\n self.ensure_one()\n if not self.response_id:\n action = self.survey_id.action_print_survey()\n action.update({'target': 'new'})\n return action\n else:\n response = self.response_id\n action = self.survey_id.with_context(survey_token=response.token).action_print_survey()\n action.update({'target': 'new'})\n 
return action\n\n @api.depends('appraisal_objectives.employee_rating', 'appraisal_objectives.weight')\n def calculate_employee_overall_rating(self):\n for rec in self:\n total = sum([(int(x.employee_rating) * (x.weight / 100)) for x in rec.appraisal_objectives])\n rounded = rec.appraisal_form.calculation_rules.filtered(\n lambda x: x.from_value <= total and x.to_value >= total).value\n rec.employee_overall_rating = total\n rec.employee_overall_rating_rounded = rounded\n\n\n @api.depends('appraisal_objectives.manager_rating', 'appraisal_objectives.weight')\n def calculate_overall_rating(self):\n for rec in self:\n total = sum([(int(x.manager_rating) * (x.weight / 100)) for x in rec.appraisal_objectives])\n rounded = rec.appraisal_form.calculation_rules.filtered(\n lambda x: x.from_value <= total and x.to_value >= total).value\n rec.overall_rating = total\n rec.overall_rating_rounded = rounded\n\n @api.depends('overall_rating_rounded', 'hr_overall_rating')\n def calculate_hr_overall_rating(self):\n for rec in self:\n if rec.hr_overall_rating:\n rec.hr_overall_rating_final = rec.hr_overall_rating\n else:\n rec.hr_overall_rating_final = rec.overall_rating_rounded\n\n @api.onchange('hr_overall_rating')\n def oncahnge_hr_overall_rating(self):\n for rec in self:\n if rec.hr_overall_rating:\n rec.hr_overall_rating_final = rec.hr_overall_rating\n\n # @api.constrains('appraisal_objectives')\n def _check_objective_counts(self):\n if len(self.appraisal_objectives) > self.appraisal_form.max_objective or len(\n self.appraisal_objectives) < self.appraisal_form.min_objective:\n raise ValidationError(\"Objective must be between %s and %s\" % (\n self.appraisal_form.min_objective, self.appraisal_form.max_objective))\n\n def check_total_weight(self):\n if sum(self.appraisal_objectives.mapped('weight')) < self.appraisal_form.total_weight or sum(\n self.appraisal_objectives.mapped('weight')) > self.appraisal_form.total_weight:\n raise ValidationError(\"Total Objective Weight must be equal to %s\" % (self.appraisal_form.total_weight))\n\n def approve_all_objectives(self):\n for rec in self:\n if rec.appraisal_form.validate_weight:\n rec.check_total_weight()\n rec.appraisal_objectives.state_approve()\n\n @api.depends('appraisal_edit_objectives.weight')\n def _calculate_total_weight(self):\n for rec in self:\n rec.total_sum_weight = sum(rec.appraisal_objectives.mapped('weight'))\n\n @api.depends('appraisal_edit_objectives')\n def _calculate_total_count(self):\n for rec in self:\n rec.total_count_objectives = len(self.appraisal_edit_objectives)\n\n @api.model\n def _read_group_stage_ids(self, stages, domain, order):\n # retrieve job_id from the context and write the domain: ids + contextual columns (job or default)\n search_domain = []\n stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID)\n return stages.browse(stage_ids)\n\n def stage_changed_notification(self):\n for rec in self:\n rec.message_notify(\n partner_ids=rec.employee_id.user_partner_id.ids,\n body=(\"New stage %s\") % (\n rec.stage_id.name),\n subject=\"Appraisal\")\n rec.message_notify(\n partner_ids=rec.appraisal_manager.user_partner_id.ids,\n body=(\"%s - New Stage %s\") % (\n rec.employee_id.name, rec.stage_id.name),\n subject=\"Appraisal\")\n\n def check_survey_done(self):\n for rec in self:\n if rec.stage_id.employee_take_survey or rec.stage_id.manager_take_survey or rec.stage_id.users_take_survey:\n if not rec.response_id:\n raise ValidationError('Survey Must be filled before moving Forward!')\n\n def 
check_survey_done(self):\n for rec in self:\n if rec.stage_id.employee_take_survey or rec.stage_id.manager_take_survey or rec.stage_id.users_take_survey:\n if not rec.response_id:\n raise ValidationError('Survey Must be filled before moving Forward!')\n\n def check_comments_added(self):\n for rec in self:\n if rec.stage_id.employee_can_comment_form_hr or rec.stage_id.manager_can_comment_form_hr or rec.stage_id.users_can_comment_form_hr:\n if not rec.hr_comment:\n raise ValidationError('HR Comment on the appraisal form is required before moving forward')\n if rec.stage_id.employee_can_comment_form_manager or rec.stage_id.manager_can_comment_form_manager or rec.stage_id.users_can_comment_form_manager:\n if not rec.manager_comment:\n raise ValidationError('Manager Comment on the appraisal form is required before moving forward')\n if rec.stage_id.employee_can_comment_form_employee or rec.stage_id.manager_can_comment_form_employee or rec.stage_id.users_can_comment_form_employee:\n if not rec.employee_comment:\n raise ValidationError('Employee Comment on the appraisal form is required before moving forward')\n if rec.stage_id.employee_can_comment_objective_hr or rec.stage_id.manager_can_comment_objective_hr or rec.stage_id.users_can_comment_objective_hr:\n for line in rec.appraisal_edit_objectives:\n if not line.hr_comment:\n raise ValidationError('HR Comments on each objective is required before moving forward')\n if rec.stage_id.employee_can_comment_objective_manager or rec.stage_id.manager_can_comment_objective_manager or rec.stage_id.users_can_comment_objective_manager:\n for line in rec.appraisal_edit_objectives:\n if not line.manager_comment:\n raise ValidationError(\n 'Manager Comments on each objective is required before moving forward')\n if rec.stage_id.employee_can_comment_objective_employee or rec.stage_id.manager_can_comment_objective_employee or rec.stage_id.users_can_comment_objective_employee:\n for line in rec.appraisal_edit_objectives:\n if not line.employee_comment:\n raise ValidationError(\n 'Employee Comments on each objective is required before moving forward')\n if rec.stage_id.employee_rating_manager_add or rec.stage_id.manager_rating_manager_add or rec.stage_id.users_rating_manager_add:\n for line in rec.appraisal_edit_objectives:\n if line.manager_rating == '0':\n raise ValidationError('Manager Ratings on each objective is required before moving forward')\n if rec.stage_id.employee_rating_employee_add or rec.stage_id.manager_rating_employee_add or rec.stage_id.users_rating_employee_add:\n for line in rec.appraisal_edit_objectives:\n if line.employee_rating == '0':\n raise ValidationError(\n 'Employee Ratings on each objective is required before moving forward')\n\n if rec.stage_id.employee_area_development_add or rec.stage_id.manager_area_development_add or rec.stage_id.users_area_development_add:\n if not rec.area_development:\n raise ValidationError(\n 'Employee Areas of Development on the appraisal form is required before moving forward')\n if rec.stage_id.employee_first_manager_review_add or rec.stage_id.manager_first_manager_review_add or rec.stage_id.users_first_manager_review_add:\n if not rec.first_manager_review:\n raise ValidationError(\n '1st Month Review By Manager Comments on the appraisal form is required before moving forward')\n if rec.stage_id.employee_first_employee_review_add or rec.stage_id.manager_first_employee_review_add or rec.stage_id.users_first_employee_review_add:\n if not rec.first_employee_review:\n raise ValidationError(\n '1st Month 
Employee Review on the appraisal form is required before moving forward')\n if rec.stage_id.employee_second_manager_review_add or rec.stage_id.manager_second_manager_review_add or rec.stage_id.users_second_manager_review_add:\n if not rec.second_manager_review:\n raise ValidationError(\n '2nd Month Review By Manager Comments on the appraisal form is required before moving forward')\n if rec.stage_id.employee_second_employee_review_add or rec.stage_id.manager_second_employee_review_add or rec.stage_id.users_second_employee_review_add:\n if not rec.second_employee_review:\n raise ValidationError(\n '2nd Month Employee Review on the appraisal form is required before moving forward')\n if rec.stage_id.employee_third_manager_review_add or rec.stage_id.manager_third_manager_review_add or rec.stage_id.users_third_manager_review_add:\n if not rec.third_manager_review:\n raise ValidationError(\n '3rd Month Review By Manager Comments on the appraisal form is required before moving forward')\n if rec.stage_id.employee_third_employee_review_add or rec.stage_id.manager_third_employee_review_add or rec.stage_id.users_third_employee_review_add:\n if not rec.third_employee_review:\n raise ValidationError(\n '3rd Month Employee Review on the appraisal form is required before moving forward')\n if rec.stage_id.employee_manager_final_decision_add or rec.stage_id.manager_manager_final_decision_add or rec.stage_id.users_manager_final_decision_add:\n if not rec.manager_final_decision:\n raise ValidationError(\n 'Manager Final Decision on the appraisal form is required before moving forward')\n if rec.stage_id.employee_manager_final_comments_add or rec.stage_id.manager_manager_final_comments_add or rec.stage_id.users_manager_final_comments_add:\n if not rec.manager_final_comments:\n raise ValidationError(\n 'Manager Final Comments on the appraisal form is required before moving forward')\n\n def check_trainings_added(self):\n for rec in self:\n if rec.stage_id.employee_can_modifiy_training or rec.stage_id.manager_can_modifiy_training or rec.stage_id.users_can_modifiy_training:\n if len(rec.appraisal_training) == 0:\n raise ValidationError('Trainings are required before moving forward')\n\n def move_next_stage(self):\n self = self.sudo()\n if self.appraisal_form.validate_weight:\n self.check_total_weight()\n self._check_objective_counts()\n self.check_survey_done()\n self.check_comments_added()\n self.check_trainings_added()\n stage = self.env['hr.appraisal.stage'].search(\n [('sequence', '>', self.stage_id.sequence), ('id', 'in', self.appraisal_form.all_stages.ids)],\n order='sequence asc',\n limit=1)\n if stage:\n self.stage_id = stage.id\n self.stage_changed_notification()\n if stage.can_complete:\n self.action_complete_appraisal()\n view = self.env.ref('sh_message.sh_message_wizard')\n view_id = view and view.id or False\n context = dict(self._context or {})\n context['message'] = 'Thank you, the form has been submitted to next stage.'\n return {\n 'name': 'Success',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'sh.message.wizard',\n 'views': [(view.id, 'form')],\n 'view_id': view_id,\n 'target': 'new',\n 'context': context,\n }\n\n def log_or_move_previous_stage(self):\n self = self.sudo()\n if self.stage_id.log_note_backward:\n return {\n 'type': 'ir.actions.act_window',\n 'name': 'Log Reason',\n 'res_model': 'log.note.wizard',\n 'view_mode': 'form',\n 'target': 'new',\n 'view_id': self.env.ref('hr_appraisal_custom.log_note_wizard_view_form').id,\n 
'context': {\n 'default_related_appraisal': self.id,\n }\n }\n self.move_previous_stage()\n\n def move_previous_stage(self):\n stage = self.env['hr.appraisal.stage'].search(\n [('sequence', '<', self.stage_id.sequence), ('id', 'in', self.appraisal_form.all_stages.ids)],\n order='sequence desc', limit=1)\n if stage:\n self.stage_id = stage.id\n self.stage_changed_notification()\n\n can_move_forward = fields.Boolean('Can Move Forward', compute='_check_stage_rule_move_forward')\n\n def _check_stage_rule_move_forward(self):\n if (self.employee_id.user_id == self.env.user and self.stage_id.employee_allowed_forward) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_allowed_forward) \\\n or self.env.user in self.stage_id.users_allowed_forward:\n self.can_move_forward = True\n else:\n self.can_move_forward = False\n\n can_move_previous = fields.Boolean('Can Move Previous', compute='_check_stage_rule_move_previous')\n\n def _check_stage_rule_move_previous(self):\n if (self.employee_id.user_id == self.env.user and self.stage_id.employee_allowed_backward) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_allowed_backward) \\\n or self.env.user in self.stage_id.users_allowed_backward:\n self.can_move_previous = True\n else:\n self.can_move_previous = False\n\n can_add_objectives = fields.Boolean('Can Add Objectives', compute='_check_stage_rule_add_objectives')\n\n def _check_stage_rule_add_objectives(self):\n if (self.employee_id.user_id == self.env.user and self.stage_id.employee_can_add_objective) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_add_objective) \\\n or self.env.user in self.stage_id.users_can_add_objective:\n self.can_add_objectives = True\n else:\n self.can_add_objectives = False\n\n can_edit_appraisal = fields.Boolean('Can Edit Appraisal', compute='_check_stage_rule_edit_appraisal')\n\n def _check_stage_rule_edit_appraisal(self):\n if (self.employee_id.user_id == self.env.user and self.stage_id.employee_allowed_edit) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_allowed_edit) \\\n or self.env.user in self.stage_id.users_allowed_edit:\n self.can_edit_appraisal = True\n else:\n self.can_edit_appraisal = False\n\n can_comment_hr = fields.Boolean('Can HR Comment', compute='_check_stage_rule_hr_comment')\n\n def _check_stage_rule_hr_comment(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_can_comment_form_hr) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_comment_form_hr) \\\n or self.env.user in self.stage_id.users_can_comment_form_hr:\n self.can_comment_hr = True\n else:\n self.can_comment_hr = False\n\n can_comment_manager = fields.Boolean('Can Manager Comment', compute='_check_stage_rule_manager_comment')\n\n def _check_stage_rule_manager_comment(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_can_comment_form_manager) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_comment_form_manager) \\\n or self.env.user in self.stage_id.users_can_comment_form_manager:\n self.can_comment_manager = True\n else:\n self.can_comment_manager = False\n\n can_comment_employee = fields.Boolean('Can Employee Comment', compute='_check_stage_rule_employee_comment')\n\n def _check_stage_rule_employee_comment(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_can_comment_form_employee) or (\n 
self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_comment_form_employee) \\\n or self.env.user in self.stage_id.users_can_comment_form_employee:\n self.can_comment_employee = True\n else:\n self.can_comment_employee = False\n\n can_see_comment_hr = fields.Boolean('Can See HR Comment', compute='_check_stage_rule_hr_see_comment')\n\n def _check_stage_rule_hr_see_comment(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_can_see_comment_form_hr) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_see_comment_form_hr) \\\n or self.env.user in self.stage_id.users_can_see_comment_form_hr:\n self.can_see_comment_hr = True\n else:\n self.can_see_comment_hr = False\n\n can_see_comment_manager = fields.Boolean('Can See Manager Comment', compute='_check_stage_rule_manager_see_comment')\n\n def _check_stage_rule_manager_see_comment(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_can_see_comment_form_manager) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_see_comment_form_manager) \\\n or self.env.user in self.stage_id.users_can_see_comment_form_manager:\n self.can_see_comment_manager = True\n else:\n self.can_see_comment_manager = False\n\n can_see_comment_employee = fields.Boolean('Can See Employee Comment',\n compute='_check_stage_rule_employee_see_comment')\n\n def _check_stage_rule_employee_see_comment(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_can_see_comment_form_employee) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_see_comment_form_employee) \\\n or self.env.user in self.stage_id.users_can_see_comment_form_employee:\n self.can_see_comment_employee = True\n else:\n self.can_see_comment_employee = False\n\n can_training = fields.Boolean('Can Edit Training', compute='_check_stage_rule_training')\n\n def _check_stage_rule_training(self):\n if (self.employee_id.user_id == self.env.user and self.stage_id.employee_can_modifiy_training) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_modifiy_training) \\\n or self.env.user in self.stage_id.users_can_modifiy_training:\n self.can_training = True\n else:\n self.can_training = False\n\n can_approve_objectives = fields.Boolean('Can Approve Objectives', compute='_check_stage_rule_approve_objectives')\n\n def _check_stage_rule_approve_objectives(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_can_approve_approve) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_approve_approve) \\\n or self.env.user in self.stage_id.users_can_approve_approve:\n self.can_approve_objectives = True\n else:\n self.can_approve_objectives = False\n\n can_take_survey = fields.Boolean('Can Take Survey', compute='_check_stage_rule_take_survey')\n\n def _check_stage_rule_take_survey(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_take_survey) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_take_survey) \\\n or self.env.user in self.stage_id.users_take_survey:\n self.can_take_survey = True\n else:\n self.can_take_survey = False\n\n can_print_survey = fields.Boolean('Can See Survey', compute='_check_stage_rule_see_survey')\n\n def _check_stage_rule_see_survey(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_see_survey) or (\n 
self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_see_survey) \\\n or self.env.user in self.stage_id.users_see_survey:\n self.can_print_survey = True\n else:\n self.can_print_survey = False\n\n can_rate_hr = fields.Boolean('Can HR Rate', compute='_check_stage_rule_hr_rate')\n\n def _check_stage_rule_hr_rate(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_rating_hr_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_rating_hr_add) \\\n or self.env.user in self.stage_id.users_rating_hr_add:\n self.can_rate_hr = True\n else:\n self.can_rate_hr = False\n\n can_see_rate_hr = fields.Boolean('Can See HR Rate', compute='_check_stage_rule_hr_see_rate')\n\n def _check_stage_rule_hr_see_rate(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_rating_hr_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_rating_hr_see) \\\n or self.env.user in self.stage_id.users_rating_hr_see:\n self.can_see_rate_hr = True\n else:\n self.can_see_rate_hr = False\n\n can_see_rate_manager = fields.Boolean('Can See Manager Rate', compute='_check_stage_rule_manager_see_rate')\n\n def _check_stage_rule_manager_see_rate(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_rating_manager_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_rating_manager_see) \\\n or self.env.user in self.stage_id.users_rating_manager_see:\n self.can_see_rate_manager = True\n else:\n self.can_see_rate_manager = False\n\n can_see_rate_employee = fields.Boolean('Can See Employee Rate', compute='_check_stage_rule_employee_see_rate')\n\n def _check_stage_rule_employee_see_rate(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_rating_employee_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_rating_employee_see) \\\n or self.env.user in self.stage_id.users_rating_employee_see:\n self.can_see_rate_employee = True\n else:\n self.can_see_rate_employee = False\n\n can_see_comment_objective_hr = fields.Boolean('Can See HR Comment',\n compute='_check_stage_rule_hr_see_objective_comment')\n\n def _check_stage_rule_hr_see_objective_comment(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_can_see_comment_objective_hr) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_see_comment_objective_hr) \\\n or self.env.user in self.stage_id.users_can_see_comment_objective_hr:\n self.can_see_comment_objective_hr = True\n else:\n self.can_see_comment_objective_hr = False\n\n can_see_comment_objective_manager = fields.Boolean('Can See Manager Comment',\n compute='_check_stage_rule_manager_see_objective_comment')\n\n def _check_stage_rule_manager_see_objective_comment(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_can_see_comment_objective_manager) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_see_comment_objective_manager) \\\n or self.env.user in self.stage_id.users_can_see_comment_objective_manager:\n self.can_see_comment_objective_manager = True\n else:\n self.can_see_comment_objective_manager = False\n\n can_see_comment_objective_employee = fields.Boolean('Can See Employee Comment',\n compute='_check_stage_rule_employee_see_objective_comment')\n\n def _check_stage_rule_employee_see_objective_comment(self):\n if (\n 
self.employee_id.user_id == self.env.user and self.stage_id.employee_can_see_comment_objective_employee) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_can_see_comment_objective_employee) \\\n or self.env.user in self.stage_id.users_can_see_comment_objective_employee:\n self.can_see_comment_objective_employee = True\n else:\n self.can_see_comment_objective_employee = False\n\n can_see_rate_objective_manager = fields.Boolean('Can See Manager Rate',\n compute='_check_stage_rule_manager_see_objective_rate')\n\n def _check_stage_rule_manager_see_objective_rate(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_rating_manager_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_rating_manager_see) \\\n or self.env.user in self.stage_id.users_rating_manager_see:\n self.can_see_rate_objective_manager = True\n else:\n self.can_see_rate_objective_manager = False\n\n can_see_rate_objective_employee = fields.Boolean('Can See Employee Rate',\n compute='_check_stage_rule_employee_objective_see_rate')\n\n def _check_stage_rule_employee_objective_see_rate(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_rating_employee_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_rating_employee_see) \\\n or self.env.user in self.stage_id.users_rating_employee_see:\n self.can_see_rate_objective_employee = True\n else:\n self.can_see_rate_objective_employee = False\n\n def _get_objectives(self):\n # line = [{'name': \"Objective\", 'weight': \"Weight\", 'description': \"Description\", 'e_rating': \"Employee Rating\",\n # 'e_comment': \"Employee Comment\", 'm_rating': \"Manager Rating\",\n # 'm_comment': \"Manager Comment\", 'h_comment': \"HR Comment\",\n # 'bold': True}]\n line = []\n\n for obj in self.appraisal_edit_objectives.filtered(lambda x: x.state == 'active'):\n line.append(\n {'name': obj.title, 'weight': obj.weight, 'description': obj.description,\n 'e_rating': obj.employee_rating,\n 'e_comment': obj.employee_comment, 'm_rating': obj.manager_rating,\n 'm_comment': obj.manager_comment, 'h_comment': obj.hr_comment,\n 'bold': False})\n # line.append(\n # {'name': \"Objective\", 'weight': \"Weight\", 'description': \"Description\", 'e_rating': \"Employee Rating\",\n # 'e_comment': \"Employee Comment\", 'm_rating': \"Manager Rating\",\n # 'm_comment': \"Manager Comment\", 'h_comment': \"HR Comment\",\n # 'bold': True})\n\n return line\n\n def _get_trainings(self):\n full_date_format = '%d/%m/%Y'\n line = [{'name': \"Training Name\", 'status': \"Status\", 'from_date': \"From Date\", 'to_date': \"To Date\",\n 'description': \"Description\", 'bold': True}]\n\n for obj in self.appraisal_training:\n line.append(\n {'name': obj.name, 'status': obj.status.name,\n 'from_date': obj.from_date.strftime(full_date_format) if obj.from_date else '',\n 'to_date': obj.to_date.strftime(full_date_format) if obj.to_date else '', 'description': obj.desc,\n 'bold': False})\n\n return line\n\n def _get_feedbacks(self):\n line = [\n {'name': \"Name\", 'user': \"Responsible User\", 'type': \"Feedback Type\", 'feedback': \"Feedback\", 'bold': True}]\n\n for obj in self.related_feedback:\n line.append(\n {'name': obj.name, 'user': obj.user_id.name,\n 'type': dict(self.related_feedback._fields['feedback_type'].selection).get(obj.feedback_type),\n 'feedback': obj.feedback,\n 'bold': False})\n\n return line\n\n can_add_area_development = fields.Boolean('Can Add Area of 
Development',\n compute='_check_stage_rule_add_area_development')\n\n def _check_stage_rule_add_area_development(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_area_development_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_area_development_add) \\\n or self.env.user in self.stage_id.users_area_development_add:\n self.can_add_area_development = True\n else:\n self.can_add_area_development = False\n\n can_see_area_development = fields.Boolean('Can See Area of Development',\n compute='_check_stage_rule_see_area_development')\n\n def _check_stage_rule_see_area_development(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_area_development_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_area_development_see) \\\n or self.env.user in self.stage_id.users_area_development_see:\n self.can_see_area_development = True\n else:\n self.can_see_area_development = False\n\n can_add_first_manager_review = fields.Boolean('Can Add 1st month Review by Manager Comments',\n compute='_check_stage_rule_add_first_manager_review')\n\n def _check_stage_rule_add_first_manager_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_first_manager_review_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_first_manager_review_add) \\\n or self.env.user in self.stage_id.users_first_manager_review_add:\n self.can_add_first_manager_review = True\n else:\n self.can_add_first_manager_review = False\n\n can_see_first_manager_review = fields.Boolean('Can See 1st month Review by Manager Comments',\n compute='_check_stage_rule_see_first_manager_review')\n\n def _check_stage_rule_see_first_manager_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_first_manager_review_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_first_manager_review_see) \\\n or self.env.user in self.stage_id.users_first_manager_review_see:\n self.can_see_first_manager_review = True\n else:\n self.can_see_first_manager_review = False\n\n can_add_first_employee_review = fields.Boolean('Can Add 1st month Employee Review',\n compute='_check_stage_rule_add_first_employee_review')\n\n def _check_stage_rule_add_first_employee_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_first_employee_review_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_first_employee_review_add) \\\n or self.env.user in self.stage_id.users_first_employee_review_add:\n self.can_add_first_employee_review = True\n else:\n self.can_add_first_employee_review = False\n\n can_see_first_employee_review = fields.Boolean('Can See 1st month Employee Review',\n compute='_check_stage_rule_see_first_employee_review')\n\n def _check_stage_rule_see_first_employee_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_first_employee_review_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_first_employee_review_see) \\\n or self.env.user in self.stage_id.users_first_employee_review_see:\n self.can_see_first_employee_review = True\n else:\n self.can_see_first_employee_review = False\n\n can_add_second_manager_review = fields.Boolean('Can Add 2nd month Review by Manager Comments',\n compute='_check_stage_rule_add_second_manager_review')\n\n def 
_check_stage_rule_add_second_manager_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_second_manager_review_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_second_manager_review_add) \\\n or self.env.user in self.stage_id.users_second_manager_review_add:\n self.can_add_second_manager_review = True\n else:\n self.can_add_second_manager_review = False\n\n can_see_second_manager_review = fields.Boolean('Can See 2nd month Review by Manager Comments',\n compute='_check_stage_rule_see_second_manager_review')\n\n def _check_stage_rule_see_second_manager_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_second_manager_review_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_second_manager_review_see) \\\n or self.env.user in self.stage_id.users_second_manager_review_see:\n self.can_see_second_manager_review = True\n else:\n self.can_see_second_manager_review = False\n\n can_add_second_employee_review = fields.Boolean('Can Add 2nd month Employee Review',\n compute='_check_stage_rule_add_second_employee_review')\n\n def _check_stage_rule_add_second_employee_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_second_employee_review_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_second_employee_review_add) \\\n or self.env.user in self.stage_id.users_second_employee_review_add:\n self.can_add_second_employee_review = True\n else:\n self.can_add_second_employee_review = False\n\n can_see_second_employee_review = fields.Boolean('Can See 2nd month Employee Review',\n compute='_check_stage_rule_see_second_employee_review')\n\n def _check_stage_rule_see_second_employee_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_second_employee_review_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_second_employee_review_see) \\\n or self.env.user in self.stage_id.users_second_employee_review_see:\n self.can_see_second_employee_review = True\n else:\n self.can_see_second_employee_review = False\n\n can_add_third_manager_review = fields.Boolean('Can Add 3rd month Review by Manager Comments',\n compute='_check_stage_rule_add_third_manager_review')\n\n def _check_stage_rule_add_third_manager_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_third_manager_review_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_third_manager_review_add) \\\n or self.env.user in self.stage_id.users_third_manager_review_add:\n self.can_add_third_manager_review = True\n else:\n self.can_add_third_manager_review = False\n\n can_see_third_manager_review = fields.Boolean('Can See 3rd month Review by Manager Comments',\n compute='_check_stage_rule_see_third_manager_review')\n\n def _check_stage_rule_see_third_manager_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_third_manager_review_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_third_manager_review_see) \\\n or self.env.user in self.stage_id.users_third_manager_review_see:\n self.can_see_third_manager_review = True\n else:\n self.can_see_third_manager_review = False\n\n can_add_third_employee_review = fields.Boolean('Can Add 3rd month Employee Review',\n compute='_check_stage_rule_add_third_employee_review')\n\n def 
_check_stage_rule_add_third_employee_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_third_employee_review_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_third_employee_review_add) \\\n or self.env.user in self.stage_id.users_third_employee_review_add:\n self.can_add_third_employee_review = True\n else:\n self.can_add_third_employee_review = False\n\n can_see_third_employee_review = fields.Boolean('Can See 3rd month Employee Review',\n compute='_check_stage_rule_see_third_employee_review')\n\n def _check_stage_rule_see_third_employee_review(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_third_employee_review_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_third_employee_review_see) \\\n or self.env.user in self.stage_id.users_third_employee_review_see:\n self.can_see_third_employee_review = True\n else:\n self.can_see_third_employee_review = False\n\n can_add_manager_final_decision = fields.Boolean('Can Add Manager Final Decision',\n compute='_check_stage_rule_add_manager_final_decision')\n\n def _check_stage_rule_add_manager_final_decision(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_manager_final_decision_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_manager_final_decision_add) \\\n or self.env.user in self.stage_id.users_manager_final_decision_add:\n self.can_add_manager_final_decision = True\n else:\n self.can_add_manager_final_decision = False\n\n can_see_manager_final_decision = fields.Boolean('Can See Manager Final Decision',\n compute='_check_stage_rule_see_manager_final_decision')\n\n def _check_stage_rule_see_manager_final_decision(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_manager_final_decision_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_manager_final_decision_see) \\\n or self.env.user in self.stage_id.users_manager_final_decision_see:\n self.can_see_manager_final_decision = True\n else:\n self.can_see_manager_final_decision = False\n\n can_add_manager_final_comments = fields.Boolean('Can Add Manager Final Comments',\n compute='_check_stage_rule_add_manager_final_comments')\n\n def _check_stage_rule_add_manager_final_comments(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_manager_final_comments_add) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_manager_final_comments_add) \\\n or self.env.user in self.stage_id.users_manager_final_comments_add:\n self.can_add_manager_final_comments = True\n else:\n self.can_add_manager_final_comments = False\n\n can_see_manager_final_comments = fields.Boolean('Can See Manager Final Comments',\n compute='_check_stage_rule_see_manager_final_comments')\n\n def _check_stage_rule_see_manager_final_comments(self):\n if (\n self.employee_id.user_id == self.env.user and self.stage_id.employee_manager_final_comments_see) or (\n self.appraisal_manager.user_id == self.env.user and self.stage_id.manager_manager_final_comments_see) \\\n or self.env.user in self.stage_id.users_manager_final_comments_see:\n self.can_see_manager_final_comments = True\n else:\n self.can_see_manager_final_comments = 
False\n","sub_path":"hr_appraisal_custom/models/hr_appraisal.py","file_name":"hr_appraisal.py","file_ext":"py","file_size_in_byte":53206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"478838539","text":"import os, sys\nimport bots.botsglobal as botsglobal\nimport bots.botslib as botslib\nfrom datetime import datetime\n\ndef ta_infocontent(ta_info,*args,**kwargs):\n ta_info[\"ISA05\"]=\"ZZ\"\n ta_info[\"ISA07\"]=\"ZZ\"\n #ta_info[\"GS06\"]=ta_info[\"alt\"]\n #ta_info[\"ISA13\"]=ta_info[\"alt\"]\n ta_info[\"version\"]=\"00306\"\n now = datetime.now()\n dt = now.strftime('%y%m%d')\n ta_info[\"GS04\"]=dt\n\ndef envelopecontent(ta_info,out,*args,**kwargs):\n now = datetime.now()\n dt = now.strftime('%y%m%d')\n out.change(where=({'BOTSID':'ISA'},{'BOTSID':'GS'},),change={'GS04': dt})\n","sub_path":"usersys/envelopescripts/x12/x12.py","file_name":"x12.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"453496605","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport logging as log\n\nfrom synergetics.config import config as cfg\nfrom synergetics.neurons.LIF.LIF import LIF\n\n\nclass LIFRate(LIF):\n \"\"\"\n http://en.wikipedia.org/wiki/Biological_neuron_model#Leaky_integrate-and-fire\n \"\"\"\n def __init__(self, position, excitatory=True,\n update_rule=None,\n discover_rule=None,\n tau_rc=0.02,\n tau_th=0.002,\n reversal_potential=0,\n membrane_resistance=0.01,\n refractory_period=1):\n self._tau_rc = tau_rc\n self._tau_th = tau_th\n self._i_thresh = reversal_potential/membrane_resistance\n self._t_ref = refractory_period\n\n super(LIFStochastic, self).__init__(position, excitatory, update_rule, discover_rule)\n\n def excite(self, excitor, excitement, timestamp):\n if excitement > self._i_thresh:\n rate = 1.0/(self._t_ref - self._tau_rc*np.log(1.0 - (excitement/self._tau_th)))\n else:\n rate = 0\n if rate: self.spike(timestamp, num=rate)\n\n","sub_path":"src/neurons/LIF/LIFRate.py","file_name":"LIFRate.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"65898215","text":"#04_while_number.py\n\n#练习:\n# 1、输入一个整数,用end变量绑定,\n# 打印出1~end的所有整数,(包含end)\n\nend = int(input(\"请输入一个整数:\"))\ni = 1\nwhile i <= end:\n print(i)\n i += 1\n\nprint(\"程序结束!\")\n\n\n#2、写程序,输入两个整数,第一个用begin绑定,\n# 第二个用end变量绑定,打印出begin~end的所有的整数\n\nbegin = int(input(\"请输入第一个整数:\"))\nend = int(input(\"请输入第二个整数:\"))\n\ni = begin\nwhile i <= end:\n print(i)\n i += 1\n\nprint(\"程序结束!\")","sub_path":"python语法/day4/04_while_number.py","file_name":"04_while_number.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"441536066","text":"import torch\nfrom torch.autograd import grad\nfrom torch.autograd.functional import hessian \n\n\n\n\ndef propose_step(loss, x):\n \"\"\"\n Proposes a step to be evaluated by {replace here}. \n\n :param torch.Tensor loss: A scalar function ``x`` to be minimized. \n :param torch.Tensor x: A dependent variable with rightmost size of 2. 
\n \n :list proposed_step: List of proposed steps for each parameter in x\n\n Note: \n For the moment I assume that every parameter is a vector (in the mathematical sense)\n \"\"\"\n\n if loss.shape != ():\n raise ValueError('Expected loss to be a scalar, actual shape{}'.format(loss.shape)) \n\n x = list(x)\n proposed_step = []\n g = grad(loss, x, create_graph=True)\n\n # We need to compute the hessians idependently in case the have different shapes\n # For the moment I am assuming that every parameter is a row\n # We can probably handle this later\n\n for i, param in enumerate(x):\n hessian_rows = [grad(g[i][j], param, retain_graph=True)[0] for j in range(len(param))]\n H = torch.stack(hessian_rows)\n H_inverse = torch.inverse(H)\n proposed_step.append(- H_inverse @ g[i])\n \n return proposed_step\n \n","sub_path":"pyro/ops/trustvi.py","file_name":"trustvi.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"56336671","text":"# -*-coding:utf-8-*-\nclass Solution(object):\n\tdef findMedianSortedArrays(self, nums1, nums2):\n\t\tlen1,len2 = len(nums1),len(nums2)\n\t\tlength = len1 + len2\n\t\tif (length % 2 == 0):\n\t\t\tposition = length / 2\n\t\telse:\n\t\t\tposition = length / 2 + 1\n\t\tif (position == 1):\n\t\t\tstartIndex = 0\n\t\telse:\n\t\t\tstartIndex = position / 2 - 1\n\t\tnum1,num2 = self.partition(nums1,nums2,0,0,startIndex,startIndex,len1,len2,position)\n\t\tif (num1 is None and num2 is None):\n\t\t\treturn None\n\t\telif (num1 is None):\n\t\t\treturn num2\n\t\telif (num2 is None):\n\t\t\treturn num1\n\t\tif (length % 2 == 0):\n\t\t\treturn (num1 + num2) / 2.0\n\t\treturn num1\n\t\n\tdef partition(self,nums1,nums2,start1,start2,index1,index2,len1,len2,position):\n\t\t# 处理某个数组的个数为 0 的情况\n\t\tif (len1 == 0 and len2 == 0):\n\t\t\treturn None,None\n\t\telif (len1 == 0):\n\t\t\tif (len2 == 1):\n\t\t\t\treturn nums2[len2 /2],None\n\t\t\telif(len2 % 2== 0):\n\t\t\t\treturn nums2[len2 /2 - 1],nums2[len2/2]\n\t\t\telse:\n\t\t\t\treturn nums2[len2 /2],nums2[len2/2 + 1]\n\t\telif (len2 == 0):\n\t\t\tif (len1 == 1):\n\t\t\t\treturn nums1[len1 /2],None\n\t\t\telif(len1 % 2 == 0):\n\t\t\t\treturn nums1[len1 /2 - 1],nums1[len1/2]\n\t\t\telse:\n\t\t\t\treturn nums1[len1 /2],nums1[len1/2+1]\n\t\t\t\t\n\t\t# 在某次迭代后,某个数组的总长度已经小于归并后所需的子长度\t\t\n\t\tif (index1 >= len1):\n\t\t\tindex1 = len1 - 1\n\t\tif (index2 >= len2):\n\t\t\tindex2 = len2 - 1\n\t\t\t\n\t\t# N 次迭代后,剩下所需子长度终于为1,结束归并。\n\t\tif (position == 1):\n\t\t\tif (nums1[index1] < nums2[index2]):\n\t\t\t\tif (index1 + 1 < len(nums1) and nums1[index1 + 1] <= nums2[index2]):\n\t\t\t\t\treturn nums1[index1],nums1[index1 + 1]\n\t\t\t\telse:\n\t\t\t\t\treturn nums1[index1],nums2[index2]\n\t\t\telse:\n\t\t\t\tif (index2 + 1 < len(nums2) and nums1[index1] >= nums2[index2 + 1]):\n\t\t\t\t\treturn nums2[index2],nums2[index2 + 1]\n\t\t\t\telse:\n\t\t\t\t\treturn nums2[index2],nums1[index1]\n\t\t\t\t\t\n\t\t# 核心的迭代过程\n\t\tif (nums1[index1] <= nums2[index2]):\n\t\t\tposition = position - index1 + start1 - 1\n\t\t\tif (index1 == len1 - 1):\n\t\t\t\treturn nums2[start2 + position - 1],nums2[start2 + position]\n\t\t\tif (position == 1):\n\t\t\t\tstart1 = index1 + 1\n\t\t\t\tindex1 = start1\n\t\t\t\tindex2 = start2\t\n\t\t\telse:\t\n\t\t\t\tstart1 = index1 + 1\n\t\t\t\tindex1 = position / 2 + start1 - 1\n\t\t\t\tindex2 = position / 2 + start2 - 1\n\t\t\treturn self.partition(nums1,nums2,start1,start2,index1,index2,len1,len2,position)\n\t\telse:\n\t\t\tposition = 
position - index2 + start2 - 1\n\t\t\tif (index2 == len2 - 1):\n\t\t\t\treturn nums1[start1 + position - 1],nums1[start1 + position]\n\t\t\tif (position == 1):\n\t\t\t\tindex1 = start1\n\t\t\t\tstart2 = index2 + 1 \n\t\t\t\tindex2 = start2\t\n\t\t\telse:\n\t\t\t\tindex1 = position / 2 + start1 - 1\n\t\t\t\tstart2 = index2 + 1 \n\t\t\t\tindex2 = position / 2 + start2 - 1 \n\t\t\treturn self.partition(nums1,nums2,start1,start2,index1,index2,len1,len2,position)","sub_path":"Leetcode/findMedianSortedArrays.py","file_name":"findMedianSortedArrays.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"476645927","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\nuser = {\n 'username': 'yard',\n 'bio': 'A boy who love movies'\n}\nmovies = [\n {'name': 'My Neighbor Totoro', 'year': '1988'},\n {'name': 'Three Colours trilogy', 'year': '1993'},\n {'name': 'Forrest Gump', 'year': '1994'},\n {'name': 'Perfect Blue', 'year': '1997'},\n {'name': 'The Matrix', 'year': '1999'},\n {'name': 'Memento', 'year': '2000'},\n {'name': 'The Bucket list', 'year': '2007'},\n {'name': 'Black Swan', 'year': '2010'},\n {'name': 'Gone Girl', 'year': '2014'},\n {'name': 'CoCo', 'year': '2017'}\n]\n\n\n@app.route('/index')\ndef index():\n return 'hello!'\n\n\n@app.route('/watchlist')\ndef watchlist():\n return render_template('watchlist.html', user=user, movies=movies)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app_movie.py","file_name":"app_movie.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"355840423","text":"from selfdrive.controls.lib.pid import PIController\nfrom selfdrive.controls.lib.drive_helpers import get_steer_max\nfrom cereal import car\nfrom cereal import log\n\n\nclass LatControlPID():\n def __init__(self, CP):\n self.factor = 1.\n\n self.lowpid = PIController((CP.lateralTuning.pid.kpBP, CP.lateralTuning.pid.kpV),\n (CP.lateralTuning.pid.kiBP, CP.lateralTuning.pid.kiV),\n k_f=CP.lateralTuning.pid.kf, pos_limit=1.0, sat_limit=CP.steerLimitTimer)\n\n self.pid = PIController((CP.lateralTuning.pid.kpBP, CP.lateralTuning.pid.kpV),\n (CP.lateralTuning.pid.kiBP, CP.lateralTuning.pid.kiV),\n k_f=CP.lateralTuning.pid.kf, pos_limit=1.0, neg_limit=-1.0,\n sat_limit=CP.steerLimitTimer)\n self.angle_steers_des = 0.\n self.increasing = False\n self.dualpids = False\n\n def reset(self):\n self.pid.reset()\n self.lowpid.reset()\n self.increasing = False\n\n def dualPIDinit(self, CP):\n self.factor = CP.lateralParams.torqueBP[1] / CP.lateralParams.torqueBP[-1]\n\n self.highkpV = CP.lateralTuning.pid.kpV[1]\n self.highkiV = CP.lateralTuning.pid.kiV[1]\n\n self.lowpid = PIController((CP.lateralTuning.pid.kpBP, [CP.lateralTuning.pid.kpV[0]]),\n (CP.lateralTuning.pid.kiBP, [CP.lateralTuning.pid.kiV[0]]),\n k_f=CP.lateralTuning.pid.kf, pos_limit=1.0, sat_limit=CP.steerLimitTimer)\n\n self.pid = PIController((CP.lateralTuning.pid.kpBP, [self.highkpV]),\n (CP.lateralTuning.pid.kiBP, [self.highkiV]),\n k_f=CP.lateralTuning.pid.kf, pos_limit=1.0, sat_limit=CP.steerLimitTimer)\n self.angle_steers_des = 0.\n self.increasing = False\n self.dualpids = True\n\n def pidset(self, pid, descontrol, setpoint, measurement, feedforward):\n error = float(setpoint - measurement)\n p = error * pid._k_p[1][0]\n f = feedforward * pid.k_f\n i = descontrol - p - f\n pid.p = p\n pid.i = i\n pid.f = f\n pid.sat_count = 
0.0\n pid.saturated = False\n pid.control = descontrol\n\n def update(self, active, CS, CP, path_plan):\n pid_log = log.ControlsState.LateralPIDState.new_message()\n pid_log.steerAngle = float(CS.steeringAngle)\n pid_log.steerRate = float(CS.steeringRate)\n\n if CS.vEgo < 0.3 or not active:\n output_steer = 0.0\n pid_log.active = False\n if len(CP.lateralTuning.pid.kpV) > 1 and not self.dualpids:\n self.dualPIDinit(CP)\n self.pid.reset()\n self.lowpid.reset()\n self.increasing = False\n else:\n self.angle_steers_des = path_plan.angleSteers # get from MPC/PathPlanner\n\n steers_max = get_steer_max(CP, CS.vEgo)\n self.pid.pos_limit = steers_max\n self.pid.neg_limit = -steers_max\n\n self.lowpid.pos_limit = steers_max\n self.lowpid.neg_limit = -steers_max\n\n steer_feedforward = self.angle_steers_des # feedforward desired angle\n if CP.steerControlType == car.CarParams.SteerControlType.torque:\n # TODO: feedforward something based on path_plan.rateSteers\n steer_feedforward -= path_plan.angleOffset # subtract the offset, since it does not contribute to resistive torque\n steer_feedforward *= CS.vEgo**2 # proportional to realigning tire momentum (~ lateral accel)\n deadzone = 0.0\n\n check_saturation = (CS.vEgo > 10) and not CS.steeringRateLimited and not CS.steeringPressed\n #output_steer = self.pid.update(self.angle_steers_des, CS.steeringAngle, check_saturation=check_saturation, override=CS.steeringPressed,\n # feedforward=steer_feedforward, speed=CS.vEgo, deadzone=deadzone)\n\n output_steer = 0.0\n if not self.dualpids:\n output_steer = self.pid.update(self.angle_steers_des, CS.steeringAngle, check_saturation=check_saturation, override=CS.steeringPressed,\n feedforward=steer_feedforward, speed=CS.vEgo, deadzone=deadzone)\n else:\n if not self.increasing:\n raw_low_output_steer = self.lowpid.update(self.angle_steers_des, CS.steeringAngle, check_saturation=check_saturation, override=CS.steeringPressed,\n feedforward=steer_feedforward, speed=CS.vEgo, deadzone=deadzone)\n output_steer = raw_low_output_steer * self.factor\n #print(\"Lo - \" + str(output_steer))\n if abs(output_steer) > (self.factor * 0.99):\n self.pidset(self.pid, output_steer, self.angle_steers_des, CS.steeringAngle,steer_feedforward)\n #self.pid.p = self.lowpid.p * self.factor\n #self.pid.i = self.lowpid.i * self.factor\n #self.pid.f = self.lowpid.f * self.factor\n #self.pid.sat_count = 0.0\n #self.pid.saturated = False\n #self.pid.control = self.lowpid.control * self.factor\n\n self.increasing = True\n else:\n output_steer = self.pid.update(self.angle_steers_des, CS.steeringAngle, check_saturation=check_saturation, override=CS.steeringPressed,\n feedforward=steer_feedforward, speed=CS.vEgo, deadzone=deadzone)\n #print(\"Hi - \" + str(output_steer))\n if abs(output_steer) < (self.factor * 0.1) and abs(self.pid.i) < (self.factor * 0.2):\n self.pidset(self.lowpid, (output_steer / self.factor), self.angle_steers_des, CS.steeringAngle,steer_feedforward)\n #self.lowpid.p = self.pid.p / self.factor\n #self.lowpid.i = 0\n #self.lowpid.f = self.pid.f / self.factor\n #self.lowpid.sat_count = 0.0\n #self.lowpid.saturated = False\n #self.lowpid.control = self.pid.control / self.factor\n\n self.increasing = False\n pid_log.active = True\n pid_log.p = self.pid.p\n pid_log.i = self.pid.i\n pid_log.f = self.pid.f\n pid_log.output = output_steer\n pid_log.saturated = bool(self.pid.saturated)\n\n return output_steer, float(self.angle_steers_des), 
pid_log\n","sub_path":"selfdrive/controls/lib/latcontrol_pid.py","file_name":"latcontrol_pid.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"404678750","text":"import matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif'] = ['SimHei'] \n\ninput_values = [1, 2, 3, 4, 5]\nsquares = [1, 4, 9, 16, 25]\n\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(input_values, squares, linewidth=3)\n\n#设置图表标题并给坐标轴加上标签。\nax.set_title(\"square numbers\", fontsize=18)\nax.set_xlabel(\"values\", fontsize=14)\nax.set_ylabel(\"square\", fontsize=14)\n\n#设置刻度标记的大小\nax.tick_params(axis='both', labelsize=14)\nplt.show()","sub_path":"Python Work/Exercise/grammar/mpl_squares.py","file_name":"mpl_squares.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"216324581","text":"\"\"\"\n****************************************************************************************************\n:copyright (c) 2019-2020 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted\nprovided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice, this list of conditions\nand the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright notice, this list of conditions\nand the following disclaimer in the documentation and/or other materials provided with the\ndistribution.\n\nNeither the name of the copyright holder nor the names of its contributors may be used to endorse\nor promote products derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR\nIMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER\nIN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\nOF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n****************************************************************************************************\n\"\"\"\n\nimport json\nimport os\n\nfrom jinja2 import Environment, FileSystemLoader\n\n\nclass ETSTemplate:\n \"\"\"This class will template the ETS modelica model.\"\"\"\n\n def __init__(self, thermal_junction_properties_geojson, system_parameters_geojson, ets_from_building_modelica):\n \"\"\"\n thermal_junction_properties_geojson contains the ETS at brief and at higher level;\n system_parameters_geojson contains the ETS with details ;\n ets_from_building_modelica contains the modelica model of ETS ;\n \"\"\"\n super().__init__()\n\n self.thermal_junction_properties_geojson = thermal_junction_properties_geojson\n self.thermal_junction_properties_geojson = self.thermal_junction_properties_geojson.replace(\"\\\\\", \"/\")\n\n self.system_parameters_geojson = system_parameters_geojson\n if \"\\\\\" in self.system_parameters_geojson:\n self.system_parameters_geojson = self.system_parameters_geojson.replace(\"\\\\\", \"/\")\n\n self.ets_from_building_modelica = ets_from_building_modelica\n if \"\\\\\" in self.ets_from_building_modelica:\n self.ets_from_building_modelica = self.ets_from_building_modelica.replace(\"\\\\\", \"/\")\n\n # get the path of modelica-buildings library\n # temporarily copied here to reduce repo size\n directory_up_one_level = os.path.abspath(os.path.join(__file__, \"../..\"))\n self.directory_modelica_building = os.path.join(\n directory_up_one_level + \"/modelica/CoolingIndirect.mo\"\n )\n if \"\\\\\" in self.directory_modelica_building:\n self.directory_modelica_building = self.directory_modelica_building.replace(\"\\\\\", \"/\")\n\n # go up two levels of directory, to get the path of tests folder for ets\n # TODO: we shouldn't be writing to the test directory in this file, only in tests.\n directory_up_two_levels = os.path.abspath(os.path.join(__file__, \"../../..\"))\n self.directory_ets_templated = os.path.join(\n directory_up_two_levels + \"/tests/output_ets\"\n )\n if \"\\\\\" in self.directory_ets_templated:\n self.directory_ets_templated = self.directory_ets_templated.replace(\"\\\\\", \"/\")\n\n if not os.path.isdir(self.directory_ets_templated):\n os.mkdir(self.directory_ets_templated)\n else:\n pass\n\n # here comes the Jinja2 function: Environment()\n # it loads all the \"*.mot\" files into an environment by Jinja2\n self.template_env = Environment(\n loader=FileSystemLoader(\n searchpath=os.path.join(os.path.dirname(os.path.abspath(__file__)), \"templates\")\n )\n )\n\n def check_ets_thermal_junction(self):\n \"\"\"check if ETS info are in thermal-junction-geojson file\"\"\"\n with open(self.thermal_junction_properties_geojson, \"r\") as f:\n data = json.load(f)\n\n ets_general = False\n for key, value in data.items():\n if key == \"definitions\":\n # three levels down to get the ETS signal\n junctions = data[\"definitions\"][\"ThermalJunctionType\"][\"enum\"]\n if \"ETS\" in junctions:\n ets_general = True\n else:\n pass\n\n return 
ets_general\n\n def check_ets_system_parameters(self):\n \"\"\"check detailed parameters of ETS\"\"\"\n with open(self.system_parameters_geojson, \"r\") as f:\n data = json.load(f)\n\n ets_parameters = False\n # four levels down to get the ets model description\n # ets_overall = data[\"definitions\"][\"building_def\"][\"properties\"][\"ets\"]\n # three levels down to get the parameters\n ets_parameters = data[\"definitions\"][\"ets_parameters\"][\"properties\"]\n # print (\"est_parameters are: \", type(ets_parameters) )\n return ets_parameters\n\n def check_ets_from_building_modelica(self):\n \"\"\"check if ETS-indirectCooling are in modelica building library\"\"\"\n ets_modelica_available = os.path.isfile(self.ets_from_building_modelica)\n\n return ets_modelica_available\n\n def to_modelica(self):\n \"\"\"convert ETS json to modelica\"\"\"\n # Here come the Jinja2 function: get_template(), which reads into templated ets model.\n # CoolingIndirect.mot was manually created as a starting point, by adding stuff following Jinja2 syntax.\n # it has all the necessary parameters which need to be changed through templating.\n ets_template = self.template_env.get_template(\"CoolingIndirect.mot\")\n\n # TODO: Seems like the ets_data below should allow defaults from\n # the system parameters JSON file, correct?\n # ets model parameters are from the schema.json file, default values only.\n ets_data = self.check_ets_system_parameters()\n\n # Here comes the Jina2 function: render()\n file_data = ets_template.render(ets_data=ets_data)\n\n # write templated ETS back to modelica file , to the tests folder for Dymola test\n path_ets_templated = os.path.join(self.directory_ets_templated, \"ets_cooling_indirect_templated.mo\")\n\n if os.path.exists(path_ets_templated):\n os.remove(path_ets_templated)\n with open(path_ets_templated, \"w\") as f:\n f.write(file_data)\n\n # write templated ETS back to building-modelica folder for Dymola test\n path_writtenback = os.path.join(os.path.abspath(os.path.join(__file__, \"../..\")) + \"/modelica/\")\n\n if os.path.exists(os.path.join(path_writtenback, \"ets_cooling_indirect_templated.mo\")):\n os.remove(os.path.join(path_writtenback, \"ets_cooling_indirect_templated.mo\"))\n with open(os.path.join(path_writtenback, \"ets_cooling_indirect_templated.mo\"), \"w\") as f:\n f.write(file_data)\n\n return file_data\n\n def templated_ets_openloops_dymola(self):\n \"\"\"after we creating the templated ets, we need to test it in Dymola under open loops.\n Here we refactor the example file: CoolingIndirectOpenLoops,\n to test our templated ets model.\n \"\"\"\n file = open(\n os.path.join(os.getcwd(), \"geojson_modelica_translator\") + \"/modelica/CoolingIndirectOpenLoops.mo\", \"r\",\n )\n cooling_indirect_filename = \"/CoolingIndirectOpenLoops_Templated.mo\"\n\n # if the modelica example file is existed, delete it first\n path_openloops = os.path.join(os.path.abspath(os.path.join(__file__, \"../..\")) + \"/modelica/\")\n\n if os.path.exists(path_openloops + cooling_indirect_filename):\n os.remove(path_openloops + cooling_indirect_filename)\n\n # create the modelica example file for Dymola test\n # TODO: Replace this with the ModelicaFile Class --\n # extend ModelicaFile class if does not support.\n # Theoretically it is doable using extend clause from Modelica.\n # But we need to change the original ETS model first, in order to extend.\n # This is Michael Wetter suggested approach.\n # if so, we don't need to template modelica models, but we need to connect the modelica 
components\n repl_dict = {}\n from_str = \"model CoolingIndirectOpenLoops\"\n to_str = \"model CoolingIndirectOpenLoops_Templated\\n\"\n repl_dict[from_str] = to_str\n from_str = (\n \"Buildings.Applications.DHC.EnergyTransferStations.CoolingIndirect coo(\"\n )\n to_str = \"Buildings.Applications.DHC.EnergyTransferStations.ets_cooling_indirect_templated coo(\"\n repl_dict[from_str] = to_str\n from_str = \"end CoolingIndirectOpenLoops;\"\n to_str = \"end CoolingIndirectOpenLoops_Templated;\"\n repl_dict[from_str] = to_str\n\n with open(path_openloops + cooling_indirect_filename, \"w\") as examplefile:\n for f in file:\n fx = f\n for from_str, to_str in repl_dict.items():\n # TODO: f.string() causes errors, check code\n if fx.strip() == from_str.strip():\n fx = f.replace(from_str, to_str)\n examplefile.write(fx)\n\n return examplefile\n\n def connect(self):\n \"\"\"connect ETS-modelica to building-modelica (specifically TEASER modelica).\n This function will be modified in future\"\"\"\n pass\n","sub_path":"geojson_modelica_translator/model_connectors/ets_template.py","file_name":"ets_template.py","file_ext":"py","file_size_in_byte":10066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"141440985","text":"#!/usr/bin/env python\n\"\"\"\npython implementation of super_auto.f\nc aligns atom in pdb file so as to minimize\nc rms atom distance compared to reference pdb file\nc branch off super.f that automatically seraches for matching atom pairs\n21aug2020: added calculation of mean absolute dedivation (MAD)\njuly 2021 - for pdb inputs with duplicate atom names - e.g. from gold\nprevent atoms being used > once for a match\n\"\"\"\nimport sys\nimport math\nimport pdb_methods as pm\nimport numpy as np\n#=====================================================\ndef supq(co1,co2,nat):\n \"\"\" find rotation matrix to minimix rmsd - we assume centers\n have already been moved to the origin using method of Kearsley\n Acta Cryst (1989) A45:208 usng quaternions\n \"\"\"\n #\n # calculate omega matrix of Kearsley et al.\n omega = np.zeros((4,4),'float')\n qmin = np.zeros(4,'float')\n for i in range(nat):\n # diagonal terms\n omega[0][0] += (co1[i][0] - co2[i][0])**2 + (co1[i][1] - co2[i][1])**2 + (co1[i][2] - co2[i][2])**2\n omega[1][1] += (co1[i][0] - co2[i][0])**2 + (co1[i][1] + co2[i][1])**2 + (co1[i][2] + co2[i][2])**2\n omega[2][2] += (co1[i][0] + co2[i][0])**2 + (co1[i][1] - co2[i][1])**2 + (co1[i][2] + co2[i][2])**2\n omega[3][3] += (co1[i][0] + co2[i][0])**2 + (co1[i][1] + co2[i][1])**2 + (co1[i][2] - co2[i][2])**2\n # off-diagonal terms\n omega[1][0] += (co1[i][1]+co2[i][1])*(co1[i][2]-co2[i][2]) - (co1[i][1]-co2[i][1])*(co1[i][2]+co2[i][2])\n omega[2][0] += (co1[i][2]+co2[i][2])*(co1[i][0]-co2[i][0]) - (co1[i][2]-co2[i][2])*(co1[i][0]+co2[i][0])\n omega[3][0] += (co1[i][0]+co2[i][0])*(co1[i][1]-co2[i][1]) - (co1[i][0]-co2[i][0])*(co1[i][1]+co2[i][1])\n omega[2][1] += (co1[i][0]-co2[i][0])*(co1[i][1]-co2[i][1]) - (co1[i][0]+co2[i][0])*(co1[i][1]+co2[i][1])\n omega[3][1] += (co1[i][0]-co2[i][0])*(co1[i][2]-co2[i][2]) - (co1[i][0]+co2[i][0])*(co1[i][2]+co2[i][2])\n omega[3][2] += (co1[i][1]-co2[i][1])*(co1[i][2]-co2[i][2]) - (co1[i][1]+co2[i][1])*(co1[i][2]+co2[i][2])\n #\n for i in range(4):\n for j in range(i+1,4):\n omega[i][j] = omega[j][i]\n #print('\\nOmega matrix: \\n',omega,'\\n')\n eign,eigv = np.linalg.eig(omega)\n #print('\\nEigen values: \\n',eign)\n #print('\\nEigen vectors: \\n',eigv)\n imn = 0\n eign_mn = eign[0]\n imx = 0\n eign_mx 
= eign[0]\n for i in range(4):\n if(eign[i] < eign_mn):\n imn = i\n eign_mn = eign[i]\n if(eign[i] > eign_mx):\n imx = i\n eign_mx = eign[i]\n if(eign_mn < 1.e-6):\n print('Warning: eigen value close to or below 0: ',eign_mn)\n #print('min,max eign: ',eign[imn],eign[imx])\n rms_mn = math.sqrt(eign_mn/nat)\n rms_mx = math.sqrt(eign_mx/nat)\n #print('min, max rmsd possible by rotating: ',rms_mn,rms_mx)\n #print('Final RMS Deviation over selected set (A):: ',rms_mn)\n qmin[0] = - eigv[0][imn]\n for k in range(1,4):\n qmin[k] = eigv[k][imn]\n #print('min rmsd quaternion: ',qmin)\n return rms_mn,qmin\n\ndef qttomt(qt):\n \"\"\" convert quaternion to roation matrix\"\"\"\n rmt = np.zeros((3,3),'float')\n rmt[0][0] = qt[0]**2+qt[1]**2-qt[2]**2-qt[3]**2\n rmt[1][1] = qt[0]**2-qt[1]**2+qt[2]**2-qt[3]**2\n rmt[2][2] = qt[0]**2-qt[1]**2-qt[2]**2+qt[3]**2\n #\n rmt[1][0] = 2.*(qt[1]*qt[2]+qt[0]*qt[3])\n rmt[0][1] = 2.*(qt[1]*qt[2]-qt[0]*qt[3])\n #\n rmt[2][0] = 2.*(qt[1]*qt[3]-qt[0]*qt[2])\n rmt[0][2] = 2.*(qt[1]*qt[3]+qt[0]*qt[2])\n #\n rmt[2][1] = 2.*(qt[2]*qt[3]+qt[0]*qt[1])\n rmt[1][2] = 2.*(qt[2]*qt[3]-qt[0]*qt[1])\n trace = rmt[0][0] + rmt[1][1] + rmt[2][2]\n cosang = (trace - 1.)/2.\n angle = 180.*math.acos(cosang)/math.pi\n #print('angle: ',angle)\n return rmt,angle\n#\n#=====================================================\n#main\n#=====================================================\nif(len(sys.argv) < 3):\n print('Usage: python super_auto.py ref_pdbfile mov_pdbfile')\n sys.exit()\n#print(sys.argv)\n#\n# read pdb files\n#\npdb_ref = pm.pdb_struct()\npdb_ref.readfile(sys.argv[1])\nnref = pdb_ref.natom\nprint(sys.argv[1],' has ',nref,' atoms')\n#\npdb_mov = pm.pdb_struct()\npdb_mov.readfile(sys.argv[2])\nnmov = pdb_mov.natom\nprint(sys.argv[2],' has ',nmov,' atoms')\n#\n# write header for aligned file\n#\npdb_out = open('super_auto_py.pdb','w')\nhead = 'REMARK pdb file ' + sys.argv[2] + '\\n'\npdb_out.write(head)\nhead = 'REMARK aligned with ' + sys.argv[1] + '\\n'\npdb_out.write(head)\n#\n# find matching atom pairs\n#\nnmatch = 0\nindx1 = []\nindx2 = []\nused = []\nfor j in range(nmov):\n used.append(False)\nfor i in range(nref):\n for j in range(nmov):\n if(used[j]): continue # prevent using atom twice when atom names not unique\n if(i not in indx2):\n ifind = 1\n if(pdb_ref.name[i] != pdb_mov.name[j]): ifind = 0\n if(pdb_ref.res[i] != pdb_mov.res[j]): ifind = 0\n if(pdb_ref.resnum[i] != pdb_mov.resnum[j]): ifind = 0\n if(ifind == 1):\n nmatch += 1\n used[j] = True # prevent using atom twice when atom names not unique\n #print('matched ref ',i,' moving ',j)\n indx1.append(j)\n indx2.append(i)\n break\nprint('# of matched atom pairs: ',nmatch)\npdb_out.write('REMARK # of matched atom pairs: %6d\\n'%(nmatch))\nprint('----')\n#\n# make working copy of coords\n#\ncrd_ref = np.zeros((nmatch,3),'float')\ncrd_mov = np.zeros((nmatch,3),'float')\nxyz = [0.,0.,0.]\n#rmsd = 0.\nfor i in range(nmatch):\n iref = indx2[i]\n imov = indx1[i]\n print(imov,pdb_mov.name[imov],iref,pdb_ref.name[iref])\n for k in range(3):\n crd_ref[i][k]= pdb_ref.coords[iref][k]\n crd_mov[i][k]= pdb_mov.coords[imov][k]\n #del2 = (crd_mov[i][k] - crd_ref[i][k])**2\n #rmsd += del2\n #print(crd_ref[i],crd_mov[i],del2)\nprint('----')\n#rmsd = math.sqrt(rmsd/nmatch)\n#print('rmsd: ',rmsd) # check\n#print(crd_ref)\n#print(crd_mov)\n#\n# initial rmsdev, and centroids\n#\nrms = 0.\nmad = 0.\ncen_ref = [0.,0.,0.]\ncen_mov = [0.,0.,0.]\ntrn_vec = [0.,0.,0.]\nfor i in range(nmatch):\n dist2 = 0.\n for k in range(3):\n cen_ref[k] += 
crd_ref[i][k]\n cen_mov[k] += crd_mov[i][k]\n dist2 += (crd_ref[i][k] - crd_mov[i][k])**2\n #rms += (crd_ref[i][k] - crd_mov[i][k])**2\n rms += dist2\n mad += math.sqrt(dist2)\nrms = math.sqrt(rms/nmatch)\nmad /= nmatch\nprint('Initial Rms Deviation, MAD over selected set (A):: %8.3f %8.3f'%(rms,mad))\npdb_out.write('REMARK Initial Rms Deviation, MAD over selected set (A):: %8.3f %8.3f\\n'%(rms,mad))\nfor k in range(3):\n cen_ref[k] = cen_ref[k]/nmatch\n cen_mov[k] = cen_mov[k]/nmatch\n trn_vec[k] = cen_ref[k] - cen_mov[k]\n#print('ref molc centroid: ',cen_ref)\n#print('mov molc centroid: ',cen_mov)\n#print('translation vector: ',trn_vec)\npdb_out.write('REMARK translation vector: %8.3f %8.3f %8.3f\\n'%(trn_vec[0],trn_vec[1],trn_vec[2]))\ntrn_mag = math.sqrt(trn_vec[0]**2 + trn_vec[1]**2 + trn_vec[2]**2)\n#\n# move molecules to the origin before finding rotation\n#\nrms = 0.\nfor i in range(nmatch):\n for k in range(3):\n crd_ref[i][k] -= cen_ref[k]\n crd_mov[i][k] -= cen_mov[k]\n rms += (crd_ref[i][k] - crd_mov[i][k])**2\nrms = math.sqrt(rms/nmatch)\nprint('Rms Deviation after translation (A):: %8.3f'%(rms))\n#\n# find best rotation matrix\nrms_mn,qmin = supq(crd_ref,crd_mov,nmatch)\n#print('min rmsd quaternion: ',qmin)\n#\n# convert quaternion to rotation matrix and rotation magnitude\nrotmat,angle = qttomt(qmin)\n#print(rotmat)\nprint('Magnitude of Translation (A) & rotation (o): %8.2g %8.2f\\n' % (trn_mag,angle))\npdb_out.write('REMARK Magnitude of Translation (A) & rotation (o): %8.2g %8.2f\\n' % (trn_mag,angle))\n#print('Final RMS Deviation over selected set (A):: ',rms_mn)\n#pdb_out.write('REMARK Final Rms Deviation over selected set (A):: %8.3f\\n'%(rms_mn))\n#\n# apply rotation to moving set so we can compute final MAD \n# (and final rms again, but directly, not by minimum eigen value)\n#\nxyz1 = [0.,0.,0.]\nrms_mn = 0.\nmad_mn = 0.\nfor n in range(nmatch):\n for j in range(3):\n xyz1[j] = 0.\n for k in range(3):\n xyz1[j] += rotmat[j][k]*crd_mov[n][k]\n dist2 = 0.\n for k in range(3):\n dist2 += (crd_ref[n][k] - xyz1[k])**2\n rms_mn += dist2\n mad_mn += math.sqrt(dist2)\nrms_mn = math.sqrt(rms_mn/nmatch)\nmad_mn /= nmatch\nprint('Final Rms Deviation, MAD over selected set (A):: %8.3f %8.3f'%(rms_mn,mad_mn))\npdb_out.write('REMARK Final Rms Deviation, MAD over selected set (A):: %8.3f %8.3f\\n'%(rms_mn,mad_mn))\n#\n# apply rotation and translation to entire moving set and write out new pdb file\n#\nfor n in range(nmov):\n for k in range(3):\n xyz[k] = pdb_mov.coords[n][k] - cen_mov[k]\n for j in range(3):\n xyz1[j] = 0.\n for k in range(3):\n xyz1[j] += rotmat[j][k]*xyz[k]\n for k in range(3):\n pdb_mov.coords[n][k] =xyz1[k] + cen_ref[k]\npm.pdb_write(pdb_out,pdb_mov)\npdb_out.close()\n","sub_path":"src/super_auto.py","file_name":"super_auto.py","file_ext":"py","file_size_in_byte":8590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"467389626","text":"from django.shortcuts import render\nfrom django.conf import settings\nfrom django.views.decorators.cache import cache_page\nfrom django.http import JsonResponse\nfrom django.db.models import F\n\nfrom protwis.context_processors import site_title\nfrom news.models import News\nfrom common.models import ReleaseNotes, ReleaseStatistics, Citation\nfrom protein.models import Protein, ProteinCouplings\nfrom structure.models import StructureComplexModel\nfrom contactnetwork.models import InteractingResiduePair\nfrom signprot.models import SignprotComplex, SignprotStructure\nfrom 
googleapiclient.discovery import build\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n\n\n@cache_page(60 * 60 * 24)\ndef index(request):\n request.session.flush()\n\n context = {}\n\n # title of the page\n context['site_title'] = site_title(request)['site_title']#settings.SITE_TITLE\n context['documentation_url'] = settings.DOCUMENTATION_URL\n\n # analytics\n context['google_analytics_key'] = settings.GOOGLE_ANALYTICS_KEY\n\n if settings.GOOGLE_ANALYTICS_API:\n # Based on https://developers.google.com/analytics/devguides/reporting/core/v3/quickstart/service-py\n # from googleapiclient.discovery import build\n # from oauth2client.service_account import ServiceAccountCredentials\n # Define the auth scopes to request.\n scope = 'https://www.googleapis.com/auth/analytics.readonly'\n key_file_location = settings.GOOGLE_ANALYTICS_API\n\n # Fetched from API -- look at original code to re-fetch if changes.\n profile_id = '77082434'\n\n # Authenticate and construct service.\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n key_file_location, scopes=[scope])\n # Build the service object.\n service = build('analytics', 'v3', credentials=credentials)\n\n users_year = service.data().ga().get(ids='ga:' + profile_id, start_date='365daysAgo', end_date='today', metrics='ga:users').execute().get('rows')[0][0]\n users_month = service.data().ga().get(ids='ga:' + profile_id, start_date='30daysAgo', end_date='today', metrics='ga:users').execute().get('rows')[0][0]\n\n context['users'] = \"GPCRdb, GproteinDb, and ArrestinDb had {:,} different users since this date last year and {:,} users in the last 30 days (Google Analytics).\".format(int(users_year), int(users_month))\n\n # get news\n context['news'] = News.objects.order_by('-date').all()[:3]\n\n # get release notes\n try:\n context['release_notes'] = ReleaseNotes.objects.all()[0]\n rel_stats = list(ReleaseStatistics.objects.filter(release=context['release_notes'])\\\n .values_list(\"statistics_type__name\", \"value\"))\n\n # Create dictionary and process part of the results\n context['release_statistics'] = []\n if context['site_title']=='GproteinDb':\n context['release_statistics'].append({\"statistics_type\": \"\" + \"Human G proteins\" + \"\", \"value\": \"\" + \"{:,}\".format(Protein.objects.filter(family__parent__parent__name='Alpha', species__common_name='Human', accession__isnull=False).count()) + \"\"})\n context['release_statistics'].append({\"statistics_type\": \"\" + \"Species orthologs\" + \"\", \"value\": \"\" + \"{:,}\".format(Protein.objects.filter(family__parent__parent__name='Alpha', accession__isnull=False).count()) + \"\"})\n\n context['release_statistics'].append({\"statistics_type\": \"Experimental structures\", \"value\" : \"\"})\n signcomp = SignprotComplex.objects.all()\n context['release_statistics'].append({\"statistics_type\": \"\" + \"G proteins\" + \"\", \"value\": \"\" + \"{:,}\".format(signcomp.count()+SignprotStructure.objects.all().count()) + \"\"})\n context['release_statistics'].append({\"statistics_type\": \"\" + \"GPCR-G protein complexes\" + \"\", \"value\": \"\" + \"{:,}\".format(SignprotComplex.objects.all().count()) + \"\"})\n\n context['release_statistics'].append({\"statistics_type\": \"Structure models\", \"value\" : \"\"})\n context['release_statistics'].append({\"statistics_type\": \"\" + \"GPCR-G protein complexes\" + \"\", \"value\": \"\" + \"{:,}\".format(StructureComplexModel.objects.all().count()-SignprotComplex.objects.filter(structure__refined=True).count()) + 
\"\"})\n context['release_statistics'].append({\"statistics_type\": \"\" + \"Refined complex structures\" + \"\", \"value\": \"\" + \"{:,}\".format(signcomp.filter(structure__refined=True).count()) + \"\"})\n\n context['release_statistics'].append({\"statistics_type\": \"Structure interactions\", \"value\" : \"\"})\n interface_interactions_count = InteractingResiduePair.objects.filter(referenced_structure__in=signcomp.values_list('structure', flat=True)).exclude(res1__protein_conformation_id=F('res2__protein_conformation_id')).count()\n context['release_statistics'].append({\"statistics_type\": \"\" + \"GPCR-G protein interface\" + \"\", \"value\": \"\" + \"{:,}\".format(interface_interactions_count) + \"\"})\n\n context['release_statistics'].append({\"statistics_type\": \"Couplings\", \"value\" : \"\"})\n context['release_statistics'].append({\"statistics_type\": \"\" + \"GPCR-G protein coupling\" + \"\", \"value\": \"\" + \"{:,}\".format(ProteinCouplings.objects.all().count()) + \"\"})\n\n context['release_statistics'].append({\"statistics_type\": \"Mutations\", \"value\" : \"\"})\n context['release_statistics'].append({\"statistics_type\": \"\" + \"Interface mutations\" + \"\", \"value\": \"\" + \"{:,}\".format(54) + \"\"})\n else:\n rename_dictionary = {\"Exp. GPCR structures\" : \"GPCRs\", \"Exp. Gprotein structures\" : \"G proteins\", \"GPCR structure models\": \"GPCRs\", \"GPCR-G protein structure models\": \"GPCR-G protein complexes\", \"Refined GPCR structures\": \"Refined GPCR structures\"}\n first_struct = -1\n first_model = -1\n count = 0\n for entry in rel_stats:\n if first_struct < 0 and \"Exp.\" in entry[0]:\n first_struct = count\n elif first_model < 0 and \"model\" in entry[0]:\n first_model = count\n\n if entry[0] in rename_dictionary:\n context['release_statistics'].append({\"statistics_type\": \"\" + rename_dictionary[entry[0]] + \"\", \"value\": \"\" + \"{:,}\".format(entry[1]) + \"\"})\n else:\n context['release_statistics'].append({\"statistics_type\": \"\" + entry[0] + \"\", \"value\": \"\" + \"{:,}\".format(entry[1]) + \"\"})\n count += 1\n\n # Adjusted formatting for release notes\n context['release_statistics'].insert(first_model, {\"statistics_type\": \"Structure models\", \"value\" : \"\"})\n context['release_statistics'].insert(first_struct, {\"statistics_type\": \"Experimental structures\", \"value\" : \"\"})\n\n\n except IndexError:\n context['release_notes'] = ''\n context['release_statistics'] = []\n\n return render(request, 'home/index.html', context)\n\n@cache_page(60 * 60 * 24 * 7)\ndef citations_json(request):\n context = {}\n citations_q = Citation.objects.all().values_list(\"url\", \"video\", \"docs\", \"main\", \"page_name\", \"publication__title\", \"publication__authors\", \"publication__year\", \"publication__reference\",\n \"publication__journal__name\", \"publication__web_link__index\").order_by(\"-publication__year\", \"page_name\")\n response = JsonResponse(list(citations_q), safe=False)\n return response\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"171297696","text":"import pygame\n\n# It seems that up to USEREVENT + 3 are already taken.\n# Anyway, an event for server announces.\n\n# It's about time for the server to advertise its presence.\nE_ANNOUNCE = pygame.USEREVENT + 4\n# A state change has occurred.\nE_STATE = pygame.USEREVENT + 5\n\n# Player in the lobby.\nS_LOBBY = 0\n# Player 
creating a new server.\nS_CREATE = 1\n# Player joining an existing game.\nS_JOIN = 2\n# Player in the game.\nS_GAME = 3\n# Player in the game, placing ships.\nS_GAME_PLACING = 4\n# Player in the game, waiting for their turn.\nS_GAME_WAITING = 5\n# Player's turn, cherry-picking the tile to bomb.\nS_GAME_SHOOTING = 6\n\nS_GAME_LAST = 6\n","sub_path":"bship/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"255167499","text":"\"\"\"!doge city: Watchdog info for all hosts\n!such status : Watchdog info for host\n!watchdog log : tails the watchdog log for that host\n!orphans: Shows latest versions, and hosts that do not conform\n\"\"\"\n\nimport subprocess\nimport re\nimport requests\nimport shlex\nfrom requests.auth import HTTPBasicAuth\nfrom limbo import conf\nfrom collections import defaultdict\n\n\nnagios_user = conf.nagios_user\nnagios_pass = conf.nagios_pass\n\nDB = 'mysql -A -u%s -p%s -hmysql-budget-slave.prod.adnxs.net -D optimization' % (conf.db_user, conf.db_pass)\n\ndef watchdog_status(body):\n reg = re.compile('!such[\\s|_]status (.*)', re.IGNORECASE)\n match = reg.match(body)\n if not match:\n return False\n host = match.group(1)\n try:\n int(host)\n host = \"{}.bm-etl-optimization.prod.lax1\".format(host)\n except ValueError:\n pass\n cmd = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s \"cat /var/run/watchdog.status\"' % host\n stat = subprocess.check_output(cmd, shell=True)\n return stat\n\n\ndef all_watchdog_status(body):\n reg = re.compile('!doge[\\s|_]city', re.IGNORECASE)\n match = reg.match(body)\n if not match:\n return False\n url = \"https://multimonitor.nym2.adnxs.net/check_mk/view.py?service=etl-optimization-watchdog&opthostgroup=&host=&view_name=servicedesc&st0=on&st1=on&st2=on&st3=on&stp=on&output_format=python\"\n response = requests.get(url, auth=HTTPBasicAuth(nagios_user, nagios_pass), verify=False)\n data = eval(response.text)\n output = \"\"\n for i in data:\n output += \"Host: {h}\\tStatus: {s}\\tDoge: {d}\\n\".format(h=i[1], s=i[0], d=i[2])\n return \"```%s```\" % output\n\n\ndef watchdog_log(body):\n reg = re.compile('!watchdog log (.*)', re.IGNORECASE)\n match = reg.match(body)\n if not match:\n return False\n host = match.group(1)\n try:\n int(host)\n host = \"{}.bm-etl-optimization.prod.lax1\".format(host)\n except:\n pass\n cmd = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s \"tail -n 15 /var/log/adnexus/watchdog.log\"' % host\n stat = subprocess.check_output(shlex.split(cmd))\n return \"\\n\" + stat\n\n\ndef orphans_get_lazy_hosts():\n query = \"\"\"select distinct host\n from work_queue_task\n where host not in (select t.host\n from optimization.work_queue_task t\n join work_queue_job_cache jc\n on jc.deleted = 0 and t.job_id = jc.job_id\n WHERE t.status = 'running') order by 1\n ;\"\"\"\n\n command_str = 'echo \"' + query + '\" | ' + DB\n result = subprocess.check_output(command_str, shell=True)\n if not result:\n return []\n else:\n return result.split('\\n')[1:]\n\ndef orphans(body):\n if not body.startswith('!orphans'):\n return False\n\n url = \"https://multimonitor.nym2.adnxs.net/check_mk/view.py?service=etl-optimization-watchdog&opthostgroup=&host=&view_name=servicedesc&st0=on&st1=on&st2=on&st3=on&stp=on&output_format=python\"\n response = requests.get(url, auth=HTTPBasicAuth(nagios_user, nagios_pass), verify=False)\n data = eval(response.text)\n output = \"\"\n max_version = 
\"\"\n state_set = set()\n # map of maps {version -> {state -> [(appname, host)]}}\n version_state_host_list_map = defaultdict(lambda: defaultdict(list))\n for i in data:\n # status = i[0]\n host = i[1]\n message = i[2]\n matches = re.finditer(r\"(?P\\S*)\\[pid:(\\S*), version:(?P\\S*), state:(?P\\S*)]\", \\\n message, re.IGNORECASE)\n for match in matches:\n appname = match.group('appname')\n version = match.group('version')\n state = match.group('state')\n state_set.add(state)\n version_state_host_list_map[version][state].append((appname, host))\n\n if version > max_version:\n max_version = version\n\n output += \"Latest version: {}\\n\".format(max_version)\n if 'RESTARTING' not in state_set and len(version_state_host_list_map) == 1:\n output += \"No orphans!\"\n else:\n output += \"\\n\"\n\n lazy_host_list = orphans_get_lazy_hosts()\n\n for version, state_host_list_map in sorted(version_state_host_list_map.iteritems(), reverse=True):\n version_output = \"\\nVersion: {}\\n\".format(version)\n state_output_list = []\n for state, appname_host_list in sorted(state_host_list_map.iteritems()):\n state_output = \"\\tState: {}\".format(state)\n if version != max_version or state == 'RESTARTING':\n worker_bees = []\n funemployed = []\n for appname, host in appname_host_list:\n appname_host_str = \"\\t\\t\\t{0:25} {1}\".format(appname, host)\n if host in lazy_host_list:\n funemployed.append(appname_host_str)\n else:\n worker_bees.append(appname_host_str)\n\n init_string = \"\\t\\t\\t{0:25} {1}\\n\".format('Application', 'Host')\n if worker_bees:\n worker_str = init_string + \"\\n\".join(worker_bees)\n state_output += \"\\n\\t\\tWu-Tang Killah Bees:\\n{}\".format(worker_str)\n if funemployed:\n lazy_str = init_string + \"\\n\".join(funemployed)\n state_output += \"\\n\\t\\tLazy Bums:\\n{}\".format(lazy_str)\n state_output_list.append(state_output)\n\n if state_output_list:\n output += version_output\n output += \"\\n\".join(state_output_list) + \"\\n\"\n return \"```%s```\" % output\n\n\ndef on_message(msg, server):\n text = msg.get(\"text\", \"\")\n return watchdog_status(text) or all_watchdog_status(text) or orphans(text) or watchdog_log(text)\n","sub_path":"limbo/plugins/watchdog.py","file_name":"watchdog.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"203510564","text":"#!/usr/bin/env python\n\nclass Juego():\n def __init__(self,titulo,precio,etiquetas,plataformas):\n self.titulo=titulo\n self.precio=precio\n self.etiquetas=etiquetas\n self.plataformas=plataformas\n\nimport requests\nimport lxml.html\n\nhtml = requests.get ('https://store.steampowered.com/explore/new/')\n\n#print(html.content)\n\ndoc = lxml.html.fromstring(html.content)\n\nnewReleases = doc.xpath('//div[@id=\"tab_newreleases_content\"]')[0]\n\n#print(newReleases)\n\ntittles=newReleases.xpath('.//div[@class=\"tab_item_name\"]/text()')\n\n#print(tittles)\n\nprices = newReleases.xpath('.//div[@class=\"discount_final_price\"]/text()')\n\n#print(prices)\n\ntags = newReleases.xpath('.//div[@class=\"tab_item_top_tags\"]')\n\ntotalTags=[]\n\nfor tag in tags:\n totalTags.append(tag.text_content())\n\ntotalTags = [tag.split(', ') for tag in totalTags]\n\n#print(totalTags)\n\nplatformsDiv = newReleases.xpath('.//div[@class=\"tab_item_details\"]')\n\ntotalPlatforms=[]\n\nfor game in platformsDiv:\n namePlatform = game.xpath('.//span[contains(@class, \"platform_img\")]')\n platforms=[t.get('class').split(' ')[-1] for t in namePlatform]\n if 
'had_separator' in platforms:\n platforms.remove('had separator')\n totalPlatforms.append(platforms)\n\n#print(totalPlatforms)\n\noutput = []\n\nfor info in zip(tittles,prices,totalTags,totalPlatforms):\n response={}\n response['tittle']=info[0]\n response['price']=info[1]\n response['tags']=info[2]\n response['platforms']=info[3]\n output.append(response)\n\n#print(output)\n\ncontador=1\n\nfor info in zip(tittles,prices,totalTags,totalPlatforms):\n juego1=Juego(info[0],info[1],info[2],info[3])\n print(\"----------------------------------------------\")\n print(\"Juego \"+str(contador))\n contador=contador+1\n print(\"Titulo: \"+ juego1.titulo)\n print(\"Precio: \"+ juego1.precio)\n print(\"Etiquetas: \"+ str(juego1.etiquetas))\n print(\"Plataformas: \"+ str(juego1.plataformas))\n print(\"----------------------------------------------\")","sub_path":"scrapweb/scrapweb.py","file_name":"scrapweb.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"528279690","text":"import os\n\nfrom qua import constants\nfrom qua import settings as qua_settings\n\n\nAPP_NAME = qua_settings.PROGRAM_NAME + '.suggests'\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = 'somestrongdjangokey'\n\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\nINSTALLED_APPS = [\n 'django.contrib.contenttypes',\n 'rest_framework',\n 'django_rq',\n 'suggests.apps.SuggestsConfig',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'qua.rest.middleware.LoggingMiddleware'\n]\n\nif DEBUG:\n DATABASE_HOST = '127.0.0.1'\nelse:\n DATABASE_HOST = qua_settings.POSTGRESQL['host']\n\nDATABASES = {\n 'default': {\n 'ENGINE': qua_settings.POSTGRESQL['engine'],\n 'NAME': APP_NAME.replace('.', '_'),\n 'HOST': DATABASE_HOST,\n 'PORT': qua_settings.POSTGRESQL['port'],\n 'USER': qua_settings.POSTGRESQL['user'],\n 'PASSWORD': qua_settings.POSTGRESQL['password']\n }\n}\n\nROOT_URLCONF = 'app.urls'\n\nWSGI_APPLICATION = 'app.wsgi.application'\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Moscow'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [],\n 'DEFAULT_AUTHENTICATION_CLASSES': [],\n 'UNAUTHENTICATED_USER': None,\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'EXCEPTION_HANDLER': 'qua.rest.exceptions.api_exception_handler',\n}\n\nRQ_QUEUES = {\n APP_NAME: {\n 'HOST': qua_settings.REDIS['host'],\n 'PORT': qua_settings.REDIS['port'],\n 'DB': qua_settings.REDIS['db_cache']\n },\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(asctime)s %(filename)s:'\n '%(funcName)s:%(lineno)s '\n '%(levelname)s: %(message)s'\n },\n 'simple': {\n 'format': '%(asctime)s %(message)s'\n },\n },\n 'handlers': {\n 'qua': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(qua_settings.LOGS_DIR, APP_NAME + '.log'),\n 'formatter': 'verbose'\n },\n 'requests': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(\n qua_settings.LOGS_DIR, APP_NAME + '.requests.log'),\n 'formatter': 'simple'\n }\n },\n 'loggers': {\n 'qua': {\n 'handlers': ['qua'],\n 'level': 'DEBUG',\n 'propagate': True\n },\n 'qua.requests': {\n 'handlers': ['requests'],\n 'level': 
'INFO',\n 'propagate': False\n }\n },\n}\n\nSUGGEST_PREPROCESSOR = {\n 'max_last': 100, # How old user history queries will be used (days)\n 'min_query_len': 2, # Minimal query len will be used in suggests\n 'replacements': [ # What letters will be replaces\n ('ё', 'е') # while preprocessing suggests data\n ]\n}\n\n# How many suggests will be returned by default\nSUGGESTS_DEFAULT_LIMIT = 10\n\n# Tree file properties\nSUGGESTS_TREE_PREFIX = APP_NAME + '.suggests_tree'\n\nSUGGESTS_TREE_PATH = os.path.join(qua_settings.DATA_DIR, SUGGESTS_TREE_PREFIX)\n\n# Check new data and recreate tree every such interval\nSUGGESTS_UPDATE_INTERVAL = 2 * constants.MINUTE\n\n# Property for request handler. Every `n` query check updates in tree file\nSUGGESTS_REQUEST_UPDATE_INTERVAL = 1000\n","sub_path":"suggests/src/app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"440626872","text":"# Create two functions to encode and then decode a string using the Rail Fence Cipher.\r\n# This cipher is used to encode a string by placing each character successively in a diagonal along a set of \"rails\".\r\n# First start off moving diagonally and down. When you reach the bottom, reverse direction and move diagonally and up until you reach the top rail.\r\n# Continue until you reach the end of the string. Each \"rail\" is then read left to right to derive the encoded string.\r\n#\r\n# For example, the string \"WEAREDISCOVEREDFLEEATONCE\" could be represented in a three rail system as follows:\r\n#\r\n# W E C R L T E\r\n# E R D S O E E F E A O C\r\n# A I V D E N\r\n#\r\n# The encoded string would be:\r\n#\r\n# WECRLTEERDSOEEFEAOCAIVDEN\r\n\r\n\r\ndef matrix_filler(string, n, filler=\"\"):\r\n\t\"\"\" Function to build a matrix and to place string's letters into it using rail pattern\"\"\"\r\n\trow, col = 0, 0\r\n\tdown = 1 # movement direction determinator\r\n\tmatrix = [[\"%\" for i in range(2 * len(string))] for j in range(n)] # matrix creation\r\n\tfor letter in string:\r\n\t\tmatrix[row][col] = letter if not filler else filler # if we just want to mark items that should be used for deciphering - we use filler, otherwise we put string elements to prepare for encryption\r\n\t\tcol += 1\r\n\t\trow += down\r\n\t\tdown = down * -1 if row == 0 or row == n - 1 else down # if we're at 1st or last row - we need to invert our movement direction\r\n\treturn matrix\r\n\r\n\r\ndef encode_rail_fence_cipher(string, n):\r\n\tret = \"\"\r\n\tmatrix = matrix_filler(string, n) # build matrix with string's items placed in rail pattern\r\n\tfor s in matrix:\r\n\t\tret += \"\".join(s).replace(\"%\", \"\") # get row lists, make them strings, add them one by one to result\r\n\treturn ret\r\n\r\n\r\ndef decode_rail_fence_cipher(string, n):\r\n\tret = \"\"\r\n\ts = list(reversed(string))\r\n\tmatrix = matrix_filler(string, n, \"#\") # we fill matrix with symbols marking proper rail placement (since we know n), otherwise we'd just run 1 to ? n-size trying to decipher(bruteforce attack)\r\n\tfor rows in range(len(matrix)):\r\n\t\tfor columns in range(len(matrix[0])):\r\n\t\t\tif matrix[rows][columns] == \"#\": # if matrix element is in proper place, we replace it with string's symbol\r\n\t\t\t\ttmp = s.pop()\r\n\t\t\t\tmatrix[rows][columns] = tmp\r\n\trow, col = 0, 0\r\n\tdown = 1\r\n\tfor _ in range(len(string)): # we read the matrix using given n in hope that we will decipher. 
Since we know n - we do decipher right away :)\r\n\t\tret += matrix[row][col]\r\n\t\tcol += 1\r\n\t\trow += down\r\n\t\tdown = down * -1 if row == 0 or row == n - 1 else down\r\n\treturn ret\r\n\r\n\r\nprint(encode_rail_fence_cipher(\"WEAREDISCOVEREDFLEEATONCE\", 3))\r\nprint(decode_rail_fence_cipher(\"WECRLTEERDSOEEFEAOCAIVDEN\", 3))\r\n","sub_path":"Rail Fence Cipher.py","file_name":"Rail Fence Cipher.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"271063581","text":"import sys\n\ntry:\n with open(sys.argv[1]) as file:\n rawinput = file.read()\nexcept IndexError:\n rawinput = sys.stdin.read()\n\nexpressions = map(str.strip, rawinput.splitlines())\n\ndef torpn(exp):\n stack = []\n for char in exp:\n if char in '0123456789':\n yield int(char)\n elif char == '+':\n stack.append(char)\n elif char == '*':\n while stack and stack[-1] != '(' and stack[-1] == '+':\n yield stack.pop()\n stack.append(char)\n elif char == '(':\n stack.append(char)\n elif char == ')':\n while stack and stack[-1] != '(':\n yield stack.pop()\n assert stack.pop() == '('\n else:\n continue\n yield from reversed(stack)\n\ndef evalrpn(rpn):\n stack = []\n for item in rpn:\n if isinstance(item, int):\n stack.append(item)\n elif item == '+':\n stack.append(stack.pop() + stack.pop())\n elif item == '*':\n stack.append(stack.pop() * stack.pop())\n assert len(stack) == 1\n return stack.pop()\n\ndef evalexp(exp):\n return evalrpn(torpn(exp))\n\nprint(sum(map(evalexp, expressions)))\n","sub_path":"day-18/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"44658017","text":"import os\nimport sys\n\nfrom logger import logger\n\ncurr_path = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(curr_path, '../../'))\nimport os.path as osp\n\n# define a simple data batch\n\nimport mxnet as mx\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nfrom mxnet import nd\nfrom time import time\n\nfrom retrieval.dataloader.transform import transform_test, transform_padlong\nfrom retrieval.mxsymbol import symbol_factory\n\n\nclass Retrieval_model():\n def __init__(self, ctx, first_class_id):\n self.DEBUG = False\n self.prefix = 'retrieval'\n self.first_class_id = str(first_class_id)\n if int(first_class_id) in [5, ]:\n model = 'resnet50_v2'\n else:\n model = 'densenet201'\n logger.info('use model %s to get fea' % model)\n self.ctx = ctx\n self.model = self.get_mod(\n folder_name=osp.join(curr_path, '../../', 'retrieval/checkpoint/%s' % self.first_class_id),\n checkpoint_name='net_best.params', ctx=ctx, model=model)\n self.database = self.load_search_database(\n [osp.join('database', self.first_class_id), ])\n\n (self.cropus_index, self.cropus_index_inverse) = self.load_search_index()\n self.cropus_hist = self.load_hist_database()\n\n def load_hist_database(self):\n base = osp.join(curr_path, '../../', 'retrieval/cropus/hist', self.first_class_id)\n hist_path = osp.join(base, 'cropus_hist.npy')\n assert osp.exists(hist_path), 'hist %s not found' % hist_path\n return np.load(hist_path)\n\n def get_hist(self, img, **kwargs):\n if not isinstance(img, nd.NDArray):\n img1 = nd.array(img)\n else:\n img1 = img\n if int(self.first_class_id) not in [4, 5]:\n img1 = mx.img.resize_short(img1, 224)\n img1 = mx.img.center_crop(img1, (112, 112))[0].asnumpy().astype(np.uint8)\n img = cv2.cvtColor(img1, 
cv2.COLOR_RGB2LAB)\n return np.mean(np.transpose(img, (2, 0, 1)), axis=(1, 2))\n else:\n c_detector = kwargs['color_detector']\n tic = time()\n pos, imgs = c_detector.detect_and_return(img1, thresh=0.24)\n logger.info('use %f time to detect color' % (time() - tic))\n if kwargs.get('debug') is not None:\n c_detector.visualize_detection_matplot(pos, img1)\n res = []\n for i in range(len(imgs)):\n img = imgs[i].asnumpy().astype(np.uint8)\n if kwargs.get('debug') is not None:\n plt.imshow(img)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB) # 在cvtColor前要先变成uint8\n m = np.mean(np.transpose(img, (2, 0, 1)), axis=(1, 2))\n res.append(m)\n return np.mean(res, axis=0)\n\n def compare_histgram(self, h1, h2):\n # logger.info('h1')\n # logger.info(h1)\n # logger.info('h2')\n # logger.info(h2)\n return np.mean(np.abs(h1 - h2))\n\n # @lru_cache(maxsize=4)\n def load_search_database(self, kinds):\n res = []\n for kind in kinds:\n cropus_datapath = osp.join(curr_path, '../../', 'retrieval/cropus', kind, 'cropus1920.npy')\n assert osp.exists(cropus_datapath), 'No cropus'\n res.append(np.load(cropus_datapath))\n return res\n\n def load_search_index(self):\n p = osp.join(curr_path, '../../', 'retrieval/cropus/index', self.first_class_id)\n cropus_datapath = osp.join(p, 'cropus.lst')\n\n def get_index(fn):\n with open(fn, 'r') as f:\n f_line = list(map(lambda x: x.strip(), f.readlines()))\n d = {}\n for idx, i in enumerate(f_line):\n d[i] = idx\n return d, f_line\n\n return get_index(cropus_datapath)\n\n def get_feature(self, img):\n tic = time()\n if not isinstance(img, nd.NDArray):\n img = nd.array(img)\n cv2.imwrite('test.jpg', img.asnumpy())\n if int(self.first_class_id) in [5, ]:\n img, _ = transform_padlong(img, None)\n img = nd.stack(*[img,])\n fea = self.model(img)\n else:\n imgs = transform_test(img)\n fea = nd.mean(self.model(imgs), axis=0)\n logger.info('use %s to get fea' % (time() - tic))\n '''\n fea = \\\n logger.info('h1')\n logger.info(h1)\n nd.mean(nd.stack(*[self.model(transform_val(img, None)[0].expand_dims(axis=0)) for i in range(tt)]),\n axis=0)[0]\n '''\n # print(np.sum(np.abs(fea.asnumpy())))\n # fea = self.pca.transform(fea.asnumpy()[np.newaxis,])\n\n # fea = self.pca.transform(fea.asnumpy().reshape(1,-1))\n # print(np.sum(np.abs(fea)))\n return fea.asnumpy().reshape(1, -1)\n\n def search_database(self, img, cropus_data, color_level, style_level, **kwargs):\n dof_threshold_config = {\n 6: ([0.24, 0.26, 0.28], [6, 6, 20], 2048, 64),\n 4: ([0.20, 0.21, 0.22], [8, 8, 10], 2048 * 4, 512),\n 5: ([0.45, 0.25, 0.22], [8, 6, 10], 2048 * 4, 512),\n 7: ([0.20, 0.21, 0.22], [8, 8, 10], 2048 * 4, 512)\n }\n threshold_styles, color_styles, c1, c2 = dof_threshold_config[int(self.first_class_id)]\n threshold_style = threshold_styles[style_level]\n color_style = color_styles[color_level]\n anchor = self.get_feature(img)\n if int(self.first_class_id) in [4, 5]:\n anchor_hist = self.get_hist(img, color_detector=kwargs['color_detector'])\n else:\n anchor_hist = self.get_hist(img)\n tic = time()\n fea_dist = np.sum((cropus_data - anchor) ** 2, axis=1)\n tic2 = time()\n logger.info('use %s second to cal distance' % (tic2 - tic))\n res = list(map(lambda x: x, filter(lambda x: fea_dist[x] < threshold_style, np.argsort(fea_dist)[:c1])))\n cropus_hists = self.cropus_hist[res]\n distance = np.arange(cropus_hists.shape[0]).astype(np.float32)\n for idx, i in enumerate(cropus_hists):\n distance[idx] = self.compare_histgram(anchor_hist, i)\n temp = []\n temp1 = []\n for idx, i in enumerate(res):\n if distance[idx] < 
color_style:\n temp.append(i)\n '''\n elif distance[idx] < 15:\n temp1.append(i)\n '''\n res = list(map(lambda x: self.cropus_index_inverse[x], temp[:c2]))\n logger.info('fea dist')\n logger.info(list(map(lambda x: fea_dist[x], temp[:c2]))[:16])\n # logger.info(sorted(fea_dist)[:c2])\n if len(res) == 0:\n logger.info('no result')\n elif len(res) < 10:\n logger.info('result counts less than 10')\n return res\n\n def get_mod(self, folder_name, checkpoint_name, model, ctx=mx.cpu()):\n net = symbol_factory.get_test_symbol(net_work=model, ctx=ctx)\n net.load_params(osp.join(folder_name, checkpoint_name), ctx=ctx)\n net.hybridize()\n return net\n\n\nif __name__ == '__main__':\n from ssd.detect.color_detector import ColorDetector\n from ssd import demo\n\n color_detector = demo.get_ssd_model(detector_class=ColorDetector,\n prefix=os.path.join(curr_path, '../../ssd', 'checkpoint', 'maincolor', 'ssd'),\n ctx=mx.cpu())\n img = cv2.cvtColor(cv2.imread(os.path.join('/Users/haowei/Downloads',\n 'mmexport1524748107151.jpg')),\n cv2.COLOR_BGR2RGB)\n\n plt.imshow(img)\n plt.show()\n for first_class_id in [5,4,6,7 ]:\n model = Retrieval_model(ctx=mx.cpu(), first_class_id=first_class_id)\n pairs_json = {}\n tic = time()\n res = model.search_database(img, model.database[0], 0, 0, color_detector=color_detector)\n print(res[:8])\n # print(model.search('820115'))\n # imglist = read_to_list(test_txt_path)\n\n # fea1 = model.get_feature(img)\n print(time() - tic)\n\n # time.sleep(1000)\n '''\n print(nd.sum(fea1 == 0))\n fea2 = nd.stack(*[model.get_feature(osp.join('../demo', i)) for i in\n ['177838.jpg', '202027.jpg','377045.jpg','526853.jpg','img.jpeg','img1.jpg','1.png','img2.jpg','3.png']])\n print(nd.argsort(nd.sum((fea1 - fea2) ** 2,axis=1)))\n '''\n","sub_path":"retrieval/retrieval/retrieval_model.py","file_name":"retrieval_model.py","file_ext":"py","file_size_in_byte":8417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"283305822","text":"import tensorflow as tf\nfrom models_utils import *\nfrom utils.cifar10_utils import *\nfrom run_model import per_device_batch_size\nimport os\nFILTERS = 64\nKERNELS = 7\nSTRIDES = 2\nPOOL_SIZE = 3\nPOOL_STRIDE = 2\n\nSTART_FILTER = 64\nRESNET_FILTERS = [3, 4, 6, 3]\nBLOCK_STRIDES = [2, 2, 2, 2]\n\nFINAL_SIZE = 2048\nNUM_CLASSES = 10\nDATA_DIR = \"data_dir\"\nBATCH_SIZE = 128\nNUM_EPOCHS = 1000\n\n_HEIGHT = 32\n_WIDTH = 32\n_NUM_CHANNELS = 3\n_DEFAULT_IMAGE_BYTES = _HEIGHT * _WIDTH * _NUM_CHANNELS\n# The record is the image plus a one-byte label\n_RECORD_BYTES = _DEFAULT_IMAGE_BYTES + 1\n_NUM_CLASSES = 10\n_NUM_DATA_FILES = 5\n\ndef resnet_model(features, training, use_batchnorm, data_format, name=\"resnet_50\"):\n with tf.variable_scope(name):\n inputs = resnet_model_no_last_layer(features, training, use_batchnorm, data_format, name)\n inputs = tf.layers.dense(inputs=inputs, units=NUM_CLASSES)\n inputs = tf.identity(inputs, 'final_dense')\n return inputs\n\ndef resnet_model_no_last_layer(features, training, use_batchnorm, data_format, name=\"resnet_50\"):\n with tf.variable_scope(name):\n inputs = tf.transpose(features, (0, 3, 1, 2))\n meta_data = (FILTERS, KERNELS, STRIDES, POOL_SIZE, POOL_STRIDE)\n inputs = classical_head(inputs, name, training, data_format, use_batchnorm, meta_data)\n inputs = tf.nn.relu(inputs)\n \n for i, num_blocks in enumerate(RESNET_FILTERS):\n num_filters = START_FILTER * (2**i)\n inputs = block_layer(inputs, num_filters, True, bottle_neck, num_blocks,\n strides=BLOCK_STRIDES[i], 
training=training,\n name='block_layer{}'.format(i + 1), data_format=data_format, use_batchnorm=use_batchnorm)\n \n norm = batch_norm if use_batchnorm else layer_norm\n\n inputs = norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n\n # The current top layer has shape\n # `batch_size x pool_size x pool_size x final_size`.\n # ResNet does an Average Pooling layer over pool_size,\n # but that is the same as doing a reduce_mean. We do a reduce_mean\n # here because it performs better than AveragePooling2D.\n axes = [2, 3] if data_format == 'channels_first' else [1, 2]\n inputs = tf.reduce_mean(inputs, axes, keepdims=True)\n inputs = tf.identity(inputs, 'final_reduce_mean')\n\n inputs = tf.reshape(inputs, [-1, FINAL_SIZE])\n return inputs\n\ndef input_fn_train():\n return input_fn(\n is_training=True, data_dir=DATA_DIR,\n batch_size=per_device_batch_size(\n BATCH_SIZE, 1),\n num_epochs=NUM_EPOCHS,\n num_gpus=1)\n\ndef input_fn_eval():\n return input_fn(\n is_training=False, data_dir=DATA_DIR,\n batch_size=per_device_batch_size(\n BATCH_SIZE, 1),\n num_epochs=1)\n","sub_path":"CNN/SampleCNNs/resnet50.py","file_name":"resnet50.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"506292005","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nimport os\n\npath = './'\n\nfiles = []\n# r=root, d=directories, f = files\nfor r, d, f in os.walk(path):\n for file in f:\n if '.csv' in file:\n files.append(os.path.join(r, file))\n\nfor f in files:\n print(f)\n df = pd.read_csv(f,sep=',')\n try:\n df.plot()\n plt.title(f)\n plt.show()\n except:\n print('No data in',f)","sub_path":"awk/nmon_pandas.py","file_name":"nmon_pandas.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"389990443","text":"from flask import jsonify\nfrom config import *\nfrom entities.SmfTopic import Topic\n\ndef getAll():\n data = session.query(Topic).all()\n return jsonify([i.serialize for i in data])\n\n#Those values are by default, therefore it is created here.\ndef create(id_member, id_msg):\n topic = Topic(approved = 1,id_board = 4,id_first_msg = id_msg,\n id_last_msg = id_msg,id_member_started = id_member,\n id_member_updated = id_member,id_poll = 0,\n id_previous_board = 0,id_previous_topic = 0,\n is_sticky = 0,locked = 0,num_replies = 0,num_views = 0,\n unapproved_posts = 0\n )\n session.add(topic)\n session.commit()\n response = session.query(Topic).filter_by(id_first_msg = id_msg)\n return response[0].id_topic\n","sub_path":"repositories/topic_repository.py","file_name":"topic_repository.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"74807300","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import ImageGrab\r\nimport pyautogui\r\nimport time\r\nimport keyboard\r\nrun = True\r\nwhile run:\r\n #pyautogui fail safe\r\n pyautogui.PAUSE = 0.1\r\n #end program\r\n if keyboard.is_pressed(\"k\"): \r\n break\r\n wind = np.array(ImageGrab.grab(bbox=(0,0,1365,767)))\r\n wind = cv2.cvtColor(wind, cv2.COLOR_RGB2GRAY)\r\n hero_power = cv2.imread('D:\\\\Users\\\\lobot\\Desktop\\\\pyprojects\\\\macro test\\\\shield.JPG', 0)\r\n game_start = cv2.imread('D:\\\\Users\\\\lobot\\\\Desktop\\\\pyprojects\\\\macro test\\\\start.JPG', 0)\r\n card_warrior = 
cv2.imread('D:\\\\Users\\\\lobot\\\\Desktop\\\\pyprojects\\\\macro test\\\\minionw.PNG', 0)\r\n card_all = cv2.imread('D:\\\\Users\\\\lobot\\\\Desktop\\\\pyprojects\\\\macro test\\\\minionwil.PNG', 0)\r\n hero = cv2.imread('D:\\\\Users\\\\lobot\\\\Desktop\\\\pyprojects\\\\macro test\\\\warriiorh.JPG', 0)\r\n\r\n #hero power\r\n sift = cv2.xfeatures2d.SIFT_create() \r\n kp_hpower1, des_hpower1 = sift.detectAndCompute(hero_power,None)\r\n kp_hpower2, des_hpower2 = sift.detectAndCompute(wind,None) \r\n bf = cv2.BFMatcher()\r\n matches = bf.knnMatch(des_hpower1,des_hpower2, k=2)\r\n match_pts_hpower = []\r\n for m1, m2 in matches:\r\n if m1.distance < 0.3*m2.distance:\r\n idx = m1.trainIdx\r\n match_pts_hpower.append(kp_hpower2[idx].pt)\r\n \r\n if len(match_pts_hpower) != 0:\r\n match_pts_hpower = np.array(match_pts_hpower)\r\n pyautogui.click(match_pts_hpower[0, 0], match_pts_hpower[0, 1], button = \"left\")\r\n \r\n\r\n #gmae start\r\n sift2 = cv2.xfeatures2d.SIFT_create() \r\n kp_gstart1, des_gstart1 = sift2.detectAndCompute(game_start,None)\r\n kp_gstart2, des_gstart2 = sift2.detectAndCompute(wind,None) \r\n bf2= cv2.BFMatcher()\r\n matches2 = bf2.knnMatch(des_gstart1,des_gstart2, k=2)\r\n match_pts_gstart = []\r\n for m1, m2 in matches2:\r\n if m1.distance < 0.3*m2.distance:\r\n idx = m1.trainIdx\r\n match_pts_gstart.append(kp_gstart2[idx].pt)\r\n \r\n if len(match_pts_gstart) != 0:\r\n match_pts_gstart = np.array(match_pts_gstart)\r\n pyautogui.click(match_pts_gstart[0, 0], match_pts_gstart[0, 1], button = \"left\")\r\n \r\n\r\n #card player\r\n sift3 = cv2.xfeatures2d.SIFT_create() \r\n kp_wcard1, des_wcard1 = sift3.detectAndCompute(card_warrior,None)\r\n kp_wcard2, des_wcard2 = sift3.detectAndCompute(wind,None)\r\n FLANN_INDEX_KDTREE = 0\r\n index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\r\n search_params = dict(checks=50) # or pass empty dictionary\r\n flann = cv2.FlannBasedMatcher(index_params,search_params)\r\n matches3 = flann.knnMatch(des_wcard1,des_wcard2,k=2)\r\n # Need to draw only good matches, so create a mask\r\n matchesMask = []\r\n # ratio test as per Lowe's paper\r\n for m1,m2 in matches3:\r\n if m1.distance < 0.633*m2.distance:\r\n idx = m1.trainIdx\r\n matchesMask.append(kp_wcard2[idx].pt)\r\n \r\n if len(matchesMask) != 0:\r\n matchesMask = np.array(matchesMask)\r\n pyautogui.click(matchesMask[0, 0],matchesMask[0, 1], button = \"left\") \r\n pyautogui.dragTo(matchesMask[0, 0]-200,matchesMask[0, 1]-200, button = \"left\") \r\n\r\n #play card all\r\n sift4 = cv2.xfeatures2d.SIFT_create() \r\n kp_cardall1, des_cardall1 = sift4.detectAndCompute(card_all,None)\r\n kp_cardall2, des_cardall2 = sift4.detectAndCompute(wind,None) \r\n FLANN_INDEX_KDTREE = 0\r\n index_params2 = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\r\n search_params2 = dict(checks=50) # or pass empty dictionary\r\n flann2 = cv2.FlannBasedMatcher(index_params2,search_params2)\r\n matches4 = flann2.knnMatch(des_cardall1,des_cardall2,k=2)\r\n # Need to draw only good matches, so create a mask\r\n matchesMask2 = []\r\n # ratio test as per Lowe's paper\r\n for m1,m2 in matches3:\r\n if m1.distance < 0.633*m2.distance:\r\n idx = m1.trainIdx\r\n matchesMask2.append(kp_wcard2[idx].pt)\r\n \r\n if len(matchesMask2) != 0:\r\n matchesMask2 = np.array(matchesMask2)\r\n pyautogui.click(matchesMask2[0, 0],matchesMask2[0, 1], button = \"left\") \r\n pyautogui.dragTo(matchesMask2[0, 0]-200,matchesMask2[0, 1]-200, button = \"left\") \r\n \r\n #hero click\r\n sift5 = cv2.xfeatures2d.SIFT_create() \r\n 
kp_hero1, des_hero1 = sift5.detectAndCompute(hero,None)\r\n kp_hero2, des_hero2 = sift5.detectAndCompute(wind,None) \r\n bf3 = cv2.BFMatcher()\r\n matches5 = bf3.knnMatch(des_hero1,des_hero2, k=2)\r\n match_pts_hero = []\r\n for m1, m2 in matches5:\r\n if m1.distance < 0.4*m2.distance:\r\n idx = m1.trainIdx\r\n match_pts_hero.append(kp_hero2[idx].pt)\r\n \r\n if len(match_pts_hero) != 0:\r\n match_pts_hero = np.array(match_pts_hero)\r\n pyautogui.click(match_pts_hero[0, 0], match_pts_hero[0, 1], button = \"left\") \r\n \r\n\r\n\r\ncv2.destroyAllWindows()\r\n","sub_path":"hearthmacro4.py","file_name":"hearthmacro4.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"143990427","text":"# danawa.py\n\nimport time\nimport re\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver # selenium은 webdriver api를 통해 브라우저를 제어함\nfrom selenium.webdriver.common.by import By #https://www.seleniumhq.org/docs/03_webdriver.jsp#locating-ui-elements-webelements\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom selenium.webdriver.support import expected_conditions as EC\n\nmain_url = 'http://www.danawa.com/'\nkeyword =input(\"제품명 입력: \")\ndriver=webdriver.Chrome(\"C:/driver/chromedriver.exe\")\ndriver.get(main_url)\n\ndriver.implicitly_wait(10)\n\n# 입력란 찾기 #AKCSearch # id인지, name 인지 ㅎㅎㅎ \nsearch=driver.find_element_by_id(\"AKCSearch\")\nsearch.clear()\nsearch.send_keys(keyword)\n#search.send_keys(Keys.RETURN)\n# search.submit()\n\n# 검색 버튼 클릭\nbtn_search = driver.find_element_by_css_selector(\"button.btn_search_submit\")\nbtn_search.click()\n\n# driver.find_element_by_css_selector(\"flights-search-controls-root > div > div > form > div:nth-child(3) > button\").click()\n\n'''\ntry:\n element = WebDriverWait(driver, 3).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"oTravelBox\"))\n )\nexcept Exception as e:\n print(\"검색 page 로드시 class 속성이 oTravelBox를 얻으려는 중 예외 발생 : \", e)\n'''\n# 목록 더보기 스크롤링 \n# driver.find_element_by_css_selector(\"div.oTravelBox > ul > li.moreBtnWrap > button\").click()\n# \n# 1\n\n'''\n가격 설정 : 200,000 ~ 500,000원\n추천순 max\n1~3 페이지의 기본 정보 가져오기 : 이미지, 가격, 상품명 등\n'''\n# 최소 가격 설정\nminVal=int(input(\"시작가 :\"))\nmaxVal=int(input(\"한도가 :\"))\nminPrice=driver.find_element_by_id(\"priceRangeMinPrice\")\nminPrice.clear()\nminPrice.send_keys(minVal)\n\n\n# 최대 가격 설정\nmaxPrice=driver.find_element_by_id(\"priceRangeMaxPrice\")\nmaxPrice.clear()\nmaxPrice.send_keys(maxVal)\n\nprice_search = driver.find_element_by_css_selector(\"button.btn_search\")\nprice_search.click()\n\ntry:\n \n for page in range(1, 2):\n driver.execute_script(\"getPage(%s); return false;\" % page)\n\n \n time.sleep(3)\n \n print(\"%s 페이지로 이동!!!\" % page)\n\n\n soup = BeautifulSoup(driver.page_source, \"lxml\" ) \n\n ppItems = soup.select(\"div.prod_pricelist\")\n \n\n for data in ppItems: \n proPrice = data.select(\"p a\")[0].text\n print('가격 :', proPrice)\n \n\nexcept Exception as e:\n print(\"---페이지 파싱 에러\", e)\n # finally:\n time.sleep(3)\n #driver.close()","sub_path":"크롤링프로젝트/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"602364426","text":"from django.conf.urls import url\nfrom django.urls import path\nfrom Test_Task.task_app import views\n\n\nurlpatterns = [\n path('', views.loginIndex, name='login'),\n path('main_page/', 
views.main_page),\n path('submit_fun/', views.submit_fun, name='files'),\n path('filesmryget/', views.filesummary, name='filesmrydata'),\n path('loginpswd/', views.loginpswd, name='loginpswd'),\n\n\n\n]\n","sub_path":"Test_Task/task_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"209211251","text":"import sys\r\n\r\nk = int(input())\r\nl = int(input())\r\nm = int(input())\r\nn = int(input())\r\nd = int(input())\r\n\r\nresult = []\r\n\r\ncount_k = []\r\ncount_l = []\r\ncount_m = []\r\ncount_n = []\r\n\r\n\r\ncount_k = list(range(k-1,d, k))\r\n\r\ncount_l = list(range(l-1,d, l))\r\n\r\ncount_m = list(range(m-1,d, m))\r\n\r\ncount_n = list(range(n-1,d, n))\r\n\r\ndebug = \"count_k = %d\\n , count_l = %d\\n, count_m = %d\\n, count_n = %d\\n\" % (len(count_k), len(count_l), len(count_m), len(count_n))\r\n#print(debug)\r\nall_count = len(list(set(count_k+count_l+count_m+count_n)))\r\nprint(all_count)\r\n","sub_path":"drakons.py","file_name":"drakons.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"584815311","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/twsfolders/_compat.py\n# Compiled at: 2014-07-04 02:20:56\nimport sys\nPY3 = sys.version_info[0] == 3\nif PY3:\n string_types = (\n str,)\n integer_types = (int,)\n text_type = str\nelse:\n string_types = (\n basestring,)\n integer_types = (int, long)\n text_type = unicode\n\ndef is_string(value):\n if isinstance(value, string_types):\n return True\n else:\n return False\n\n\ndef is_text(value):\n if isinstance(value, text_type):\n return True\n else:\n return False\n\n\ndef is_integer(value):\n if isinstance(value, integer_types):\n return True\n else:\n return False","sub_path":"pycfiles/twsfolders-0.1.2-py2.7/_compat.py","file_name":"_compat.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"551319146","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass PII(Model):\n \"\"\"Personal Identifier Information details.\n\n :param email:\n :type email:\n list[~azure.cognitiveservices.vision.contentmoderator.models.Email]\n :param ipa:\n :type ipa:\n list[~azure.cognitiveservices.vision.contentmoderator.models.IPA]\n :param phone:\n :type phone:\n list[~azure.cognitiveservices.vision.contentmoderator.models.Phone]\n :param address:\n :type address:\n list[~azure.cognitiveservices.vision.contentmoderator.models.Address]\n \"\"\"\n\n _attribute_map = {\n 'email': {'key': 'Email', 'type': '[Email]'},\n 'ipa': {'key': 'IPA', 'type': '[IPA]'},\n 'phone': {'key': 'Phone', 'type': '[Phone]'},\n 'address': {'key': 'Address', 'type': '[Address]'},\n }\n\n def __init__(self, email=None, ipa=None, phone=None, address=None):\n super(PII, self).__init__()\n self.email = email\n self.ipa = ipa\n self.phone = phone\n self.address = address\n","sub_path":"azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/models/pii.py","file_name":"pii.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"470385474","text":"from __future__ import absolute_import\nfrom engines import Engine\nfrom copy import deepcopy\nimport time\nfrom math import sqrt, log\nfrom reversi import winner\nimport random\n\n# import pickle\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom board import Board\n\nmainColor = -1 #caclue V as -1\n\nS = dict()\n\nsideAngleValue = [\n [100, -5, 10, 5, 5, 10, -5,100],\n [ -5,-45, 1, 1, 1, 1,-45, -5],\n [ 10, 1, 3, 2, 2, 3, 1, 10],\n [ 5, 1, 2, 1, 1, 2, 1, 5],\n [ 5, 1, 2, 1, 1, 2, 1, 5],\n [ 10, 1, 3, 2, 2, 3, 1, 10],\n [ -5,-45, 1, 1, 1, 1,-45, -5],\n [100, -5, 10, 5, 5, 10, -5,100]\n]\n\nlearning_rate=0.3\nreward_decay=0.9 \ne_greedy=0.5\n\ndef board_hash(board, color):\n s = \"\"\n for x in range(4):\n total = 0\n for y in range(8):\n total = total * 3 + board.pieces[2*x][y] + 1\n for y in range(8):\n total = total * 3 + board.pieces[2*x+1][y] + 1\n s += str(total)\n\n s += str(color + 1)\n return s\n\ndef create_and_link_node(action, parentHashValue, color, board):\n hashValue = board_hash(board, color)\n\n if not hashValue in S:\n S[hashValue] = Node(board, color, hashValue)\n # print(\"sad\")\n # else:\n # print(\"lucky\")\n\n S[parentHashValue].childrens[hashValue] = action\n S[hashValue].linkToParent(action, parentHashValue)\n # print(S)\n # print(hashValue)\n \n return S[hashValue]\n\ndef create_node(color, board):\n hashValue = board_hash(board, color)\n if not hashValue in S:\n S[hashValue] = Node(board, color, hashValue)\n # S[hashValue].exapnd()\n return S[hashValue]\n\nclass Node:\n def __init__(self, board, color, boardHashValue):\n self.parents = {}\n \n self.childrens = {}\n\n self.board = deepcopy(board)\n self.remain_action = self.board.get_legal_moves(color)\n\n self.hashValue = boardHashValue\n self.color = color #color go now\n\n # self.important = False\n \n self.V = 0\n\n def exapnd(self):\n for action in self.remain_action:\n self._append(action)\n\n def _get_result(self):\n if winner(self.board)[0] == 0:\n return 0\n elif 
winner(self.board)[0] == mainColor:\n return 1\n else:\n return -1 \n\n def get_board(self):\n return deepcopy(self.board)\n\n # need rewrite\n def _get_best_action(self):\n best_children = max(self.childrens.items(), key=lambda child: S[child[0]].V)\n\n # print(\"now succeed rate:\")\n # print(S[best_children[0]].V)\n # print(len(S))\n # S[best_children[0]].board.my_display()\n\n return best_children[1]\n\n def _get_worst_action(self):\n best_children = min(self.childrens.items(), key=lambda child: S[child[0]].V)\n\n # print(\"now succeed rate:\")\n # print(1-S[best_children[0]].V)\n # print(len(S))\n # S[best_children[0]].board.my_display()\n\n return best_children[1]\n\n def _eval_r(self,action):\n board = self.get_board()\n board.execute_move(action, self.color)\n\n reward = 0\n for x in range(8):\n for y in range(8):\n if board[x][y]==self.color:\n reward += sideAngleValue[x][y]\n\n return reward\n \n\n def _tree_policy(self):\n if self.remain_action:\n if random.random() < e_greedy:\n return max( self.remain_action, key=lambda action: self._eval_r(action))\n # return max( self.remain_action, key=lambda chessPieces: sideAngleValue[chessPieces[0]][chessPieces[1]])\n # return max( self.remain_action, key=lambda chessPieces: sideAngleValue[chessPieces[0]][chessPieces[1]])\n else:\n return random.choice(self.remain_action)\n else:\n return None\n\n def _simulateOnce(self):\n action = self._tree_policy()\n if action is not None:\n childrenNode = self._append(action)\n childrenNode._simulateOnce()\n else:\n return True\n return False\n\n def linkToParent(self, action, parentHashValue):\n self.parents[parentHashValue] = action\n if len(self.remain_action)==0 and len(self.childrens)==0:\n self.V = self._get_result()\n # self._end_back_forward(result)\n self._back_forward()\n # else:\n # self._back_forward()\n # return\n\n # def _end_back_forward(self, reward):\n # parents = self.parents\n # if len(parents) > 0:\n # for hashValue in parents:\n\n # action = S[hashValue].childrens[self.hashValue]\n # if action in S[hashValue].remain_action:\n # S[hashValue].remain_action.remove(action)\n\n # S[hashValue].V = reward\n # S[hashValue]._back_forward()\n\n\n # there is something wrong\n\n def _back_forward(self):\n isTerminal = len(self.remain_action)==0\n parents = self.parents\n if len(parents) > 0:\n for hashValue in parents:\n\n if isTerminal:\n action = S[hashValue].childrens[self.hashValue]\n if action in S[hashValue].remain_action:\n S[hashValue].remain_action.remove(action)\n\n if len(S[hashValue].childrens)==1 and len(S[hashValue].remain_action)==0:\n S[hashValue].V = self.V\n else:\n predict = S[hashValue].V\n maxHashValue = max( S[hashValue].childrens.items(), key=lambda child: S[child[0]].V)[0]\n\n target = reward_decay * S[maxHashValue].V\n\n S[hashValue].V += learning_rate * (target - predict)\n S[hashValue]._back_forward()\n\n\n\n def _append(self, action):\n board = self.get_board()\n board.execute_move(action, self.color)\n return create_and_link_node(action, self.hashValue, self.color*-1, board)\n\n \ndef nodeToDict(N):\n D = {}\n D[\"V\"] = N.V\n D[\"CO\"] = N.color\n D[\"B\"] = N.board.pieces\n D[\"R\"] = N.remain_action\n D[\"H\"] = N.hashValue\n D[\"P\"] = N.parents\n D[\"CH\"] = N.childrens\n\n return D\n\ndef dictToS(D):\n\n hashValue = D[\"H\"]\n board = Board()\n board.pieces = D[\"B\"]\n\n\n S[hashValue] = Node(board, D[\"CO\"], hashValue)\n S[hashValue].V = D[\"V\"]\n S[hashValue].remain_action = D[\"R\"]\n S[hashValue].hashValue = hashValue\n S[hashValue].parents = 
D[\"P\"]\n S[hashValue].childrens = D[\"CH\"]\n\n\nclass MCTSEngine(Engine):\n def get_move(self, board, color, move_num=None, time_remaining=None, time_opponent=None):\n # player = {-1 : \"Black\", 1 : \"White\"}\n # print(player[color] + \" go now\")\n self.color = color\n if time_remaining > 200 or time_remaining is None:\n time = 55\n else:\n print(\"time will run off\")\n time = time_remaining / 2\n return self.UCT_search(board, time)\n\n def UCT_search(self, board, cal_time):\n root = create_node(self.color, board)\n\n begin = time.time()\n count = 0\n while time.time() - begin < cal_time:\n # print(\"simulateOnce\")\n isOver = root._simulateOnce()\n if isOver:\n # print(\"over\")\n break\n count += 1\n print (\"q learning cacluted \" + str(count) + \" times\")\n if mainColor == self.color:\n return root._get_best_action()\n else:\n return root._get_worst_action()\n\n\n # def __init__(self):\n # print(\"start load\")\n # f = open(\"./temp9\",\"rb\")\n # p = pickle.load(f)\n # for key in p:\n # dictToS(p[key])\n # f.close()\n # print(\"load cache, length:\" + str(len(S)))\n \n # f = open(\"out\", \"w\")\n # for hashValue in S:\n # s = S[hashValue]\n # print(s.hashValue, file = f)\n # print(s.board.toString(), file = f)\n # print(\"parents: \" + str(s.parents), file = f)\n # print(\"childrens: \" + str(s.childrens), file = f)\n # # print(\"V: \" + str(s.V), file = f)\n # print(\"remain_action: \" + str(s.remain_action), file = f)\n # print(\"\\n\\n\", file = f)\n # print(\"end\")\n # f.close()\n # while True:\n # pass\n\n # def close(self):\n # p = {}\n # for hashValue in S:\n # p[hashValue] = nodeToDict(S[hashValue])\n # f = open(\"./temp12\",\"wb\")\n # pickle.dump(p,f,2)\n # f.close()\n # print(\"write cache, length:\" + str(len(p)))\n\n # f = open(\"out\", \"w\")\n # for hashValue in S:\n # s = S[hashValue]\n # print(s.hashValue, file = f)\n # print(s.board.toString(), file = f)\n # print(\"parents: \" + str(s.parents), file = f)\n # print(\"childrens: \" + str(s.childrens), file = f)\n # print(\"V: \" + str(s.V), file = f)\n # print(\"remain_action: \" + str(s.remain_action), file = f)\n # print(\"\\n\\n\", file = f)\n # print(\"end\")\n # f.close()\n# \nengine = MCTSEngine","sub_path":"engines/Qlearning.py","file_name":"Qlearning.py","file_ext":"py","file_size_in_byte":9198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"118985268","text":"from config import *\nfrom time import time\nfrom bomb import Bomb\n\n\nclass Boss:\n def __init__(self):\n self.x = SCREEN_COLS // 2\n self.y = BOSS_Y\n self.strength = BOSS_HEALTH\n self.last_bomb = time()\n self.brick_left = 2\n\n def move(self, paddle):\n if self.x == paddle.x:\n self.x += 0\n elif self.x < paddle.x:\n self.x += BOSS_VEL\n elif self.x > paddle.x:\n self.x -= BOSS_VEL\n\n if round(self.x - BOSS_WIDTH // 2) <= 0:\n self.x = 1 + BOSS_WIDTH // 2\n if round(self.x + BOSS_WIDTH // 2) >= SCREEN_COLS - 1:\n self.x = SCREEN_COLS - 2 - BOSS_WIDTH // 2\n if time() - self.last_bomb >= BOSS_BOMB_TIMEOUT:\n self.last_bomb = time()\n return Bomb(self.x)\n return None\n\n def before_brick(self, ball, brick):\n if ball.y_velocity > 0:\n return True\n return False","sub_path":"boss.py","file_name":"boss.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"650508701","text":"#!/usr/bin/env/python\n\nd = {\n 'A': 'AAA',\n 'B': 'BBB',\n 'C': 'CCC',\n}\n\ndef f(elt):\n if elt in d:\n 
return(d[elt])\n else:\n return(\"Undef\")\n\n\n#lst = ['B', 'A', 'C', 'A']\nlst = ['B', 'A', 'C', 'A', 'D']\n\nprint(d)\n\nprint(lst)\n\n#x = [d[elt] for elt in lst]\nx = [f(elt) for elt in lst]\n\nprint(x)","sub_path":"python/scripts/divers/apply_dict_to_list.py","file_name":"apply_dict_to_list.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"371807244","text":"\"\"\"Constants used by the Eero integration.\"\"\"\nfrom .api.const import (\n ACTIVITY_ADBLOCK_DAY,\n ACTIVITY_ADBLOCK_MONTH,\n ACTIVITY_ADBLOCK_WEEK,\n ACTIVITY_BLOCKED_DAY,\n ACTIVITY_BLOCKED_MONTH,\n ACTIVITY_BLOCKED_WEEK,\n ACTIVITY_DATA_USAGE_DAY,\n ACTIVITY_DATA_USAGE_MONTH,\n ACTIVITY_DATA_USAGE_WEEK,\n ACTIVITY_INSPECTED_DAY,\n ACTIVITY_INSPECTED_MONTH,\n ACTIVITY_INSPECTED_WEEK,\n)\n\n\nACTIVITY_MAP_TO_EERO = {\n \"Ad Blocks (Day)\": ACTIVITY_ADBLOCK_DAY,\n \"Ad Blocks (Week)\": ACTIVITY_ADBLOCK_WEEK,\n \"Ad Blocks (Month)\": ACTIVITY_ADBLOCK_MONTH,\n \"Data Usage (Day)\": ACTIVITY_DATA_USAGE_DAY,\n \"Data Usage (Week)\": ACTIVITY_DATA_USAGE_WEEK,\n \"Data Usage (Month)\": ACTIVITY_DATA_USAGE_MONTH,\n \"Scans (Day)\": ACTIVITY_INSPECTED_DAY,\n \"Scans (Week)\": ACTIVITY_INSPECTED_WEEK,\n \"Scans (Month)\": ACTIVITY_INSPECTED_MONTH,\n \"Threat Blocks (Day)\": ACTIVITY_BLOCKED_DAY,\n \"Threat Blocks (Week)\": ACTIVITY_BLOCKED_WEEK,\n \"Threat Blocks (Month)\": ACTIVITY_BLOCKED_MONTH,\n}\nACTIVITY_MAP_TO_HASS = {v: k for k, v in ACTIVITY_MAP_TO_EERO.items()}\n\nALL_ACTIVITIES = list(ACTIVITY_MAP_TO_EERO.keys())\nDATA_USAGE_ACTIVITIES = [key for key, value in ACTIVITY_MAP_TO_EERO.items() if value in [ACTIVITY_DATA_USAGE_DAY, ACTIVITY_DATA_USAGE_MONTH, ACTIVITY_DATA_USAGE_WEEK]]\n\nATTR_DNS_CACHING_ENABLED = \"dns_caching_enabled\"\nATTR_IPV6_ENABLED = \"ipv6_enabled\"\nATTR_TARGET_EERO = \"target_eero\"\nATTR_TARGET_NETWORK = \"target_network\"\nATTR_THREAD_ENABLED = \"thread_enabled\"\nATTR_TIME_OFF = \"time_off\"\nATTR_TIME_ON = \"time_on\"\n\nATTRIBUTION = \"Data provided by Eero\"\n\nCONF_ACTIVITY = \"activity\"\nCONF_ACTIVITY_CLIENTS = \"clients\"\nCONF_ACTIVITY_EEROS = \"eeros\"\nCONF_ACTIVITY_NETWORK = \"network\"\nCONF_ACTIVITY_PROFILES = \"profiles\"\nCONF_CLIENTS = \"clients\"\nCONF_CODE = \"code\"\nCONF_EEROS = \"eeros\"\nCONF_LOGIN = \"login\"\nCONF_NAME = \"name\"\nCONF_NETWORKS = \"networks\"\nCONF_PROFILES = \"profiles\"\nCONF_USER_TOKEN = \"user_token\"\nCONF_WIRED_CLIENTS = \"wired_clients\"\nCONF_WIRELESS_CLIENTS = \"wireless_clients\"\n\nDATA_COORDINATOR = \"coordinator\"\n\nDOMAIN = \"eero\"\n\nERROR_TIME_FORMAT = \"Time {} should be format 'HH:MM'\"\n\nMANUFACTURER = \"eero\"\n\nMODEL_CLIENT = \"Client\"\nMODEL_EERO = \"eero\"\nMODEL_NETWORK = \"Network\"\nMODEL_PROFILE = \"Profile\"\n\nNIGHTLIGHT_MODE_DISABLED = \"disabled\"\nNIGHTLIGHT_MODE_AMBIENT = \"ambient\"\nNIGHTLIGHT_MODE_SCHEDULE = \"schedule\"\nNIGHTLIGHT_MODES = [\n NIGHTLIGHT_MODE_DISABLED,\n NIGHTLIGHT_MODE_AMBIENT,\n NIGHTLIGHT_MODE_SCHEDULE,\n]\n\nSERVICE_ENABLE_DNS_CACHING = \"enable_dns_caching\"\nSERVICE_ENABLE_IPV6 = \"enable_ipv6\"\nSERVICE_ENABLE_THREAD = \"enable_thread\"\nSERVICE_RESTART_EERO = \"restart_eero\"\nSERVICE_RESTART_NETWORK = \"restart_network\"\nSERVICE_SET_NIGHTLIGHT_MODE = \"set_nightlight_mode\"\n\nUNDO_UPDATE_LISTENER = \"undo_update_listener\"\n\nCONF_SAVE_RESPONSES = \"save_responses\"\nCONF_TIMEOUT = \"timeout\"\n\nVALUES_SCAN_INTERVAL = [30, 60, 120, 300, 600]\nVALUES_TIMEOUT = [10, 15, 30, 45, 
60]\n\nDEFAULT_SAVE_LOCATION = \"/config/custom_components/eero/api/responses\"\nDEFAULT_SAVE_RESPONSES = False\nDEFAULT_SCAN_INTERVAL = VALUES_SCAN_INTERVAL[2]\nDEFAULT_TIMEOUT = VALUES_TIMEOUT[0]\n\nEERO_LOGO_ICON = \"/config/custom_components/eero/logo_icon.png\"\n","sub_path":"custom_components/eero/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"228399302","text":"import csv\nimport re\nimport sys\nimport time\nfrom concurrent.futures import TimeoutError\n\nimport pandas as pd\nimport phonenumbers\nimport psycopg2\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pebble import ProcessPool\nfrom pyisemail import is_email\nfrom selenium import webdriver\nfrom url_normalize import url_normalize\nfrom usp.tree import sitemap_tree_for_homepage\nfrom usp.web_client.requests_client import RequestsWebClient\n\n\nclass _RequestsWebClient(RequestsWebClient):\n __USER_AGENT = 'Mozilla/5.0'\n\n\nclass DomainsAndSubdomains(object):\n words_for_shop = [\n 'checkout', 'shopping cart', 'warenkorb', 'korb', 'basket'\n ]\n\n words_for_goods = [\n 'goods', 'produ', 'commodity', 'ware', 'item', 'article', 'artikel', 'objekte', 'object',\n 'dienstleistungen', 'services', 'service', 'bedienung', 'CHF', 'buy', 'franken', 'pay'\n ]\n\n words_for_company_leader = [\n 'leader', 'head', 'chief', 'Leiter', 'Chef', 'Geschäftsführer', 'Geschäftsleitung', 'führer', 'Director'\n ]\n\n words_for_company_team = [\n 'team', 'staff', 'personnel', 'mitarbeiter', 'Ueber_uns', 'ber uns', 'ber_uns', 'about us', 'about_us',\n 'kontakt', 'contact', 'contatti', 'firma'\n ]\n\n def __init__(self, file, mode='1', timeout=3600): # noqa\n self.file = file\n self.result_file = f'result_{self.file}'\n self.mode = mode\n self.domains = list()\n self.buffer = list()\n self.timeout = timeout\n self.headers = {'User-Agent': 'Mozilla/5.0'}\n\n def get_domains(self):\n \"\"\"get url and other data from file\"\"\"\n\n # open and read the file\n df = pd.read_excel(self.file, engine='openpyxl')\n self.domains = df.to_dict(orient='record')\n\n # create output file\n columns = list(df.columns.values)\n columns.append('shop (Yes/No)')\n columns.append('number of products')\n columns.append('shop-domain')\n columns.append('phone')\n columns.append('phone_main_page')\n columns.append('leader_phone_without_sitemap')\n columns.append('phones_all_pages')\n columns.append('leader_phone_sitemap')\n columns.append('leader_phone_from_team_sitemap')\n columns.append('email')\n columns.append('email_main_page')\n columns.append('leader_email_without_sitemap')\n columns.append('emails_all_pages')\n columns.append('leader_email_sitemap')\n columns.append('leader_email_from_team_sitemap')\n\n # with csv lib\n with open(self.result_file, \"w\", newline=\"\", encoding='UTF-8') as f:\n writer = csv.writer(f)\n writer.writerows([columns])\n\n @staticmethod\n def clear_url(target):\n \"\"\"tidy up url\"\"\"\n return re.sub('.*www\\.', '', target, 1).split('/')[0].strip() # noqa\n\n def normalize_urls_list(self, common_list):\n \"\"\"tidy up the url list\"\"\"\n lst = []\n for dom in common_list:\n dom = dom.replace('www.', '').replace('http://', '').replace('https://', '')\n lst.append(url_normalize(dom))\n lst = list(set(lst))\n final_list = []\n for link in lst:\n try:\n req = requests.get(link, headers=self.headers)\n if req.status_code == 200:\n final_list.append(link)\n except Exception as e:\n print(f'normalize_urls_list: 
{e}')\n return lst\n\n @staticmethod\n def get_sitemap_tree(common_list):\n \"\"\"get all links from sitemap\"\"\"\n sitemap_tree = []\n for link in common_list:\n web_client = _RequestsWebClient()\n tree = sitemap_tree_for_homepage(link, web_client)\n for page in tree.all_pages():\n sitemap_tree.append(page.url)\n return sitemap_tree\n\n def find_phone_by_keyword(self, url, word):\n \"\"\"looking for a phone number on the page by keyword\"\"\"\n phone_list = []\n try:\n html = requests.get(url, headers=self.headers).text\n soup = BeautifulSoup(html, 'lxml')\n phone = soup.find(text=re.compile(word)).parent # TODO\n for match in phonenumbers.PhoneNumberMatcher(str(phone), \"CH\"):\n result = str(match).split(sep=') ', maxsplit=1)[1]\n if result:\n phone_list.append(result)\n if not phone_list:\n for match in phonenumbers.PhoneNumberMatcher(\n str(soup.find(text=re.compile(word)).parent.parent), \"CH\"): # noqa\n result = str(match).split(sep=') ', maxsplit=1)[1]\n if result:\n phone_list.append(result)\n return phone_list\n except Exception as e: # noqa\n return phone_list\n\n def find_email_by_keyword(self, url, word):\n \"\"\"looking for a email on the page by keyword\"\"\"\n email_list = []\n try:\n html = requests.get(url, headers=self.headers).text\n soup = BeautifulSoup(html, 'lxml')\n tag = soup.find(text=re.compile(word)).parent.parent # TODO\n email = tag.find(text=re.compile(r'[\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+')) # noqa\n if email is not None:\n email = email.strip()\n if self.check_email_valid(email) is True:\n email_list.append(email)\n else:\n result = email.split(sep=' ')\n result = [word for word in result if '@' in word]\n if self.check_email_valid(result[0]) is True:\n email_list.append(result[0])\n return email_list\n except Exception as e: # noqa\n print(f'find_email_by_keyword: {e}')\n return email_list\n\n def get_leader_phone_and_email_from_sitemap(self, sitemap_tree):\n \"\"\"looking for the phone number and the email of the head of the company\"\"\"\n leader_phone = []\n leader_email = []\n try:\n for url in sitemap_tree:\n for word in self.words_for_company_leader:\n if word in url:\n leader_phone.append(self.find_phone_by_keyword(url, word))\n leader_email.append(self.find_email_by_keyword(url, word))\n except Exception as e: # noqa\n print(f'get_leader_phone_and_email_from_sitemap: {e}')\n phones = [j for i in leader_phone for j in i]\n phones = self.unique_phones(phones)\n emails = [j for i in leader_email for j in i]\n emails = self.unique_emails(emails)\n return phones, emails\n\n def get_leader_phone_and_email_from_sitemap_section_team(self, sitemap_tree):\n \"\"\"looking for leader phone and email number in team section\"\"\"\n leader_phone_from_team = []\n leader_email_from_team = []\n try:\n for url in sitemap_tree:\n for word in self.words_for_company_team:\n if word in url:\n leader_phone_from_team.append(\n self.find_phones(requests.get(url, headers=self.headers).text, leader=True)\n )\n leader_email_from_team.append(\n self.find_emails(requests.get(url, headers=self.headers).text, leader=True)\n )\n except Exception as e: # noqa\n print(f'get_leader_phone_and_email_from_sitemap_section_team: {e}')\n phones = [j for i in leader_phone_from_team for j in i]\n phones = self.unique_phones(phones)\n emails = [j for i in leader_email_from_team for j in i]\n emails = self.unique_emails(emails)\n return phones, emails\n\n def check_phones_emails_on_every_page_and_count_the_quantity_of_goods(self, sitemap_tree):\n \"\"\"find the number of products (counting the 
number of keywords in the links found in the sitemap)\n and looking for phones, emails on each page\"\"\"\n counter = 0\n all_pages_phone = []\n all_pages_emails = []\n\n # 1 way: counting the number of products according to the sitemap links\n urls_list = str(sitemap_tree)\n\n if self.mode == '1':\n for word in self.words_for_goods:\n counter += urls_list.count(word)\n\n # 2 way: follow each link in sitemap and check keywords on each page\n # (if no goods were found in the way 1)\n # (using requests, since with selenium it will take much longer)\n if self.mode == '2':\n for word in self.words_for_goods:\n counter += urls_list.count(word)\n if counter == 0:\n counter, all_pages_phone, all_pages_emails = self.check_every_page(sitemap_tree)\n\n # 3 way: follow each link in sitemap and check keywords on each page (anyway)\n if self.mode == '3':\n counter, all_pages_phone, all_pages_emails = self.check_every_page(sitemap_tree)\n\n return counter, all_pages_phone, all_pages_emails\n\n def check_every_page(self, sitemap_tree):\n \"\"\"check each page for product availability (by keywords)\"\"\"\n counter = 0\n phones = []\n emails = []\n for url in sitemap_tree:\n try:\n response = requests.get(url, headers=self.headers)\n text = response.text\n for word in self.words_for_goods:\n counter += text.count(word)\n phones.append(self.find_phones(text, leader=True))\n emails.append(self.find_emails(text, leader=True))\n except Exception as e:\n print(f'check_every_page: {e}')\n phones = [j for i in phones for j in i]\n phones = self.unique_phones(phones)\n emails = [j for i in emails for j in i]\n emails = self.unique_emails(emails)\n return counter, phones, emails\n\n def find_phones(self, text, leader=False):\n \"\"\"the method searches for phone numbers on the page\"\"\"\n phones = list()\n try:\n if leader is False:\n for match in phonenumbers.PhoneNumberMatcher(text, \"CH\"):\n phone = str(match).split(sep=') ', maxsplit=1)[1]\n if phone:\n phones.append(phone)\n if leader is True:\n soup = BeautifulSoup(text, 'lxml')\n for word in self.words_for_company_leader:\n if word in str(soup):\n try:\n for match in phonenumbers.PhoneNumberMatcher(str(soup.find(text=re.compile(word)).parent), \"CH\"): # noqa TODO\n result = str(match).split(sep=') ', maxsplit=1)[1]\n if result:\n phones.append(result)\n except Exception: # noqa\n continue\n if not phones: # noqa\n for word in self.words_for_company_leader:\n if word in str(soup):\n try:\n for match in phonenumbers.PhoneNumberMatcher(str(soup.find(text=re.compile(word)).parent.parent), \"CH\"): # noqa TODO\n result = str(match).split(sep=') ', maxsplit=1)[1]\n if result:\n phones.append(result)\n except Exception: # noqa\n continue\n phones = self.unique_phones(phones)\n except Exception as e:\n print(f'find_phones: {e}')\n return phones\n\n def find_emails(self, text, leader=False):\n \"\"\"the method searches for email on the page\"\"\"\n emails = list()\n try:\n soup = BeautifulSoup(text, 'lxml')\n if leader is False:\n results = soup.findAll(text=re.compile(r'[\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+')) # noqa\n for email in results:\n if email is not None:\n email = email.strip()\n if self.check_email_valid(email) is True:\n emails.append(email)\n else:\n result = email.split(sep=' ')\n result = [word for word in result if '@' in word]\n if self.check_email_valid(result[0]) is True:\n emails.append(result[0])\n if leader is True:\n for word in self.words_for_company_leader:\n if word in str(soup):\n try:\n tag = soup.find(text=re.compile(word)).parent # 
TODO\n email = tag.find(text=re.compile(r'[\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+')) # noqa\n if email is not None:\n email = email.strip()\n if self.check_email_valid(email) is True:\n emails.append(email)\n else:\n result = email.split(sep=' ')\n result = [word for word in result if '@' in word]\n if self.check_email_valid(result[0]) is True:\n emails.append(result[0])\n except Exception: # noqa\n continue\n if not emails:\n for word in self.words_for_company_leader:\n if word in str(soup):\n try:\n tag = soup.find(text=re.compile(word)).parent.parent # TODO\n email = tag.find(text=re.compile(r'[\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+')) # noqa\n if email is not None:\n email = email.strip()\n if self.check_email_valid(email) is True:\n emails.append(email)\n else:\n result = email.split(sep=' ')\n result = [word for word in result if '@' in word]\n if self.check_email_valid(result[0]) is True:\n emails.append(result[0])\n except Exception: # noqa\n continue\n emails = self.unique_emails(emails) # noqa\n return emails\n except Exception as e:\n print(f'find_emails: {e}')\n return emails\n\n def task_done(self, future):\n \"\"\"this is needed to handle the timeout in multithreaded mode\"\"\" # noqa\n try:\n result = future.result() # noqa (blocks until results are ready)\n except TimeoutError as error:\n self.buffer.append(future.item)\n print(\"Function took longer than %d seconds\" % error.args[1])\n except Exception as error:\n self.buffer.append(future.item)\n print(\"Function raised %s\" % error)\n\n def start(self):\n \"\"\"start of the program\"\"\"\n # get domains from file\n self.get_domains()\n\n # create a pool for multi-threaded processing\n with ProcessPool(max_workers=5, max_tasks=10) as pool:\n for i in self.domains:\n future = pool.schedule(self.check_domain, args=[i], timeout=self.timeout)\n future.item = i\n future.add_done_callback(self.task_done)\n\n # add objects to the database with which a connection could not be established\n try:\n self.run_buffer()\n except Exception as e:\n print(f'run_buffer error: {e}')\n\n def run_buffer(self):\n \"\"\"add objects to the database with which a connection could not be established\"\"\"\n for item in self.buffer:\n main_page_phone = ''\n all_pages_phone = ''\n leader_phone = ''\n leader_phone_from_team = ''\n main_page_email = ''\n all_pages_email = ''\n leader_email = ''\n leader_email_from_team = ''\n domain = str(item['Internet-Adresse']) # noqa\n is_shop = False\n leader_phone_without_sitemap = ''\n leader_email_without_sitemap = ''\n\n if 'shop' in domain or 'store' in domain:\n is_shop = True\n\n try:\n is_shop, main_page_phone, main_page_email, phone, email = self.is_shop_and_main_page(domain, is_shop)\n leader_phone_without_sitemap = phone\n leader_email_without_sitemap = email\n except Exception as e:\n is_shop = False\n print(f'start: {e}')\n\n phone, leader_phone_without_sitemap, leader_phone, leader_phone_from_team, main_page_phone,\\\n all_pages_phone = self.phone(\n leader_phone_without_sitemap=leader_phone_without_sitemap, leader_phone=leader_phone,\n leader_phone_from_team=leader_phone_from_team,\n main_page_phone=main_page_phone, all_pages_phone=all_pages_phone\n )\n\n email, leader_email_without_sitemap, leader_email, leader_email_from_team, main_page_email,\\\n all_pages_email = self.email(\n leader_email_without_sitemap=leader_email_without_sitemap, leader_email=leader_email,\n leader_email_from_team=leader_email_from_team,\n main_page_email=main_page_email, all_pages_email=all_pages_email\n )\n\n self.write_to_file(\n 
item, is_shop=is_shop, number_of_goods=0, shop_domain='', phone=phone, main_page_phone=main_page_phone,\n leader_phone_without_sitemap=leader_phone_without_sitemap,\n all_pages_phone=all_pages_phone, leader_phone=leader_phone,\n leader_phone_from_team=leader_phone_from_team, email=email,\n leader_email_without_sitemap=leader_email_without_sitemap, leader_email=leader_email,\n leader_email_from_team=leader_email_from_team, main_page_email=main_page_email,\n all_pages_email=all_pages_email\n )\n self.open_db()\n self.cur.execute(\n \"\"\"INSERT INTO Domains_and_subdomains (\n DUNS, Handelsregister_Nummer, UID, Internet_Adresse, subdomains, Rechtsform, Filiale_Indikator,\n Mitarbeiter, Mitarbeiter_Gruppe, is_shop, number_of_goods, phone, phone_main_page,\n leader_phone_without_sitemap, phones_all_pages, leader_phone_sitemap, \n leader_phone_from_team_sitemap, email, email_main_page, leader_email_without_sitemap, \n emails_all_pages, leader_email_sitemap, leader_email_from_team_sitemap\n )\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\", ( # noqa\n item['DUNS'],\n item['Handelsregister-Nummer'],\n item['UID'],\n item['Internet-Adresse'],\n '',\n item['Rechtsform'],\n item['Filiale Indikator'],\n item['Mitarbeiter'],\n item['Mitarbeiter Gruppe'],\n is_shop,\n 0,\n str(phone),\n str(main_page_phone),\n str(leader_phone_without_sitemap),\n str(all_pages_phone),\n str(leader_phone),\n str(leader_phone_from_team),\n str(email),\n str(main_page_email),\n str(leader_email_without_sitemap),\n str(all_pages_email),\n str(leader_email),\n str(leader_email_from_team)\n )\n )\n self.connection.commit()\n self.close_db()\n\n def check_domain(self, item):\n \"\"\"check subdomains and check if url is a store (by keywords)\"\"\"\n counter = 0\n leader_phone = ''\n leader_phone_from_team = ''\n all_pages_phone = ''\n main_page_phone = ''\n phone = ''\n leader_email = ''\n leader_email_from_team = ''\n all_pages_email = ''\n main_page_email = ''\n email = ''\n leader_phone_without_sitemap = ''\n leader_email_without_sitemap = ''\n\n try:\n domain = item['Internet-Adresse']\n subdomains = []\n subdomains_list = list()\n domain_is_shop = False\n domain = str(domain)\n if domain != 'nan':\n\n # take a domain\n target = self.clear_url(domain)\n\n # make a request to an external service\n req = requests.get(\"https://crt.sh/?q=%.{d}&output=json\".format(d=target), headers=self.headers)\n\n if req.status_code != 200:\n print(\"[X] Information not available!\")\n\n else:\n for (key, value) in enumerate(req.json()):\n subdomains.append(value['name_value'])\n\n subdomains = sorted(set(subdomains))\n\n # select the required subdomains\n for subdomain in subdomains:\n if 'shop' in subdomain or 'store' in subdomain:\n domain_is_shop = True\n if '\\n' in subdomain:\n s = subdomain.split(sep='\\n')\n for v in s:\n if 'shop' in v or 'store' in v:\n subdomains_list.append(url_normalize(v))\n print(f'subdomain_m: {v}')\n else:\n subdomains_list.append(url_normalize(subdomain))\n print(f'subdomain_o: {subdomain}')\n\n is_shop, main_page_phone, main_page_email, phone, email = self.is_shop_and_main_page(domain, domain_is_shop) # noqa\n leader_phone_without_sitemap = phone\n leader_email_without_sitemap = email\n\n if is_shop is True:\n domain_is_shop = True\n\n if domain_is_shop is True:\n # check the quantity of goods\n common_list = [link for link in subdomains_list]\n subdomains_list = self.normalize_urls_list(common_list)\n common_list.append(domain)\n common_list = 
self.normalize_urls_list(common_list)\n sitemap_tree = self.get_sitemap_tree(common_list)\n if sitemap_tree:\n leader_phone, leader_email = self.get_leader_phone_and_email_from_sitemap(sitemap_tree)\n leader_phone_from_team, leader_email_from_team = \\\n self.get_leader_phone_and_email_from_sitemap_section_team(sitemap_tree)\n counter, all_pages_phone, all_pages_email = \\\n self.check_phones_emails_on_every_page_and_count_the_quantity_of_goods(sitemap_tree)\n else:\n pass\n\n phone, leader_phone_without_sitemap, leader_phone, leader_phone_from_team, main_page_phone,\\\n all_pages_phone = self.phone(\n leader_phone_without_sitemap=leader_phone_without_sitemap, leader_phone=leader_phone,\n leader_phone_from_team=leader_phone_from_team,\n main_page_phone=main_page_phone, all_pages_phone=all_pages_phone\n )\n\n email, leader_email_without_sitemap, leader_email, leader_email_from_team, main_page_email,\\\n all_pages_email = self.email(\n leader_email_without_sitemap=leader_email_without_sitemap, leader_email=leader_email,\n leader_email_from_team=leader_email_from_team,\n main_page_email=main_page_email, all_pages_email=all_pages_email\n )\n\n self.write_to_file(\n item, is_shop=domain_is_shop, number_of_goods=counter, shop_domain=subdomains_list, phone=phone,\n main_page_phone=main_page_phone,\n leader_phone_without_sitemap=leader_phone_without_sitemap,\n all_pages_phone=all_pages_phone, leader_phone=leader_phone,\n leader_phone_from_team=leader_phone_from_team, email=email,\n leader_email_without_sitemap=leader_email_without_sitemap, leader_email=leader_email,\n leader_email_from_team=leader_email_from_team, main_page_email=main_page_email,\n all_pages_email=all_pages_email\n )\n\n self.open_db()\n self.cur.execute(\n \"\"\"INSERT INTO Domains_and_subdomains (\n DUNS, Handelsregister_Nummer, UID, Internet_Adresse, subdomains, Rechtsform, Filiale_Indikator,\n Mitarbeiter, Mitarbeiter_Gruppe, is_shop, number_of_goods, phone, phone_main_page,\n leader_phone_without_sitemap, phones_all_pages, leader_phone_sitemap, \n leader_phone_from_team_sitemap, email, email_main_page, leader_email_without_sitemap, \n emails_all_pages, leader_email_sitemap, leader_email_from_team_sitemap\n )\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\", ( # noqa\n item['DUNS'],\n item['Handelsregister-Nummer'],\n item['UID'],\n item['Internet-Adresse'],\n str(subdomains_list),\n item['Rechtsform'],\n item['Filiale Indikator'],\n item['Mitarbeiter'],\n item['Mitarbeiter Gruppe'],\n domain_is_shop,\n counter,\n str(phone),\n str(main_page_phone),\n str(leader_phone_without_sitemap),\n str(all_pages_phone),\n str(leader_phone),\n str(leader_phone_from_team),\n str(email),\n str(main_page_email),\n str(leader_email_without_sitemap),\n str(all_pages_email),\n str(leader_email),\n str(leader_email_from_team)\n )\n )\n self.connection.commit()\n self.close_db()\n\n else:\n self.write_to_file(\n item, is_shop=False, number_of_goods=0, shop_domain='', phone=phone,\n main_page_phone=main_page_phone,\n leader_phone_without_sitemap=leader_phone_without_sitemap,\n all_pages_phone=all_pages_phone, leader_phone=leader_phone,\n leader_phone_from_team=leader_phone_from_team, email=email,\n leader_email_without_sitemap=leader_email_without_sitemap, leader_email=leader_email,\n leader_email_from_team=leader_email_from_team, main_page_email=main_page_email,\n all_pages_email=all_pages_email\n )\n self.open_db()\n self.cur.execute(\n \"\"\"INSERT INTO Domains_and_subdomains (\n DUNS, 
Handelsregister_Nummer, UID, Internet_Adresse, subdomains, Rechtsform, Filiale_Indikator,\n Mitarbeiter, Mitarbeiter_Gruppe, is_shop, number_of_goods, phone, phone_main_page,\n leader_phone_without_sitemap, phones_all_pages, leader_phone_sitemap, \n leader_phone_from_team_sitemap, email, email_main_page, leader_email_without_sitemap, \n emails_all_pages, leader_email_sitemap, leader_email_from_team_sitemap\n )\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\", ( # noqa\n item['DUNS'],\n item['Handelsregister-Nummer'],\n item['UID'],\n item['Internet-Adresse'],\n '',\n item['Rechtsform'],\n item['Filiale Indikator'],\n item['Mitarbeiter'],\n item['Mitarbeiter Gruppe'],\n False,\n 0,\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n ''\n )\n )\n self.connection.commit()\n self.close_db()\n\n except Exception as e:\n print(f'check_domain: {e}')\n self.write_to_file(\n item, is_shop=False, number_of_goods=0, shop_domain='', phone=phone, main_page_phone=main_page_phone,\n leader_phone_without_sitemap=leader_phone_without_sitemap,\n all_pages_phone=all_pages_phone, leader_phone=leader_phone,\n leader_phone_from_team=leader_phone_from_team, email=email,\n leader_email_without_sitemap=leader_email_without_sitemap, leader_email=leader_email,\n leader_email_from_team=leader_email_from_team, main_page_email=main_page_email,\n all_pages_email=all_pages_email\n )\n self.open_db()\n self.cur.execute(\n \"\"\"INSERT INTO Domains_and_subdomains (\n DUNS, Handelsregister_Nummer, UID, Internet_Adresse, subdomains, Rechtsform, Filiale_Indikator,\n Mitarbeiter, Mitarbeiter_Gruppe, is_shop, number_of_goods, phone, phone_main_page,\n leader_phone_without_sitemap, phones_all_pages, leader_phone_sitemap, \n leader_phone_from_team_sitemap, email, email_main_page, leader_email_without_sitemap, \n emails_all_pages, leader_email_sitemap, leader_email_from_team_sitemap\n )\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\", ( # noqa\n item['DUNS'],\n item['Handelsregister-Nummer'],\n item['UID'],\n item['Internet-Adresse'],\n '',\n item['Rechtsform'],\n item['Filiale Indikator'],\n item['Mitarbeiter'],\n item['Mitarbeiter Gruppe'],\n False,\n 0,\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n ''\n )\n )\n self.connection.commit()\n self.close_db()\n\n def is_shop_and_main_page(self, domain, domain_is_shop):\n \"\"\"check if url is a store (by keywords)\"\"\"\n shop = False\n main_page_phone = []\n main_page_email = []\n phone = []\n email = []\n try:\n shops = []\n chrome_options = webdriver.ChromeOptions()\n chrome_options.headless = True\n browser = webdriver.Chrome('chromedriver', chrome_options=chrome_options)\n browser.get(domain)\n time.sleep(5)\n html = browser.page_source\n # is shop\n soup = str(BeautifulSoup(html, 'lxml'))\n for word in self.words_for_shop:\n if word in soup:\n shops.append(domain)\n else:\n pass\n\n # main_page_phone\n try:\n main_page_phone = self.find_phones(html)\n except Exception as e:\n main_page_phone = ''\n print(f'is_shop_and_main_page (phones error): {e}')\n\n # main_page_email\n try:\n main_page_email = self.find_emails(html)\n except Exception as e:\n main_page_email = ''\n print(f'is_shop_and_main_page (emails error): {e}')\n\n browser.close()\n\n if len(shops) > 0 or domain_is_shop is True:\n shop = True\n\n # phone_and_email_from_contacts\n try:\n contact_links_from_main_page = self.contact(html, domain)\n 
print(f'contact_links_from_main_page {contact_links_from_main_page}')\n except Exception as e:\n contact_links_from_main_page = ''\n print(f'is_shop_and_main_page (contacts error): {e}')\n\n try:\n for link in contact_links_from_main_page:\n response = requests.get(link, headers=self.headers)\n text = response.text\n phone.append(self.find_phones(text, leader=True)) # noqa\n except Exception as e:\n print(f'is_shop_and_main_page (phone error): {e}')\n\n try:\n for link in contact_links_from_main_page:\n response = requests.get(link, headers=self.headers)\n text = response.text\n email.append(self.find_emails(text, leader=True))\n except Exception as e:\n print(f'is_shop_and_main_page (email error): {e}')\n else:\n shop = False\n main_page_phone = []\n main_page_email = []\n phone = []\n email = []\n\n phone = self.unique_phones([j for i in phone for j in i])\n email = self.unique_emails([j for i in email for j in i])\n return shop, main_page_phone, main_page_email, phone, email\n\n except Exception as e:\n print(f'is_shop: {e}')\n return shop, main_page_phone, main_page_email, phone, email\n\n def contact(self, text, url):\n \"\"\"looking for a section with contacts on the main page\"\"\"\n urls_list = []\n try:\n soup = BeautifulSoup(text, 'lxml')\n links = [[link.get('href'), link] for link in soup.findAll('a')]\n for result in links:\n link = result[0]\n tag_text = str(result[1].text).lower()\n for word in self.words_for_company_team:\n if link is not None and (word in link or word in tag_text):\n try:\n if requests.get(url + '/' + link).status_code == 200:\n urls_list.append(url + '/' + link)\n except: # noqa\n pass\n try:\n if requests.get(url + link).status_code == 200:\n urls_list.append(url + link)\n except: # noqa\n pass\n try:\n if requests.get(link).status_code == 200:\n urls_list.append(link)\n except: # noqa\n pass\n except Exception: # noqa\n pass\n return urls_list\n\n def open_db(self):\n \"\"\"open the database\"\"\"\n hostname = '127.0.0.1'\n username = 'parsing_admin'\n password = 'parsing_adminparsing_admin'\n database = 'parsing'\n port = \"5444\"\n self.connection = psycopg2.connect( # noqa\n host=hostname,\n user=username,\n password=password,\n dbname=database,\n port=port)\n self.cur = self.connection.cursor() # noqa\n\n def close_db(self):\n \"\"\"close the database\"\"\"\n self.cur.close()\n self.connection.close()\n\n @staticmethod\n def unique_phones(lst):\n \"\"\"remove duplicate phones\"\"\"\n phones = []\n for ph in lst:\n phone = ph.replace(\n ' ', '').replace('+', '').replace('(', '').replace(')', '').replace('-', '').replace('.', '').replace('/', '') # noqa\n phones.append(phone)\n set_lst = list(set(phones))\n return set_lst\n\n @staticmethod\n def unique_emails(lst):\n \"\"\"remove duplicate emails\"\"\"\n set_lst = list(set(lst))\n return set_lst\n\n def write_to_file(\n self, item, is_shop, number_of_goods, shop_domain,\n phone, leader_phone_without_sitemap, main_page_phone, all_pages_phone, leader_phone, leader_phone_from_team,\n email, leader_email_without_sitemap, main_page_email, all_pages_email, leader_email, leader_email_from_team\n ):\n \"\"\"write data to file\"\"\"\n lst = list(item.values())\n lst.append(is_shop)\n lst.append(number_of_goods)\n lst.append(shop_domain)\n lst.append(phone)\n lst.append(main_page_phone)\n lst.append(leader_phone_without_sitemap)\n lst.append(all_pages_phone)\n lst.append(leader_phone)\n lst.append(leader_phone_from_team)\n lst.append(email)\n lst.append(main_page_email)\n 
lst.append(leader_email_without_sitemap)\n lst.append(all_pages_email)\n lst.append(leader_email)\n lst.append(leader_email_from_team)\n\n # with csv lib\n with open(self.result_file, \"a\", newline=\"\", encoding='UTF-8') as f:\n writer = csv.writer(f)\n writer.writerows([lst])\n\n @staticmethod\n def phone(leader_phone_without_sitemap, leader_phone, leader_phone_from_team, all_pages_phone, main_page_phone):\n \"\"\"choose only one phone\"\"\"\n phone = ''\n if not leader_phone_without_sitemap:\n leader_phone_without_sitemap = ''\n else:\n try:\n leader_phone_without_sitemap = leader_phone_without_sitemap[0]\n phone = leader_phone_without_sitemap\n except Exception as e: # noqa\n print(f'phone: phone error: {e}')\n\n if not leader_phone:\n leader_phone = ''\n else:\n try:\n leader_phone = leader_phone[0]\n if not leader_phone_without_sitemap:\n phone = leader_phone\n except Exception as e: # noqa\n print(f'phone: leader_phone error: {e}')\n\n if not leader_phone_from_team:\n leader_phone_from_team = ''\n else:\n try:\n leader_phone_from_team = leader_phone_from_team[0]\n if not leader_phone_without_sitemap and not leader_phone:\n phone = leader_phone_from_team\n except Exception as e: # noqa\n print(f'phone: leader_phone_from_team error: {e}')\n\n if not all_pages_phone:\n all_pages_phone = ''\n else:\n try:\n all_pages_phone = all_pages_phone[0]\n if not leader_phone_without_sitemap and not leader_phone and not leader_phone_from_team:\n phone = all_pages_phone\n except Exception as e: # noqa\n print(f'phone: all_pages_phone error: {e}')\n\n if not main_page_phone:\n main_page_phone = ''\n else:\n try:\n new_list = [e for e in main_page_phone if e]\n main_page_phone = new_list[0] if type(new_list[0]) is not list else new_list[0][0]\n if not leader_phone_without_sitemap and not leader_phone and not leader_phone_from_team and not all_pages_phone: # noqa\n phone = main_page_phone\n except Exception as e: # noqa\n print(f'phone: main_page_phone error: {e}')\n\n return phone, leader_phone_without_sitemap, leader_phone, leader_phone_from_team, main_page_phone,\\\n all_pages_phone\n\n @staticmethod\n def email(leader_email_without_sitemap, leader_email, leader_email_from_team, all_pages_email, main_page_email):\n \"\"\"choose only one email\"\"\"\n email = ''\n if not leader_email_without_sitemap:\n leader_email_without_sitemap = ''\n else:\n try:\n leader_email_without_sitemap = leader_email_without_sitemap[0]\n email = leader_email_without_sitemap\n except Exception as e: # noqa\n print(f'email: email error: {e}')\n\n if not leader_email:\n leader_email = ''\n else:\n try:\n leader_email = leader_email[0]\n if not leader_email_without_sitemap:\n email = leader_email\n except Exception as e: # noqa\n print(f'email: leader_email error: {e}')\n\n if not leader_email_from_team:\n leader_email_from_team = ''\n else:\n try:\n leader_email_from_team = leader_email_from_team[0]\n if not leader_email_without_sitemap and not leader_email:\n email = leader_email_from_team\n except Exception as e: # noqa\n print(f'email: leader_email_from_team error: {e}')\n\n if not all_pages_email:\n all_pages_email = ''\n else:\n try:\n all_pages_email = all_pages_email[0]\n if not leader_email_without_sitemap and not leader_email and not leader_email_from_team:\n email = all_pages_email\n except Exception as e: # noqa\n print(f'email: all_pages_email error: {e}')\n\n if not main_page_email:\n main_page_email = ''\n else:\n try:\n new_list = [e for e in main_page_email if e]\n main_page_email = new_list[0] if 
type(new_list[0]) is not list else new_list[0][0]\n if not leader_email_without_sitemap and not leader_email and not leader_email_from_team and not all_pages_email: # noqa\n email = main_page_email\n except Exception as e: # noqa\n print(f'email: all_pages_email error: {e}')\n\n return email, leader_email_without_sitemap, leader_email, leader_email_from_team, main_page_email,\\\n all_pages_email\n\n @staticmethod\n def check_email_valid(email):\n \"\"\"check email validity\"\"\"\n bool_result = is_email(email)\n return bool_result\n\n\nif __name__ == '__main__':\n \"\"\"\n you need to insert the file name, select mode (1, 2 or 3), specify timeout (in seconds))\n for example:\n python store_identifier.py \"example.xlsx\" 3 360\n Mods:\n 1. counting the number of products according to the sitemap links\n 2. follow each link in sitemap and check keywords on each page (if no goods were found in the way 1) \n (using requests, since with selenium it will take much longer)\n 3. follow each link in sitemap and check keywords on each page (anyway)\n \"\"\"\n\n # get file name\n file = sys.argv[1]\n # select mode\n mode = sys.argv[2]\n # specify timeout\n timeout = int(sys.argv[3])\n\n # create object\n obj = DomainsAndSubdomains(file=file, mode=mode, timeout=timeout)\n\n # get data\n obj.start()\n","sub_path":"store_identifier.py","file_name":"store_identifier.py","file_ext":"py","file_size_in_byte":44312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"564070706","text":"# ESZA019−17 − Visao Computacional − NA − 2Q2019\n# PRATICA 01\n#\n# RA: 11039113\n# NOME: Matheus E. S. Araújo\n#\n# E−MAIL: matheus.esa17@gmail.com\n# Git-Hub: https://github.com/MatheusESAraujo/Visao_computacional\n#\n# DESCRICAO:\n# − Código utilizado para aprender o funcionamento da biblioteca openCV\n# - Este código importa uma imagem e a salva em cinza além disso foi adicionado uma linha para analisar o reconhecimento de bordas Canny\n\nimport numpy as np\nimport cv2 as cv\n\n\n\nimg = cv.imread('Imagens_e_videos/messi_color.jpg',0)\nout = cv.imwrite(\"Imagens_e_videos/messi_gray.JPG\",img)\nimg_canny = cv.Canny(img,50,70)\ncv.imshow(\"Imagens_e_videos/Messi\",img_canny)\n\nk=cv.waitKey(0)\nif k == 27:\n\tcv.destroyAllWindows()\nelif k == ord('s'):\n\tout.write\n\tcv.destroyAllWindows()\n\n\n\n\n","sub_path":"Lab01/lab01.py","file_name":"lab01.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"627696480","text":"# Create your views here.\nfrom django.http import HttpResponse\nfrom django.core.servers.basehttp import FileWrapper\nfrom django.conf import settings\n\nfrom responsive_images.utils import get_final_resolution, get_file\n\nfrom PIL import Image\n\nimport os\nfrom subprocess import call\n\n\ndef adapt_image(request):\n try:\n resolution = int(request.COOKIES['resolution'])\n final_resolution = get_final_resolution(resolution)\n except KeyError:\n final_resolution = 100\n\n filename = request.META['QUERY_STRING'].replace(\"___asnf874wthwengsfduy\", \"\")\n\n fullname = settings.STATIC_ROOT + filename[len(settings.STATIC_URL)-1:]\n filename = filename.split(\"/\").pop()\n filename, extension = filename.split(\".\")\n i = Image.open(fullname)\n\n if max(i.size[0], i.size[1]) < final_resolution:\n return get_file(fullname, extension)\n filename = \"%s_%s_%s.%s\" % (filename, final_resolution, final_resolution,\n extension)\n\n if not 
os.path.exists(settings.STATIC_ROOT+'/responsive_images_cache'):\n os.chdir(settings.STATIC_ROOT)\n call(['mkdir', 'responsive_images_cache'])\n os.chdir(settings.STATIC_ROOT+'/responsive_images_cache')\n\n image = Image.open(fullname)\n if not os.path.exists(filename):\n resized_image = image.resize((final_resolution, final_resolution), Image.ANTIALIAS)\n resized_image.save(filename, extension, quality=75)\n\n # Return resized image\n return get_file(filename, extension)\n\n","sub_path":"responsive_images/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"641044827","text":"import numpy as np\nfrom skimage import io\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\n\nimg = io.imread(\"char_seg_2.png\", as_grey=True)\n# 图片的大小\n\nrows,cols = img.shape\nh_profile = np.sum(np.where(img >= 0.99, 0, 1), axis=1) / 2\nv_profile = np.sum(np.where(img >= 0.99, 0, 1), axis=0) / 2\n\n\n# DPI设为100时候,即为原图的大小\nDPI = 100\n\n# 根据图像的真实大小计算figsize\nfigsize = cols/ DPI ,rows / DPI\nfig = plt.figure(figsize=figsize)\n# 使用 [0,0,1,1] 屏蔽边框以及将图像填满整个区域\nax = fig.add_axes([0, 0, 1, 1])\n# 起始坐标290,341 宽度84,高度46,笔画宽度为3\n\n# 注意一定要先绘制图,然后再绘制Rectangle\nax.set_frame_on(False)\nax.imshow(img,cmap=\"gray\")\nx = np.arange(len(v_profile))\nax.fill(x,v_profile,'b-',alpha=0.7)\n# plt.show()\n# 保存绘图到文件当中 .jpg .png .tif .svg\nfig.savefig(\"test.png\")","sub_path":"papaer_pic/char_seg.py","file_name":"char_seg.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"640543716","text":"# Converts TSV data from IMDB for crew into a MongoDB collection.\n\nimport os\nimport csv\n\nfrom pymongo import MongoClient\n\ndb = MongoClient().imdbws\n\nroot_path = os.path.dirname(os.path.realpath(__file__))\npath = root_path + \"/datasets.imdbws.com/name.basics.tsv\"\n\ncounter = 0\n\nfor row in csv.DictReader(open(path), delimiter='\\t', quoting=csv.QUOTE_NONE):\n c = {}\n for k, v in row.items():\n if v == \"\\\\N\":\n v = None\n if k == \"nconst\":\n k = \"_id\"\n c[k] = v\n if c['birthYear']:\n c['birthYear'] = int(c['birthYear'])\n\n if c['deathYear']:\n c['deathYear'] = int(c['deathYear'])\n\n if c['primaryProfession']:\n c['primaryProfession'] = c['primaryProfession'].split(',')\n\n if c['knownForTitles']:\n c['knownForTitles'] = c['knownForTitles'].split(',')\n\n counter += 1\n if counter % 10000 == 0:\n print (\"{} crew inserted.\".format(counter))\n\n db.crew.save(c)\n","sub_path":"extract-imdbws/05_names.py","file_name":"05_names.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"144167228","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis file is a Firefish spider created on top of the ATSSpider\nscrapy crawl firefish -a mining_job_id=999 -a iteration=1 -a extract=1 -a url=\"https://www.edgarstewart.co.uk/vacancies.aspx\"\n\nsample url:\n http://www.ineosopportunities.co.uk/\n https://www.ninetwentyenergy.com/vacancies.aspx\n https://www.snapchildcare.co.uk/current-jobs.aspx\n https://www.worldteachers.net/jobs.aspx\n https://www.taylorhopkinson.com/vacancies.aspx\n https://isemiddleeastjobs.firefishsoftware.com/\n https://www.ultimatebanking.co.uk/candidates/vacancies.aspx\n http://www.e-placementscotland.com/job-search.aspx\n https://www.edgarstewart.co.uk/vacancies.aspx\n 
https://cloister-jobs.firefishsoftware.com/\n https://renaissancecarejobs.firefishsoftware.com/\n https://jobs.civil.external.lmco.com/latest-vacancies.aspx\n https://www.imultiplyresourcing.com/candidates.aspx\n\"\"\"\n\nfrom hashlib import md5\nfrom re import compile\nfrom urlparse import urljoin\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, HtmlFormatter, ConvertDateString, ShrinkURL, Replace\n\n\nclass Firefish(ATSSpider):\n\n name = 'firefish'\n loc_re = compile(\".*-(.*)\")\n date_re = compile('Posted on\\s(.*)')\n multi_br_re = compile(\"(
\\s*){2,}\")\n remove_txt_re = compile(\"%s\\s*Download\\s*Job\\s*Profile\" % unicode('', 'utf-8'))\n\n def parse(self, response):\n sel = Selector(response)\n if not hasattr(self, 'logo_url'):\n logo_url = sel.xpath(\"(//header//a|//*[@class='brand']|//div[@id='Logo']|//strong[@class='logo']|//div[contains(@class,'logo-header') or @class='page-top' or @class='logoArea'])//img/@src\").extract()\n self.logo_url = urljoin(response.url, logo_url[0]) if logo_url else ''\n job_count = sel.xpath(\"//td[contains(@class,'records')]/text()\").extract()\n if job_count:\n self.expected_job_count = job_count[0]\n\n jobs = sel.xpath(\"//div[@class='textArea' or @class='ff-jobs-single-post']\")\n for job in jobs:\n job_link = job.xpath(\"(div[@class='main']|h3[@class='entry-title'])/a/@href\").extract()\n if job_link:\n job_url = urljoin(response.url, job_link[0])\n meta = {\n 'title': job.xpath(\"(div[@class='main']|h3[@class='entry-title'])//text()\").extract(),\n 'date': job.xpath(\"div[contains(text(),'Posted on')]\").extract()\n }\n yield Request(\n url=job_url, meta=meta, callback=self.parse_job_callback()\n )\n\n next_page = sel.xpath(\"//a[contains(text(),'Next')]/@href\").extract()\n if next_page:\n next_url = urljoin(response.url, next_page[0])\n yield Request(url=next_url, callback=self.parse)\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url, ShrinkURL(['destination']))\n loader.add_value('logo_url', self.logo_url)\n loader.add_value('title', response.meta['title'])\n loader.add_value('date', response.meta['date'], ConvertDateString(\"%B %d, %Y\"), re=self.date_re)\n loader.add_xpath('location', \"//strong[contains(text(),'Nearest tube/station or location:')]/following-sibling::node()\")\n if not loader.get_output_value('location'):\n loader.add_value('location', response.meta['title'], re=self.loc_re)\n if not loader.get_output_value('location'):\n loader.add_xpath('location', \"//span[contains(@id,'_lblTitleMainInHeader')]/text()\", re=self.loc_re)\n loader.add_xpath('description', \"//div[@class='advertBody']\", HtmlFormatter(), Replace(self.remove_txt_re), Replace(self.multi_br_re, '
'))\n loader.add_value('referencenumber', md5(response.url).hexdigest(), Prefix(\"%s-\" % self.name))\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/firefish.py","file_name":"firefish.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"445861218","text":"# Builtins\nimport unittest\nimport pathlib\nimport shutil\n\n# Submodule imports\nfrom harvest.trader import BackTester\nfrom harvest.algo import BaseAlgo\nfrom harvest.api.dummy import DummyStreamer\nfrom harvest.utils import *\n\n\nclass TestTester(unittest.TestCase):\n def tear_up_down(func):\n def wrapper(*args, **kwargs):\n try:\n func(*args, **kwargs)\n finally:\n path = pathlib.Path(\"data\")\n shutil.rmtree(path)\n\n return wrapper\n\n @tear_up_down\n def test_start_do_nothing(self):\n \"\"\"Do a quick run-through of the BackTester\n to ensure it can run without crashing.\n \"\"\"\n s = DummyStreamer()\n t = BackTester(s)\n t.set_symbol(\"A\")\n t.set_algo(BaseAlgo())\n t.start(\"1MIN\", [\"5MIN\"], period=\"1DAY\")\n self.assertTrue(True)\n\n @tear_up_down\n def test_check_aggregation(self):\n \"\"\" \"\"\"\n t = BackTester(DummyStreamer())\n t.set_symbol(\"A\")\n t.set_algo(BaseAlgo())\n t.start(\"1MIN\", [\"1DAY\"], period=\"1DAY\")\n\n minutes = list(t.storage.load(\"A\", Interval.MIN_1)[\"A\"][\"close\"])[-200:]\n days_agg = list(t.storage.load(\"A\", int(Interval.DAY_1) - 16)[\"A\"][\"close\"])[\n -200:\n ]\n\n self.assertListEqual(minutes, days_agg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_tester.py","file_name":"test_tester.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"386843106","text":"# -*- coding: utf-8 -*-\n\n# A very simple setup script to create a single executable\n#\n# hello.py is a very simple 'Hello, world' type script which also displays the\n# environment in which the script runs\n#\n# Run the build process :\n#\n# 'python setup.py build'\n# or \n# 'python setup.py bdist_msi'\n#\n# If everything works well you should find a subdirectory in the build\n# subdirectory that contains the files needed to run the script without Python\n\nimport sys\nfrom cx_Freeze import setup, Executable\nfrom ngchecker import __version__\n\noptions = {\n 'build_exe': {\n 'compressed': True,\n 'include_files': [\n 'readme.md',\n 'ngchecker.conf',\n 'check.list',\n 'modules/',\n 'log/',\n ],\n 'excludes' : ['Tkinter'],\n 'path': sys.path + ['modules']\n }\n}\n\nexecutables = [\n Executable('ngchecker.py')\n]\n\nsetup(name = 'ngchecker',\n version = __version__,\n auther = \"Junest LX\",\n auther_emai = \"support@junest.com\",\n options = options, \n description ='FlexiNG Smart Checker',\n executables =executables \n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"246785710","text":"# test_print_contour.py by CoccaGuo at 2021/09/28 18:13\n\nimport cv2\nimport numpy as np\nfrom DataProcessor import *\nfrom ContourDetecter import *\nfrom utils import Color\nfrom ColorBlender import *\nfrom RingFactoryMaker import getRingFactory\n\n\ndef result_plot(image_path: str):\n def print_color(ring, color):\n if ring.colored == False:\n ring.colored = True\n pts = rf.contour_list[ring.id]\n # singleLayerBlender(image, ring, 
rf.contour_list[ring.id], color, Color.BG_COLOR, k=3)\n multiLayerBlender(image, ring, rf.contour_list[ring.id], color, Color.BG_COLOR, k=4)\n cv2.fillConvexPoly(image, pts, color.value)\n \n\n image = cv2.imread(image_path)\n rf = getRingFactory(image)\n\n for ring in rf.ring_list:\n\n if ring.sides == 7:\n print_color(ring, Color.SLATEBLUE)\n\n for nnRing in rf.findRingsByID(rf.nearestNeighbor(ring.id)):\n if nnRing.sides == 6:\n print_color(nnRing, Color.IVORY)\n\n if ring.sides == 5:\n print_color(ring, Color.WHEAT)\n\n for nnRing in rf.findRingsByID(rf.nearestNeighbor(ring.id)):\n if nnRing.sides == 6:\n print_color(nnRing, Color.IVORY)\n \n \n if ring.sides == 6:\n flag = True\n for nnRing in rf.findRingsByID(rf.nearestNeighbor(ring.id)):\n if nnRing.sides != 6:\n flag = False\n if flag:\n print_color(ring, Color.YELLOW)\n \n # cv2.putText(image, str(ring.id), ring.position, cv2.FONT_HERSHEY_SIMPLEX, 0.4, Color.RED.value)\n \n cv2.imshow(\"pic\", image)\n cv2.waitKey()\n cv2.destroyAllWindows()\n cv2.imwrite('readme.assert/final_1.png', image)\n\nif __name__ == '__main__':\n image_path = 'data/raw2.png'\n result_plot(image_path)","sub_path":"print_contour.py","file_name":"print_contour.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"529712396","text":"from tealight.art import (fill_polygon, color, line, spot, circle, box, image, text, background, polygon)\nimport random\nfrom math import cos, sin, pi\nimport math\n\nfrom tealight.art import screen_width, screen_height, test_polygon\nfrom tealight.utils import github_load, sleep\nfoo = github_load(\"c-ryan747\", \"art\", \"carClass\")\nfoo2 = github_load(\"c-ryan747\", \"art\", \"mapClass\")\n\n##########################################################\n##########################################################\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to racetrack.py\n## This is the wrong one go to 
racetrack.py\n##########################################################\n##########################################################\n\n# Car stats\nacc = 0.3\nmax_speed = 10\nturn_speed = 0.07\nfriction = 0.05\n\n# Bounce stats\nside_bounce = -2\n\nsleep(100)\n\nplayer_won = 0\n\nlap1 = 1\nlap2 = 1\ntime = 0\n\ncar_x = 200\ncar_y = 160\ncar_v = 0\ncar_a = 0\ndirection = 0\nspin = 0\n\ncar_x2 = 200\ncar_y2 = 240\ncar_v2 = 0\ncar_a2 = 0\ndirection2 = 0\nspin2 = 0\n\nplayer_1 = foo.car(car_x,car_y,direction)\nplayer_2 = foo.car(car_x2,car_y2,direction2)\ntrack = foo2.track()\n\n##########################################################\n### Frame event\n##########################################################\n\ndef handle_frame():\n global time, friction, car_x, car_y, car_v, car_a, direction, lap1, car_x2, car_y2, car_v2, car_a2, direction2, lap2, player_won\n \n # refreshes screen\n color(\"rgba(255,255,255,0.1)\")\n \n \n box(0,screen_height/10,screen_width,screen_height/10*9)\n \n # Does time\n if lap1 < 4:\n time = time + 0.04\n color( \"rgba(150,150,150,0.7)\")\n box(0,0,screen_width,screen_height/10)\n color(\"blue\")\n #text(40,20,str(math.ceil(time)))\n text(180,10,\"Polygon Racers!\")\n text(20,36,\"arrow keys lap: \" + str(math.floor(lap1))[0]+\"/4\")\n text(280,36,\"wasd keys lap: \" + str(math.floor(lap2))[0]+\"/4\")\n # Does laps for player 1\n if track.test_in_top_detector(player_1.points[0],player_1.points[1]) == True:\n if ((lap1-0.5) % 1) == 0:\n lap1 = lap1 + 0.5\n if lap1 == 4.0 and lap2 < 4:\n player_won = 1\n if track.test_in_bottom_detector(player_1.points[0],player_1.points[1]) == True:\n if (lap1 % 1) == 0:\n lap1 = lap1 + 0.5\n \n # Does laps for player 2\n if track.test_in_top_detector(player_2.points[0],player_2.points[1]) == True:\n if ((lap2-0.5) % 1) == 0:\n lap2 = lap2 + 0.5\n if lap2 == 4.0 and lap1 < 4:\n player_won = 2\n if track.test_in_bottom_detector(player_2.points[0],player_2.points[1]) == True:\n if (lap2 % 1) == 0:\n lap2 = lap2 + 0.5 \n \n \n ################################\n # Collisions with the polygons\n ################################\n \n if not track.test_point(player_1.points[0],player_1.points[1]):\n #if track.test_point(player_1.points[2],player_1.points[3]) and track.test_point(player_1.points[4],player_1.points[5]):\n car_v = car_v*-0.6\n car_x = car_x + -5 * cos(direction)\n car_y = car_y - -5 * sin(direction)\n \n if not track.test_point(player_1.points[2],player_1.points[3]):\n if car_v > 0:\n car_v = car_v*-0.6\n if track.test_point(player_1.points[4],player_1.points[5]):\n car_x = car_x - side_bounce * sin(direction)\n car_y = car_y + side_bounce * cos(direction)\n car_x = car_x - 4 * sin(direction+(pi/2))\n car_y = car_y + 4 * cos(direction+(pi/2))\n else:\n #if track.test_point(player_1.points[0],player_1.points[1]):\n \n car_v = 5\n #if track.test_point(player_1.points[0],player_1.points[1]):\n #car_x = car_x - 4 * sin(direction+(pi/2))\n #car_y = car_y + 4 * cos(direction+(pi/2))\n #print \"should be working\"\n \n elif not track.test_point(player_1.points[4],player_1.points[5]):\n if car_v > 0:\n car_v = car_v*-0.6\n if track.test_point(player_1.points[2],player_1.points[3]):\n car_x = car_x - side_bounce * sin(direction)\n car_y = car_y + side_bounce * cos(direction)\n car_x = car_x - 4 * sin(direction-(pi/2))\n car_y = car_y + 4 * cos(direction-(pi/2))\n else:\n car_v = 5\n #if track.test_point(player_1.points[0],player_1.points[1]):\n\n #if track.test_point(player_1.points[0],player_1.points[1]):\n #car_x = car_x - 4 * 
sin(direction-(pi/2))\n #car_y = car_y + 4 * cos(direction-(pi/2))\n #print \"should be working2\"\n \n \n #############################\n # Colisions for second car\n \n if not track.test_point(player_2.points[0],player_2.points[1]):\n car_v2 = car_v2*-0.6\n car_x2 = car_x2 + -5 * cos(direction2)\n car_y2 = car_y2 - -5 * sin(direction2)\n \n if not track.test_point(player_2.points[2],player_2.points[3]):\n if car_v2 > 0:\n car_v2 = car_v2*-0.6\n if track.test_point(player_2.points[2],player_2.points[3]):\n car_x2 = car_x2 - side_bounce * sin(direction2)\n car_y2 = car_y2 + side_bounce * cos(direction2)\n else:\n car_v2 = 2\n #car_x = car_x - 10 * sin(direction)\n #car_y = car_y + 10 * cos(direction)\n #print \"should be working\"\n \n if not track.test_point(player_2.points[4],player_2.points[5]):\n if car_v2 > 0:\n car_v2 = car_v2*-0.6\n if track.test_point(player_2.points[2],player_2.points[3]):\n car_x2 = car_x2 - side_bounce * sin(direction2)\n car_y2 = car_y2 + side_bounce * cos(direction2)\n else:\n car_v2 = 2\n #car_x = car_x - 10 * sin(direction)\n #car_y = car_y + 10 * cos(direction)\n #print \"should be working2\"\n \n \n \n \n ##########\n # Physics\n \n #car1\n if car_v > 0:\n car_v -= friction\n else:\n car_v += friction\n \n car_v = min(car_v + car_a,10)\n \n car_x = car_x + car_v * cos(direction)\n car_y = car_y - car_v * sin(direction)\n\n direction = direction + spin*min(1,car_v/5)\n \n #car2\n if car_v2 > 0:\n car_v2 -= friction\n else:\n car_v2 += friction\n \n car_v2 = min(car_v2 + car_a2,10)\n \n car_x2 = car_x2 + car_v2 * cos(direction2)\n car_y2 = car_y2 - car_v2 * sin(direction2)\n\n direction2 = direction2 + spin2*min(1,car_v2/5)\n \n ################\n # Drawing bits\n ################\n color(\"black\")\n track.draw_polygons()\n player_1.move(car_x,car_y,direction)\n player_2.move(car_x2,car_y2,direction2)\n \n if player_won == 1:\n image(screen_width/2 - 320,screen_height/2 - 200,\"http://i.imgur.com/OQ5AJok.png\")\n if player_won == 2:\n image(screen_width/2 - 320,screen_height/2 - 200,\"http://i.imgur.com/Bzd2sgV.png\")\n\n \n \n##########################################################\n### Keypressing events\n##########################################################\n \ndef handle_keydown(key):\n global spin, turn_speed, car_a, acc, spin2, car_a2\n \n if key == \"left\":\n spin = turn_speed\n elif key == \"right\":\n spin = -turn_speed\n elif key == \"up\":\n car_a = acc\n elif key == \"down\":\n car_a = -acc/1.5\n \n if key == \"a\":\n spin2 = turn_speed\n elif key == \"d\":\n spin2 = -turn_speed\n elif key == \"w\":\n car_a2 = acc\n elif key == \"s\":\n car_a2 = -acc/1.5\n\ndef handle_keyup(key):\n global spin, car_a,spin2, car_a2\n\n if key == \"left\" or key == \"right\":\n spin = 0\n elif key == \"up\" or key == \"down\":\n car_a = 0\n \n if key == \"a\" or key == \"d\":\n spin2 = 0\n elif key == \"s\" or key == \"w\":\n car_a2 = 0\n \n \n \ndef draw_static_things():\n blue_top = [(0,0),\n (0,screen_height/10),\n (screen_width,screen_height/10),\n (screen_width,0)]\n color(\"blue\")\n fill_polygon(blue_top)\n \n \ndraw_static_things()\n","sub_path":"art/untitled.py","file_name":"untitled.py","file_ext":"py","file_size_in_byte":8996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"225787555","text":"import torch\r\nfrom PIL import Image\r\nfrom torchvision import transforms\r\nfrom torch.autograd import Variable\r\nfrom torch.nn import functional\r\nfrom model import *\r\nimport numpy\r\nfrom 
sklearn.externals import joblib\r\nfrom sklearn import svm\r\nimport os\r\n\r\nrgb = RGB_model()\r\nrgb.load_state_dict(torch.load(r'C:\\Users\\Neticle\\Desktop\\bishedaima\\Fusing Multiple plus\\casia_model\\rgb.pkl'))\r\nrgb.eval().cuda()\r\nhsv = HSV_model()\r\nhsv.load_state_dict(torch.load(r'C:\\Users\\Neticle\\Desktop\\bishedaima\\Fusing Multiple plus\\casia_model\\hsv.pkl'))\r\nhsv.eval().cuda()\r\nycbcr = YCbCr_model()\r\nycbcr.load_state_dict(torch.load(r'C:\\Users\\Neticle\\Desktop\\bishedaima\\Fusing Multiple plus\\casia_model\\ycbcr.pkl'))\r\nycbcr.eval().cuda()\r\npatch = patch_model()\r\npatch.load_state_dict(torch.load(r'C:\\Users\\Neticle\\Desktop\\bishedaima\\Fusing Multiple plus\\casia_model\\patch.pkl'))\r\npatch.eval().cuda()\r\nlbp = LBP_model()\r\nlbp.load_state_dict(torch.load(r'C:\\Users\\Neticle\\Desktop\\bishedaima\\Fusing Multiple plus\\casia_model\\lbp.pkl'))\r\nlbp.eval().cuda()\r\n\r\n\r\npath = r'E:/database/nuaa/data/test/0/'\r\nlbp_path = r'E:/database/nuaa/LBP/test/0/'\r\n# path = r'E:/database/CBSR-Antispoofing/croped/test/0/'\r\n# lbp_path = r'E:/database/CBSR-Antispoofing/LBP/test/0/'\r\nt = transforms.Compose([\r\n transforms.Resize((256, 256)),\r\n transforms.ToTensor()])\r\ntr = transforms.Compose([\r\n transforms.Resize((256, 256)),\r\n transforms.RandomCrop(96),\r\n transforms.Resize((256, 256)),\r\n transforms.ToTensor()])\r\n\r\nnum_true = 0\r\nfor test_img in os.listdir(path):\r\n pil_img = Image.open(path + test_img).convert('RGB')\r\n lbp_img = Image.open(lbp_path + test_img).convert('RGB')\r\n img = t(pil_img)\r\n img.unsqueeze_(dim=0)\r\n img = Variable(img).cuda()\r\n imge = tr(pil_img)\r\n imge.unsqueeze_(dim=0)\r\n imge = Variable(imge).cuda()\r\n limg = t(lbp_img)\r\n limg.unsqueeze_(dim=0)\r\n limg = Variable(limg).cuda()\r\n\r\n rgb_out = rgb(img)\r\n rgb_out = functional.softmax(rgb_out, dim=1)\r\n rgb_out = rgb_out.cpu()\r\n rgb_out = rgb_out.detach().numpy()\r\n #rgb_result = int(numpy.argmax(rgb_out.cpu().detach().numpy()))\r\n hsv_out = hsv(img)\r\n hsv_out = functional.softmax(hsv_out, dim=1)\r\n hsv_out = hsv_out.cpu()\r\n hsv_out = hsv_out.detach().numpy()\r\n #hsv_result = int(numpy.argmax(hsv_out.cpu().detach().numpy()))\r\n ycbcr_out = ycbcr(img)\r\n ycbcr_out = functional.softmax(ycbcr_out, dim=1)\r\n ycbcr_out = ycbcr_out.cpu()\r\n ycbcr_out = ycbcr_out.detach().numpy()\r\n #ycbcr_result = int(numpy.argmax(ycbcr_out.cpu().detach().numpy()))\r\n patch_out = patch(imge)\r\n patch_out = functional.softmax(patch_out, dim=1)\r\n patch_out = patch_out.cpu()\r\n patch_out = patch_out.detach().numpy()\r\n #patch_result = int(numpy.argmax(patch_out.cpu().detach().numpy()))\r\n lbp_out = lbp(limg)\r\n lbp_out = functional.softmax(lbp_out, dim=1)\r\n lbp_out = lbp_out.cpu()\r\n lbp_out = lbp_out.detach().numpy()\r\n\r\n #out = out.detach().numpy()\r\n #ind = int(numpy.argmax(out))\r\n # ind = out[0, 0]+out[0, 1]\r\n #print(rgb_out, hsv_out, ycbcr_out, patch_out)\r\n #print(rgb_result, hsv_result, ycbcr_result, patch_result)\r\n\r\n\r\n clf = joblib.load(r\"C:\\Users\\Neticle\\Desktop\\bishedaima\\Fusing Multiple plus\\casia_model\\svm.m\")\r\n test_X = [[rgb_out[0, 0], hsv_out[0, 0], ycbcr_out[0, 0], lbp_out[0, 0], patch_out[0, 0]]]\r\n #print(clf.predict(test_X)[0])\r\n if clf.predict(test_X)[0] == 0:\r\n num_true +=1\r\n print(num_true)\r\n\r\n","sub_path":"Fusing Multiple 
plus/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"633198255","text":"# /*\n# * CIS2750 F2017\n# * Assignment 3\n# * Jackson Zavarella 0929350\n# * This file is a python wrapper for the c functions in CalendarParser.h\n# * No code was used from previous classes/ sources\n# */\n\nfrom ctypes import *\n\nclass CalendarDriver(object):\n \"\"\"docstring for CalendarDriver.\"\"\"\n def __init__(self, sofile):\n self.sofile = sofile\n self.calLib = CDLL(sofile)\n\n self.calLib.openCalendarPython.restype = c_void_p\n self.calLib.openCalendarPython.argtypes = [c_char_p]\n self.calLib.deleteCalendar.argtypes = [c_void_p]\n self.calLib.writeCalendar.argtypes = [c_char_p, c_void_p]\n self.calLib.writeCalendar.restype = c_uint\n\n # Set return and argument types\n self.calLib.printError.restype = c_char_p\n self.calLib.validateCalendarPython.restype = c_uint;\n self.calLib.validateCalendarPython.argtypes = [c_char_p]\n self.calLib.getCalendarComponentsPython.restype = c_char_p\n self.calLib.getCalendarComponentsPython.argtypes = [c_void_p]\n self.calLib.getComponentPropsPython.restype = c_char_p\n self.calLib.getComponentPropsPython.argtypes = [c_void_p, c_int]\n self.calLib.getComponentAlarmsPython.restype = c_char_p\n self.calLib.getComponentAlarmsPython.argtypes = [c_void_p, c_int]\n self.calLib.validateVersionPython.restype = c_int\n self.calLib.validateVersionPython.argtypes = [c_char_p]\n self.calLib.matchTEXTField.restype = c_int\n self.calLib.matchTEXTField.argtypes = [c_char_p]\n self.calLib.matchDATEField.restype = c_int\n self.calLib.matchDATEField.argtypes = [c_char_p]\n\n self.calLib.getComponentPropertiesDatabasePython.argtypes = [c_void_p, c_int]\n self.calLib.getComponentPropertiesDatabasePython.restype = c_char_p\n\n self.calLib.createBasicCalendarPython.restype = c_void_p\n self.calLib.createBasicCalendarPython.argtypes = [c_float, c_char_p]\n\n self.calLib.addEventPython.argtypes = [c_void_p, c_char_p, c_char_p, c_char_p]\n self.calLib.safelyFreeString.argtypes = [c_void_p]\n\n def addEvent(self, calPointer, uid, date, start):\n self.calLib.addEventPython(calPointer, self.encode(uid), self.encode(date), self.encode(start))\n\n def createBasicCalendar(self, version, prodId):\n return self.calLib.createBasicCalendarPython(c_float(version), self.encode(prodId))\n\n def writeCalendar(self, fileName, calPointer):\n response = self.calLib.writeCalendar(self.encode(fileName), calPointer)\n return self.printError(response)\n\n def printError(self, error):\n charp = self.calLib.printError(error)\n humanReadable = charp.decode()\n # self.calLib.safelyFreeString(charp)\n return humanReadable\n\n def openCalendar(self, fileName):\n return self.calLib.openCalendarPython(self.encode(fileName))\n\n def validateCalendar(self, fileName):\n response = self.calLib.validateCalendarPython(self.encode(fileName))\n return self.printError(response)\n\n def getCalendarComponents(self, calPointer):\n response = self.calLib.getCalendarComponentsPython(calPointer)\n components = response.decode().split(\"\\\"\\\\\")\n parsedComponents = []\n for component in components:\n if component == '':\n continue\n parsedComponents.append(component.split(\"\\\\\\\"\")) # Add this component to the list\n return parsedComponents\n\n def deleteCalendar(self, calPointer):\n self.calLib.deleteCalendar(calPointer)\n\n def getComponentAlarms(self, calPointer, compNum):\n response = 
self.calLib.getComponentAlarmsPython(calPointer, c_int(compNum))\n if response is None:\n return None\n return \"Component Alarms:\\n\" + response.decode()\n\n def getComponentProperties(self, calPointer, compNum):\n response = self.calLib.getComponentPropsPython(calPointer, c_int(compNum))\n if response is None:\n return None\n return \"Component Properties:\\n\" + response.decode()\n\n def getComponentPropertiesDatabase(self, calPointer, compNum):\n response = self.calLib.getComponentPropertiesDatabasePython(calPointer, c_int(compNum))\n if response is None:\n return None\n return response.decode()\n\n def encode(self, string):\n return string.encode('utf-8')\n\n def validateVersion(self, version):\n response = self.calLib.validateVersionPython(self.encode(version))\n if response == 1:\n return True\n return False\n\n def matchDATEField(self, date):\n response = self.calLib.matchDATEField(self.encode(date))\n if response == 1:\n return True\n return False\n\n def matchTEXTField(self, text):\n response = self.calLib.matchTEXTField(self.encode(text))\n if response == 1:\n return True\n return False\n","sub_path":"bin/cal/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"361059728","text":"import os\nimport tensorflow as tf\nimport tensorflow_probability as tfp\ntfd = tfp.distributions\nimport numpy as np\nimport functools\nimport tfmpl\nimport mnist_data\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ntf.app.flags.DEFINE_enum(\"algo\", \"nis_vae_proposal\",\n [\"nis_vae_proposal\", \"nis_gaussian_proposal\", \n \"vae_gaussian_prior\", \"vae_nis_prior\"],\n \"Algorithm to run.\")\ntf.app.flags.DEFINE_integer(\"latent_dim\", 50,\n \"Dimension of the latent space of the VAE.\")\ntf.app.flags.DEFINE_integer(\"K\", 128,\n \"Number of samples for NIS model.\")\ntf.app.flags.DEFINE_float(\"scale_min\", 1e-5,\n \"Minimum scale for various distributions.\")\ntf.app.flags.DEFINE_float(\"learning_rate\", 3e-4,\n \"The learning rate to use for ADAM or SGD.\")\ntf.app.flags.DEFINE_integer(\"batch_size\", 4,\n \"The number of examples per batch.\")\ntf.app.flags.DEFINE_string(\"logdir\", \"/tmp/nis\",\n \"Directory for summaries and checkpoints.\")\ntf.app.flags.DEFINE_integer(\"max_steps\", int(1e6),\n \"The number of steps to run training for.\")\ntf.app.flags.DEFINE_integer(\"summarize_every\", int(1e3),\n \"The number of steps between each evaluation.\")\nFLAGS = tf.app.flags.FLAGS\n\ndef mlp(inputs, \n layer_sizes,\n hidden_activation=tf.math.tanh,\n final_activation=tf.math.log_sigmoid,\n name=None):\n \"\"\"Creates a simple multi-layer perceptron.\"\"\"\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n for i, s in enumerate(layer_sizes[:-1]):\n inputs = tf.layers.dense(inputs,\n units=s,\n activation=hidden_activation,\n kernel_initializer=tf.initializers.glorot_uniform,\n name=\"layer_%d\" % (i+1))\n output = tf.layers.dense(inputs,\n units=layer_sizes[-1],\n activation=final_activation,\n kernel_initializer=tf.initializers.glorot_uniform,\n name=\"layer_%d\" % (len(layer_sizes)+1))\n return output\n\ndef conditional_normal(\n inputs,\n data_dim,\n hidden_sizes,\n hidden_activation=tf.math.tanh,\n scale_min=1e-5,\n name=None):\n raw_params = mlp(inputs, \n hidden_sizes + [2*data_dim],\n hidden_activation=hidden_activation,\n final_activation=None,\n name=name)\n loc, raw_scale = tf.split(raw_params, 2, axis=-1)\n scale = tf.math.maximum(scale_min, 
tf.math.softplus(raw_scale))\n return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)\n\ndef conditional_bernoulli(\n inputs,\n data_dim,\n hidden_sizes,\n hidden_activation=tf.math.tanh,\n name=None):\n bern_logits = mlp(inputs, \n hidden_sizes + [data_dim],\n hidden_activation=hidden_activation,\n final_activation=None,\n name=name)\n return tfd.Bernoulli(logits=bern_logits)\n\ndef vae(data,\n latent_dim,\n q_fn,\n prior_fn,\n generative_fn):\n batch_size = tf.shape(data)[0]\n data_dim = data.get_shape().as_list()[1]\n\n # Construct approximate posterior and sample z.\n q = q_fn(data)\n z = q.sample()\n log_q_z = q.log_prob(z)\n\n # compute the prior prob of z\n log_p_z = prior_fn(z)\n\n # Compute the model logprob of the data \n p_x_given_z = generative_fn(z)\n log_p_x_given_z = p_x_given_z.log_prob(data)\n\n elbo = log_p_z + log_p_x_given_z - log_q_z\n return elbo\n\ndef make_vae_with_nis_prior(\n data,\n latent_dim,\n K,\n nis_hidden_sizes,\n q_hidden_sizes,\n p_x_hidden_sizes,\n scale_min=1e-5):\n\n q_fn = functools.partial(\n conditional_normal,\n data_dim=latent_dim,\n hidden_sizes=q_hidden_sizes,\n scale_min=scale_min,\n name=\"q\")\n\n prior_fn = functools.partial(\n make_nis_lower_bound, \n K=K, \n hidden_layer_sizes=nis_hidden_sizes)\n\n generative_fn = functools.partial(\n conditional_normal,\n data_dim=data.get_shape().as_list()[1],\n hidden_sizes=p_x_hidden_sizes,\n scale_min=scale_min,\n name=\"vae_p_x\")\n\n return vae(data, latent_dim, q_fn, prior_fn, generative_fn)\n\ndef make_vae_with_gaussian_prior(\n data,\n latent_dim,\n q_hidden_sizes,\n p_x_hidden_sizes,\n scale_min=1e-5):\n\n dtype = data.dtype\n q_fn = functools.partial(\n conditional_normal,\n data_dim=latent_dim,\n hidden_sizes=q_hidden_sizes,\n scale_min=scale_min,\n name=\"q\")\n\n prior_fn = tfd.MultivariateNormalDiag(loc=tf.zeros([latent_dim], dtype=dtype),\n scale_diag=tf.ones([latent_dim], dtype=dtype)).log_prob\n\n generative_fn = functools.partial(\n conditional_normal,\n data_dim=data.get_shape().as_list()[1],\n hidden_sizes=p_x_hidden_sizes,\n scale_min=scale_min,\n name=\"vae_p_x\")\n\n return vae(data, latent_dim, q_fn, prior_fn, generative_fn)\n\ndef make_nis_with_gaussian_proposal(target_samples, K, hidden_layer_sizes):\n \"\"\"Constructs an NIS distribution for the given target samples and parameters.\n \n Args:\n target_samples: [batch_size, data_size]\n \"\"\"\n batch_size = tf.shape(target_samples)[0]\n data_size = tf.shape(target_samples)[1]\n dtype = target_samples.dtype\n proposal = tfd.MultivariateNormalDiag(loc=tf.zeros([data_size], dtype=dtype),\n scale_diag=tf.ones([data_size], dtype=dtype))\n proposal_samples = proposal.sample([batch_size, K]) #[batch_size, K, data_size]\n\n mlp_fn = functools.partial(\n mlp,\n layer_sizes=hidden_layer_sizes + [1],\n final_activation=None,\n name=\"nis_mlp\")\n\n log_energy_target = tf.reshape(mlp_fn(target_samples), [batch_size])\n log_energy_proposal = tf.reshape(mlp_fn(proposal_samples), [batch_size, K])\n\n proposal_lse = tf.reduce_logsumexp(log_energy_proposal, axis=1) # [batch_size]\n \n #[batch_size]\n denom = tf.reduce_logsumexp(tf.stack([log_energy_target, proposal_lse], axis=-1), axis=1)\n denom -= tf.log(tf.to_float(K+1))\n\n lower_bound = proposal.log_prob(target_samples) + log_energy_target - denom\n return lower_bound\n\ndef make_nis_with_vae_proposal(\n data,\n K,\n vae_latent_dim,\n nis_hidden_sizes,\n q_hidden_sizes,\n p_x_hidden_sizes,\n scale_min=1e-5,\n lr=1e-4):\n batch_size = data.get_shape().as_list()[0]\n data_size = 
data.get_shape().as_list()[1]\n dtype = data.dtype\n # Sample z_1:K from the VAE\n vae_prior = tfd.MultivariateNormalDiag(loc=tf.zeros([vae_latent_dim], dtype=dtype),\n scale_diag=tf.ones([vae_latent_dim], dtype=dtype))\n z = vae_prior.sample([batch_size, K]) #[batch_size, K, vae_latent_dim]\n # Use zs to sample xs\n p_x_given_z_fn = functools.partial(conditional_normal,\n data_dim=data_size, \n hidden_sizes=p_x_hidden_sizes, \n scale_min=scale_min, \n name=\"p_x_given_z\")\n p_x_given_z = p_x_given_z_fn(z)\n x = p_x_given_z.sample() #[batch_size, K, data_size]\n\n # compute lower bound on log prob of data\n q = conditional_normal(\n data, vae_latent_dim, q_hidden_sizes, scale_min=scale_min, name=\"q\")\n z_q = q.sample() # [batch_size, vae_latent_dim]\n p_x_given_z_q = p_x_given_z_fn(z_q)\n log_p_data_lb = vae_prior.log_prob(z_q) + p_x_given_z_q.log_prob(data) - q.log_prob(z_q)\n \n mlp_fn = functools.partial(\n mlp,\n layer_sizes=nis_hidden_sizes+ [1],\n final_activation=None,\n name=\"nis_mlp\")\n\n log_energy_target = tf.reshape(mlp_fn(data), [batch_size])\n log_energy_proposal = tf.reshape(mlp_fn(x), [batch_size, K])\n\n proposal_lse = tf.reduce_logsumexp(log_energy_proposal, axis=1) # [batch_size]\n \n #[batch_size]\n denom = tf.reduce_logsumexp(tf.stack([log_energy_target, proposal_lse], axis=-1), axis=1)\n denom -= tf.log(tf.to_float(K+1))\n\n lower_bound = log_p_data_lb + log_energy_target - denom\n return lower_bound\n\ndef make_log_hooks(global_step, elbo):\n hooks = []\n def summ_formatter(d):\n return (\"Step {step}, elbo: {elbo:.5f}\".format(**d))\n elbo_hook = tf.train.LoggingTensorHook(\n {\"step\": global_step, \"elbo\": elbo},\n every_n_iter=FLAGS.summarize_every,\n formatter=summ_formatter)\n hooks.append(elbo_hook)\n if len(tf.get_collection(\"infrequent_summaries\")) > 0:\n infrequent_summary_hook = tf.train.SummarySaverHook(\n save_steps=10000,\n output_dir=FLAGS.logdir,\n summary_op=tf.summary.merge_all(key=\"infrequent_summaries\")\n )\n hooks.append(infrequent_summary_hook)\n return hooks\n\ndef main(unused_argv):\n FLAGS.logdir = os.path.join(FLAGS.logdir, FLAGS.algo)\n g = tf.Graph()\n with g.as_default():\n\n data_batch, _, _ = mnist_data.get_mnist(\n batch_size=FLAGS.batch_size,\n split=\"train\")\n\n if FLAGS.algo == \"nis_vae_proposal\":\n print(\"Running NIS with VAE proposal\")\n elbo = make_nis_with_vae_proposal(\n data_batch,\n K=FLAGS.K,\n vae_latent_dim=FLAGS.latent_dim,\n nis_hidden_sizes=[200, 100],\n q_hidden_sizes=[300, 300],\n p_x_hidden_sizes=[300, 300],\n scale_min=FLAGS.scale_min)\n elif FLAGS.algo == \"nis_gaussian_proposal\":\n print(\"Running NIS with Gaussian proposal\")\n elbo = make_nis_with_gaussian_proposal(\n data_batch, \n K=FLAGS.K, \n hidden_layer_sizes=[200,100])\n elif FLAGS.algo == \"vae_nis_prior\":\n print(\"Running VAE with NIS prior\")\n elbo = make_vae_with_nis_prior(\n data_batch,\n latent_dim=FLAGS.latent_dim,\n K=FLAGS.K,\n nis_hidden_sizes=[200, 100],\n q_hidden_sizes=[300, 300],\n p_x_hidden_sizes=[300, 300],\n scale_min=FLAGS.scale_min)\n elif FLAGS.algo == \"vae_gaussian_prior\":\n print(\"Running VAE with gaussian prior\")\n elbo = make_vae_with_gaussian_prior(\n data_batch,\n latent_dim=FLAGS.latent_dim,\n q_hidden_sizes=[300, 300],\n p_x_hidden_sizes=[300, 300],\n scale_min=1e-5)\n\n # Finish constructing the graph\n elbo_avg = tf.reduce_mean(elbo)\n tf.summary.scalar(\"elbo\", elbo_avg)\n global_step = tf.train.get_or_create_global_step()\n opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)\n grads = 
opt.compute_gradients(-elbo_avg)\n train_op = opt.apply_gradients(grads, global_step=global_step)\n\n log_hooks = make_log_hooks(global_step, elbo_avg) \n\n with tf.train.MonitoredTrainingSession(\n master=\"\",\n is_chief=True,\n hooks=log_hooks,\n checkpoint_dir=FLAGS.logdir,\n save_checkpoint_secs=120,\n save_summaries_steps=FLAGS.summarize_every,\n log_step_count_steps=FLAGS.summarize_every) as sess:\n cur_step = -1\n while cur_step <= FLAGS.max_steps and not sess.should_stop():\n _, cur_step = sess.run([train_op, global_step])\n\nif __name__ == \"__main__\":\n tf.app.run(main)\n","sub_path":"nis.py","file_name":"nis.py","file_ext":"py","file_size_in_byte":11496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"131413039","text":"\"\"\"\nCode for comparing clustering redshifts to photo-z\n\"\"\"\nfrom __future__ import print_function, division\nfrom .comparator import Comparator\n\nimport numpy as np\nfrom scipy import stats\n\nclass ClusterComparator(Comparator):\n def __init__(self, z_bins, z, pz, cov, z_pz, pz_pz, z_min, z_max, weight_fit=1):\n \"\"\"Reference cluster redshifts are over z_bins and are pz with\n covariance cov normalized over range z_min to z_max\n\n Parameters\n ----------\n z_bins : array (Ntomo, Nbins,)\n Bins used for clustering redshifts\n\n z_pz : array (Ntomo, Npz,)\n clustering redshifts\n\n pz : array (Ntomo, Nbins-1)\n Clustering Redshift Estimates\n\n cov : array (Ntomo, Nbins-1, Nbins-1)\n Covariance matrix of clustering redshift estimates\n\n z_pz : array (Ntomo, Npz,)\n redshift of Pz estimate\n\n pz_pz : array (Ntomo, Npz)\n Pz estimates\n\n z_min : (Ntomo,)\n Lower normalization ranges\n\n z_max : (Ntomo,)\n Upper normalization ranges\n\n Notes\n -----\n Priors are all truncorm (mu, sigma, +-cutoff) :\n\n b : (0, 0.05, 0.4)\n k : (0, 0.1, 1)\n gamma : (0, 0.5, 3)\n \"\"\"\n self.z_bins = z_bins\n # self.z = [0.5 * (z_bins_i[1:] + z_bins_i[:-1])\n # for z_bins_i in self.z_bins]\n self.z = z\n # should probably check z_bins vs z here\n self.pz = pz\n self.cov = cov\n self.z_min = z_min\n self.z_max = z_max\n\n self.z_pz = z_pz\n self.pz_pz = pz_pz\n\n self.labels = np.array([['$b_{{{0}}}$'.format(i + 1),\n '$k_{{{0}}}$'.format(i + 1)]\n for i in range(len(self.pz))]).flatten().tolist()\n self.labels += ['$\\gamma$']\n self.prior = [stats.truncnorm(a=(-0.4 - 0) / 0.05, b=(0.4 - 0) / 0.05,\n loc=0, scale=0.05),\n stats.truncnorm(a=(-5 - 0) / 1., b=(5 - 0) / 1.,\n loc=0, scale=1.)] * len(self.pz) + \\\n [stats.uniform(loc=-8, scale=16)]\n # [stats.truncnorm(a=(-3 - 0) / 0.5, b=(3 - 0) / 0.5,\n # loc=0, scale=0.5)]\n\n self.weight_fit = weight_fit\n\n def distribution_clustering(self, z, pz, cov, p, z_min, z_max, **kwargs):\n \"\"\"Function that modifies the clustering redshift distribution\n\n Parameters\n ----------\n z : redshift centers\n pz : redshift distribution\n p : array\n Parameter values\n\n Returns\n -------\n z_prime : redshift centers\n pz_prime : redshift distribution\n cov_prime : shifted covariance matrix\n\n \"\"\"\n log_amplitude = p[1:-1:2]\n gamma = p[-1]\n\n z_prime = []\n pz_prime = []\n cov_prime = []\n for z_i, pz_i, cov_i, z_min_i, z_max_i, log_amplitude_i in zip(\n z, pz, cov, z_min, z_max, log_amplitude):\n z0 = 0.5 * (z_min_i + z_max_i)\n amplitude = np.exp(log_amplitude_i)\n mult = ((1. 
+ z_i) / (1 + z0)) ** gamma * amplitude\n z_prime.append(z_i)\n pz_prime.append(pz_i * mult)\n cov_prime.append(cov_i * mult[None] * mult[:, None])\n\n return z_prime, pz_prime, cov_prime\n\n def distribution_photoz(self, z_bins, z, pz, p, **kwargs):\n \"\"\"Function that returns the shifted photo-z distribution\n\n Parameters\n ----------\n z : redshift centers\n pz : redshift distribution\n p : array\n Parameter values\n\n Returns\n -------\n z_prime : redshift centers\n pz_prime : redshift distribution\n\n \"\"\"\n bias = p[:-1:2]\n\n z_prime = []\n pz_prime = []\n\n for z_i, pz_i, z_bins_i, bias_i in zip(\n z, pz, z_bins, bias):\n z_prime_i, pz_prime_i = self.rebin(z_i - bias_i, pz_i, z_bins_i)\n z_prime.append(z_prime_i)\n pz_prime.append(pz_prime_i)\n\n return z_prime, pz_prime\n\n def distribution(self, p, z_bins, z, pz, cov, z_pz, pz_pz, z_min, z_max, **kwargs):\n \"\"\"Given parameter p values, return modified clustering and photo-z\n\n Parameters\n ----------\n p : array\n Parameter values\n\n Returns\n -------\n z_pz_p : photoz redshift centers\n pz_pz_p : photoz redshift distribution\n z_p : clustering redshift centers\n pz_p : clustering redshift distribution\n cov_p : clustering covariance matrix\n\n \"\"\"\n z_p, pz_p, cov_p = self.distribution_clustering(z, pz, cov, p, z_min, z_max)\n # get the photoz modifications\n z_pz_p, pz_pz_p = self.distribution_photoz(z_bins, z_pz, pz_pz, p)\n\n return z_pz_p, pz_pz_p, z_p, pz_p, cov_p\n\n def lnlike(self, p):\n \"\"\"Function that evaluates the goodness of fit of parameters\n\n Parameters\n ----------\n p : array\n Parameter values\n\n Returns\n -------\n chi squares\n \"\"\"\n\n z_pz_p, pz_pz_p, z_p, pz_p, cov_p = self.distribution(p,\n self.z_bins, self.z, self.pz, self.cov, self.z_pz, self.pz_pz,\n self.z_min, self.z_max)\n\n # get the modifications to the clustering redshifts\n chi2s = [self._chi2(pz_pz_p[i], pz_p[i], cov_p[i])\n for i in range(len(pz_p))]\n return -0.5 * sum(chi2s) * self.weight_fit\n","sub_path":"zbc/cluster_comparator.py","file_name":"cluster_comparator.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"247547942","text":"\"\"\"change int to bigint for downloads in recent table\n\nRevision ID: c81b3715b9e5\nRevises: 9116cea0e0d7\nCreate Date: 2018-04-05 00:56:02.276823\n\n\"\"\"\n# flake8: noqa\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c81b3715b9e5'\ndown_revision = '9116cea0e0d7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('recent', 'downloads',\n existing_type=sa.INTEGER(),\n type_=sa.BigInteger(),\n existing_nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column('recent', 'downloads',\n existing_type=sa.BigInteger(),\n type_=sa.INTEGER(),\n existing_nullable=False)\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/c81b3715b9e5_.py","file_name":"c81b3715b9e5_.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"145383223","text":"r\"\"\"\nInterface for extracting data and generating images from Jmol readable files.\n\nJmolData is a no GUI version of Jmol useful for extracting data from files Jmol\nreads and for generating image files.\n\nAUTHORS:\n\n- Jonathan Gutow (2012-06-14): complete doctest coverage\n- Jonathan Gutow (2012-03-21): initial version\n\"\"\"\n\n#*******************************************************************************\n# Copyright (C) 2012 Jonathan Gutow (gutow@uwosh.edu)\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# as published by the Free Software Foundation; either version 2 of\n# the License, or (at your option) any later version.\n# http://www.gnu.org/licenses/\n#*******************************************************************************\nfrom __future__ import print_function\n\nfrom sage.structure.sage_object import SageObject\n\nfrom sage.env import SAGE_LOCAL\nfrom sage.misc.temporary_file import tmp_filename\n\nimport subprocess\nimport os\n\nclass JmolData(SageObject):\n r\"\"\"\n .. todo::\n\n Create an animated image file (GIF) if spin is on and put data\n extracted from a file into a variable/string/structure to return\n \"\"\"\n def __init__(self):\n \"\"\"\n EXAMPLES:\n\n Create a JmolData object::\n\n sage: from sage.interfaces.jmoldata import JmolData\n sage: JData = JmolData()\n \"\"\"\n pass\n\n def is_jvm_available(self):\n \"\"\"\n Returns True if the Java Virtual Machine is available and False if not.\n\n EXAMPLES:\n\n Check that it returns a boolean::\n\n sage: from sage.interfaces.jmoldata import JmolData\n sage: JData = JmolData()\n sage: type(JData.is_jvm_available())\n \n \"\"\"\n try:\n version = subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)\n except (subprocess.CalledProcessError, OSError):\n return False\n\n import re\n java_version = re.search(\"version.*[1][.][78]\", version)\n return java_version is not None\n\n def export_image(self,\n targetfile,\n datafile, #name (path) of data file Jmol can read or script file telling it what to read or load\n datafile_cmd='script', #\"script\" or \"load\"\n image_type ='PNG', #PNG, JPG, GIF\n figsize=5,\n **kwds):\n r\"\"\"\n This executes JmolData.jar to make an image file.\n\n INPUT:\n\n - targetfile -- the full path to the file where the image\n should be written.\n\n - datafile -- full path to the data file Jmol can read or\n text of a script telling Jmol what to read or load.\n If it is a script and the platform is cygwin, the filenames in\n the script should be in native windows format.\n\n - datafile_cmd -- (default ``'script'``) ``'load'`` or ``'script'``\n should be ``\"load\"`` for a data file.\n\n - image_type -- (default ``\"PNG\"``) ``'PNG'`` ``'JPG'`` or ``'GIF'``\n\n - figsize -- number (default 5) equal to (pixels/side)/100\n\n OUTPUT:\n\n Image file, .png, .gif or .jpg (default .png)\n\n .. note::\n\n Examples will generate an error message if a functional Java Virtual Machine (JVM)\n is not installed on the machine the Sage instance is running on.\n\n .. 
warning::\n\n Programmers using this module should check that the JVM is\n available before making calls to avoid the user getting\n error messages. Check for the JVM using the function\n :meth:`is_jvm_available`, which returns True if a JVM is available.\n\n EXAMPLES:\n\n Use Jmol to load a pdb file containing some DNA from a web data\n base and make an image of the DNA. If you execute this in the\n notebook, the image will appear in the output cell::\n\n sage: from sage.interfaces.jmoldata import JmolData\n sage: JData = JmolData()\n sage: script = \"load =1lcd;display DNA;moveto 0.0 { -473 -713 -518 59.94} 100.0 0.0 0.0 {21.17 26.72 27.295} 27.544636 {0.0 0.0 0.0} -25.287832 64.8414 0.0;\"\n sage: testfile = tmp_filename(ext=\"DNA.png\")\n sage: JData.export_image(targetfile=testfile,datafile=script,image_type=\"PNG\") # optional -- java internet\n sage: print(os.path.exists(testfile)) # optional -- java internet\n True\n\n Use Jmol to save an image of a 3-D object created in Sage.\n This method is used internally by plot3d to generate static images.\n This example doesn't have correct scaling::\n\n sage: from sage.interfaces.jmoldata import JmolData\n sage: JData = JmolData()\n sage: D=dodecahedron()\n sage: from sage.misc.misc import SAGE_TMP\n sage: archive_name=os.path.join(SAGE_TMP, \"archive.jmol.zip\")\n sage: D.export_jmol(archive_name) #not scaled properly...need some more steps.\n sage: archive_native = archive_name\n sage: import sys\n sage: if sys.platform == 'cygwin':\n ....: from subprocess import check_output, STDOUT\n ....: archive_native = check_output(['cygpath', '-w', archive_native],\n ....: stderr=STDOUT).rstrip()\n sage: script = 'set defaultdirectory \"{0}\"\\n script SCRIPT\\n'.format(archive_native)\n sage: testfile = os.path.join(SAGE_TMP, \"testimage.png\")\n sage: JData.export_image(targetfile=testfile, datafile=script, image_type=\"PNG\") # optional -- java\n sage: print(os.path.exists(testfile)) # optional -- java\n True\n \"\"\"\n # Set up paths, file names and scripts\n jmolpath = os.path.join(SAGE_LOCAL, \"share\", \"jmol\", \"JmolData.jar\")\n target_native = targetfile\n import sys\n if sys.platform == 'cygwin':\n jmolpath = subprocess.check_output(['cygpath', '-w', jmolpath],\n stderr=subprocess.STDOUT).rstrip()\n target_native = subprocess.check_output(['cygpath', '-w', target_native],\n stderr=subprocess.STDOUT).rstrip()\n if (datafile_cmd != 'script'):\n datafile = subprocess.check_output(['cygpath', '-w', datafile],\n stderr=subprocess.STDOUT).rstrip()\n launchscript = \"\"\n if (datafile_cmd!='script'):\n launchscript = \"load \"\n launchscript = launchscript + datafile\n imagescript = \"write \" + image_type + \" \" + target_native + \"\\n\"\n\n sizeStr = \"%sx%s\" %(figsize*100,figsize*100)\n # Scratch file for Jmol errors\n scratchout = tmp_filename(ext=\".txt\")\n with open(scratchout, 'w') as jout:\n # Now call the java application and write the file.\n env = dict(os.environ)\n env['LC_ALL'] = 'C'\n env['LANG'] = 'C'\n subprocess.call([\"java\", \"-Xmx512m\", \"-Djava.awt.headless=true\",\n \"-jar\", jmolpath, \"-iox\", \"-g\", sizeStr,\n \"-J\", launchscript, \"-j\", imagescript],\n stdout=jout, stderr=jout, env=env)\n if not os.path.isfile(targetfile):\n raise RuntimeError(\"Jmol failed to create file %s, see %s for details\"%(repr(targetfile), repr(scratchout)))\n 
os.unlink(scratchout)\n","sub_path":"sage/src/sage/interfaces/jmoldata.py","file_name":"jmoldata.py","file_ext":"py","file_size_in_byte":7480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"282212588","text":"\"\"\"This module Creates a awesome Engendro bot.\"\"\"\nimport error_handler\nimport global_messages\nimport commands\nimport logging\nimport os\nimport sys\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n\n\nclass Engendro(object):\n \"\"\"Awesome random behaviour Telegram bot.\"\"\"\n def __init__(self, token):\n \"\"\"\n Args:\n config (obj): Configuration object parameter\n \"\"\"\n\n self.__updater = Updater(token=token)\n\n dispatcher = self.__updater.dispatcher\n dispatcher.add_handler(CommandHandler('uptime', commands.uptime))\n dispatcher.add_handler(CommandHandler('roll', commands.roll))\n\n dispatcher.add_handler(\n MessageHandler(Filters.text, global_messages.handle))\n\n dispatcher.add_error_handler(error_handler.handle)\n\n def start(self):\n \"\"\"Starts Engendro bot.\"\"\"\n self.__updater.start_polling(clean=True)\n self.__updater.idle()\n\n\nif __name__ == '__main__':\n # Config logger\n logging.basicConfig(\n level=logging.WARNING,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n if 'DEBUG' in os.environ and os.environ['DEBUG'] == '1':\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Start bot\n if len(sys.argv) > 1:\n engendro = Engendro(sys.argv[1])\n elif 'BOT_TOKEN' in os.environ:\n engendro = Engendro(os.environ['BOT_TOKEN'])\n else:\n raise Exception(\"BOT_TOKEN not defined\")\n engendro.start()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"422004790","text":"#!/usr/bin/env python3\nimport re\nimport sys\nimport os\nfrom traceback import print_exception\nfrom openpyxl import load_workbook\nfrom pandas import DataFrame\nimport numpy as np\nimport pickle\n\n# Parses all experiment files (xls or xlsx format), looks for 'freezes' worksheet,\n# assembles into csv output.\n\n# --------- func defs -----------------\n\ndef parse_file(exp_file):\n\ttry:\n\t\twb = load_workbook(exp_file,read_only=True, data_only=True)\n\texcept:\n\t\treturn(None)\n\tif \"freezes\" in wb.sheetnames:\n\t\tws = wb['freezes']\n\t\treturn(ws)\n\telse:\n\t\treturn(None)\n\t\n\n# -------- end func defs ------------\n\nif __name__ == '__main__':\n# gets all experiment files\n\tpath = \"~/Desktop/experiments\"\n\tfiles = os.listdir(os.path.expanduser(path))\n\tfiles = [os.path.abspath(os.path.join(os.path.expanduser(path),x)) for x\n\t\t\t\t\t\tin files if x[-4:] == \".xls\" or\n\t\t\t\t\t\tx[-5:] == \".xlsx\"]\n#print(\"\\n\".join(files))\n\n\tdat = map(parse_file,files)\n\tres = DataFrame()\n\n\tfor i,n in enumerate(dat):\n\t\tprint(f'processing {i+1}/{len(files)} {files[i]}')\n\t\ttry:\n\t\t\tif n is None:\n\t\t\t\tcontinue\n\t\t\t# reads sheet by DataFrame\n\t\t\tdf = DataFrame([[c.value for c in r] for r in n.iter_rows()])\n\t\t\tdf.columns = df.iloc()[0,]\n\t\t\tdf.drop(index=0,axis=0,inplace=True)\n\t\t\ttemp = np.array(df.columns)\n#\t\tprint('cols',temp)\n#\t\tpickle.dump(temp,open('/Users/sku/Desktop/temp.pkl','wb'))\n\t\t\tmask = [x is None for x in temp]\n\t\t\tdrops = df.columns[mask]\n#\t\tprint('drops',drops)\n\t\t\tdf.drop(labels=drops,axis=1,inplace=True)\n\n\t\t\t# appends to res\n\t\t\tres = 
res.append(df,ignore_index=True, sort=True)\n\t\texcept:\n\t\t\tprint('iter',i)\n\t\t\tprint(df)\n\t\t\tprint('----------')\n\t\t\tprint(res)\n\t\t\traise\n\n\tres.dropna(how='all',inplace=True)\n#pickle.dump(res[['date','sampleID','type','location','notes']],\n#\topen('/Users/sku/Desktop/results.pkl','wb'))\n#res = res[['date','sampleID','type','location','notes']]\n\tres.to_excel(os.path.expanduser('~/Desktop/freezes.xlsx'),index=False,\n\t\tfreeze_panes=(1,0))\n#print(res.to_csv(None))\n\n\n#\tfor m in n.iter_rows():\n#\t\tif m is None:\n#\t\t\tcontinue\n#\t\tret = [str(x.value) for x in m]\n#\t\tif all([x == \"None\" for x in ret]):\n#\t\t\tnext\n#\t\telse:\n#\t\t\ttemp = ','.join(ret)\n#\t\t\tprint(temp)\n","sub_path":"compile_freezes.py","file_name":"compile_freezes.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"235788543","text":"# Initial node class\n\nclass Node: \n\t# simply initialize the class, with itself and the dataval\n\tdef __init__(self, dataval=None):\n\t\tself.dataval = dataval\n\t\t# this itself stores a node class vs a pointer to it in C\n\t\tself.nextval = None\n\nclass head:\n\t#head basically\n\tdef __init__(self):\n\t\tself.headval = None\n\t\t\nlist1 = head()\nlist1.headval = Node(\"Mon\")\ne2 = Node(\"Tue\")\ne3 = Node(\"Wed\")\n# Link first Node to second node\nlist1.headval.nextval = e2\n\n# Link second Node to third node\ne2.nextval = e3\n","sub_path":"linkedList/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"483016671","text":"try:\n import torch\n from pytorch_transformers import GPT2Tokenizer, GPT2LMHeadModel\nexcept ImportError:\n # No installation required if not using this function\n pass\n\nfrom nlpaug.model.lang_models import LanguageModels\n\n\nclass Gpt2(LanguageModels):\n SUBWORD_PREFIX = 'Ġ'\n\n def __init__(self, model_path='gpt2', device='cuda'):\n super().__init__()\n self.model_path = model_path\n self.device = device\n\n self.tokenizer = GPT2Tokenizer.from_pretrained(model_path)\n self.model = GPT2LMHeadModel.from_pretrained(model_path)\n self.model.to(device)\n self.model.eval()\n\n def id2token(self, _id):\n return self.tokenizer.decode(_id, clean_up_tokenization_spaces=True).strip()\n\n def predict(self, text, target_word=None, top_n=5):\n # Convert feature\n input_idxes = self.tokenizer.encode(text)\n input_idxes = torch.tensor(input_idxes, device=self.device).unsqueeze(0).repeat(1, 1)\n\n # Prediction\n with torch.no_grad():\n outputs = self.model(input_idxes)\n target_token_logits = outputs[0][0][-1] # GPT2 should only predict last token\n\n # Generate candidates\n candidate_ids, candidate_probas = self.prob_multinomial(target_token_logits, top_n=top_n + 20)\n results = self.get_candidiates(candidate_ids, candidate_probas, target_word, top_n)\n\n return results\n","sub_path":"nlpaug/model/lang_models/gpt2.py","file_name":"gpt2.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"90520510","text":"import pandas as pd\nimport os\nimport glob\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\nimport keras.optimizers\nimport keras.callbacks\nfrom keras import regularizers\nfrom sklearn.model_selection import StratifiedShuffleSplit\nimport 
Saver\nimport Reader\nimport importlib\nimportlib.reload(Saver)\nimportlib.reload(Reader)\n\ngpu = input(\"GPU number: \")\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu\n\nname = input(\"Output dir: \")\nepochs = int(input(\"Epochs: \"))\nperiods = int(input(\"Period: \"))\nall_cls = int(input(\"All class? (0-1): \"))\nresume = int(input(\"Resume? (0-1): \")) \n\nspec_dir = \"../Surveys/css-sloan-x-match/CSDR1/specs/\"\nlabels_path = \"../Surveys/css-sloan-x-match/CSDR1/keys-cls.csv\"\nspec_dim = 3795\nmodel_dir = \"./models/\"+name+\"/\" \n\nauto = None\ninit_epoch = 0\n\nif resume==1:\n files = glob.glob(model_dir+\"*.hdf5\")\n path = max(files, key = os.path.getctime)\n auto = load_model(path)\n init_epoch = int(path.split(\".\")[-2])\n\nelse: \n units = input(\"Encoding dim: \")\n units = [int(u) for u in units.split(\",\")]\n reg = int(input(\"Reg to encoding? (0-1): \"))\n in_spec = Input(shape=(spec_dim,))\n layer = in_spec \n if len(units)>1:\n layer = Dense(units=units[-2], activation=\"relu\")(in_spec)\n if reg==1:\n reg_type = int(input(\"Reg type (1-2): \"))\n w = float(input(\"Reg weight: \"))\n if reg_type ==1:\n layer = Dense(units=units[-1], activation=\"relu\", activity_regularizer=regularizers.l1(w))(layer)\n else: \n layer = Dense(units=units[-1], activation=\"relu\", activity_regularizer=regularizers.l2(w))(layer)\n else: \n layer = Dense(units=units[-1], activation=\"relu\")(layer)\n if len(units)>1:\n layer = Dense(units=units[-2], activation=\"sigmoid\")(layer)\n out_spec = Dense(units=spec_dim, activation=\"sigmoid\")(layer)\n\n auto = Model(in_spec, out_spec)\n auto.compile(optimizer=\"Adam\", loss='mean_squared_error')\n\nauto.summary()\n\n\ndb = Reader.DB()\nif all_cls==1:\n db.build_db(spec_dir, down_sample=True, normed=True, labeled=True, labels_path=labels_path, down_cls=True)\nelse:\n db.build_db(spec_dir, down_sample=True, normed=True, labeled=True, labels_path=labels_path, one_class=5)\n\nsss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=0)\ntrain, test = None, None\nX_train, X_test = None, None\n\nfor train_index, test_index in sss.split(np.zeros(len(db.labels)), db.labels):\n train, test = train_index, test_index\n X_train, X_test = db.flux[train_index], db.flux[test_index]\n\nif not os.path.exists(model_dir):\n os.mkdir(model_dir)\nfile_path = model_dir+name+\".{epoch:02d}.hdf5\"\nmodel_checkpoint = keras.callbacks.ModelCheckpoint(file_path, period=periods)\n\nhis = auto.fit(X_train, X_train,\n epochs = init_epoch+epochs,\n shuffle = True,\n batch_size=len(X_train),\n validation_data=(X_test, X_test),\n initial_epoch = init_epoch,\n callbacks=[model_checkpoint],\n )\nhis_files = glob.glob(model_dir+\"*.npy\")\n\nif resume and his_files:\n path = max(his_files , key = os.path.getctime)\n his_last = np.load(path).item()\n loss = his_last[\"loss\"] + his.history[\"loss\"]\n val_loss = his_last[\"val_loss\"] + his.history[\"val_loss\"]\n his_all = {\"loss\":loss, \"val_loss\":val_loss}\n np.save(model_dir+name+\".npy\", his_all)\n\nelse:\n np.save(model_dir+name+\".npy\", his.history)\n","sub_path":"Base-line/Spec-features.py","file_name":"Spec-features.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"9044483","text":"\"\"\"\nStream helper for liquidpy\n\"\"\"\nimport io\nfrom .defaults import LIQUID_DEBUG_SOURCE_CONTEXT\n\ndef words_to_matrix(words):\n\t\"\"\"\n\tConvert words to matrix for searching.\n\tFor example:\n\t```\n\t['{%', '{%-', 
'{{'] => [\n\t\t{'{': 0}, 3 shares, 0 endings\n\t\t{'%': 1, '{': 1},\n\t\t{'-': 1}\n\t]\n\t```\n\t@params:\n\t\twords: The words to be converted\n\t@returns:\n\t\tThe converted matrix\n\t\"\"\"\n\tmatrix = [{} for _ in range(max(len(word) for word in words))]\n\tfor word in words:\n\t\tfor i, char in enumerate(word):\n\t\t\tmatrix[i].setdefault(char, 0)\n\t\t\tif i == len(word) - 1:\n\t\t\t\tmatrix[i][char] += 1\n\treturn matrix\n\nclass LiquidStream:\n\t\"\"\"\n\tThe stream helper for liquidpy\n\t\"\"\"\n\tdef __init__(self, stream):\n\t\t\"\"\"\n\t\tInitialize the stream\n\t\t@params:\n\t\t\tstream (Stream): A python stream\n\t\t\"\"\"\n\t\tself.stream = stream\n\t\tself.cursor = stream.tell()\n\n\t@staticmethod\n\tdef from_file(path):\n\t\t\"\"\"\n\t\tGet stream of a file\n\t\t@params:\n\t\t\tpath (str): The path of the file\n\t\t@returns:\n\t\t\tLiquidStream\n\t\t\"\"\"\n\t\treturn LiquidStream(io.open(path, mode = 'r', encoding = 'utf-8'))\n\n\t@staticmethod\n\tdef from_string(string):\n\t\t\"\"\"\n\t\tGet stream of a string\n\t\t@params:\n\t\t\tstring (str): The string\n\t\t@returns:\n\t\t\tLiquidStream\n\t\t\"\"\"\n\t\treturn LiquidStream(io.StringIO(string))\n\n\t@staticmethod\n\tdef from_stream(stream):\n\t\t\"\"\"\n\t\tGet stream of a stream\n\t\t@params:\n\t\t\tstream (Stream): A stream\n\t\t@returns:\n\t\t\tLiquidStream\n\t\t\"\"\"\n\t\treturn LiquidStream(stream)\n\n\tdef __del__(self):\n\t\t\"\"\"\n\t\tClose the stream when GC\n\t\t\"\"\"\n\t\tself.close()\n\n\tdef close(self):\n\t\t\"\"\"\n\t\tClose the stream\n\t\t\"\"\"\n\t\tif self.stream and not self.stream.closed:\n\t\t\tself.stream.close()\n\n\tdef next(self):\n\t\t\"\"\"\n\t\tRead next character from the stream\n\t\t@returns:\n\t\t\tstr: the next character\n\t\t\"\"\"\n\t\tret = self.stream.read(1)\n\t\tself.cursor += len(ret.encode('utf-8'))\n\t\treturn ret\n\n\tdef back(self):\n\t\t\"\"\"\n\t\tPut cursor 1-character back\n\t\t\"\"\"\n\t\tself.cursor -= 1\n\t\tself.stream.seek(self.cursor)\n\n\tdef rewind(self):\n\t\t\"\"\"\n\t\tRewind the stream\n\t\t\"\"\"\n\t\tself.stream.seek(0)\n\t\tself.cursor = 0\n\n\tdef eos(self):\n\t\t\"\"\"\n\t\tTell if the stream is ended\n\t\t@returns:\n\t\t\t`True` if it is else `False`\n\t\t\"\"\"\n\t\tnchar = self.next()\n\t\tif not nchar:\n\t\t\treturn True\n\t\tself.cursor -= 1\n\t\tself.stream.seek(self.cursor)\n\t\treturn False\n\n\tdef dump(self):\n\t\t\"\"\"\n\t\tDump the rest of the stream\n\t\t@returns:\n\t\t\tstr: The rest of the stream\n\t\t\"\"\"\n\t\treturn self.stream.read()\n\n\tdef readline(self):\n\t\t\"\"\"Read a single line from the stream\"\"\"\n\t\treturn self.stream.readline()\n\n\tdef get_context(self, lineno, context = LIQUID_DEBUG_SOURCE_CONTEXT, baselineno = 1):\n\t\t\"\"\"\n\t\tGet the line of source code and its context\n\t\t@params:\n\t\t\tlineno (int): Line number of current line\n\t\t\tcontext (int): How many lines of context to show\n\t\t@returns:\n\t\t\tlist: The formated code with context\n\t\t\"\"\"\n\t\tself.rewind()\n\t\tret = []\n\t\tmaxline = lineno + context\n\t\tnbit = len(str(maxline)) + 1\n\t\ti = baselineno\n\t\tline = self.readline()\n\t\twhile line:\n\t\t\tif i < lineno - context or i > maxline:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tret.append(\"{} {} {}\".format(\n\t\t\t\t\t'>' if i == lineno else ' ', (str(i) + '.').ljust(nbit), line.rstrip()))\n\t\t\ti += 1\n\t\t\tline = self.readline()\n\t\treturn ret\n\n\tdef split(self, delimiter, limit = 0, trim = True,\n\t\twraps = None, quotes = '\"\\'`', escape = '\\\\'):\n\t\t\"\"\"\n\t\tSplit the 
string of the stream\n\t\t@params:\n\t\t\tdelimiter (str): The delimiter\n\t\t\tlimit (int): The max limit of the split\n\t\t\ttrim (bool): Whether to trim each part or not\n\t\t\twraps (list): A list of paired wraps to skip of the delimiter is wrapped by them\n\t\t\tquotes (str): A series of quotes to skip of the delimiter is wrapped by them\n\t\t\tescape (str): The escape character to see if any character is escaped\n\t\t@returns:\n\t\t\tlist: The split strings\n\t\t\"\"\"\n\t\twraps = ['{}', '[]', '()'] if wraps is None else wraps\n\t\tpreceding, stop = self.until([delimiter], False, wraps, quotes, escape)\n\t\tret = [preceding.strip() if trim else preceding]\n\t\tnsplit = 0\n\t\twhile stop:\n\t\t\tnsplit += 1\n\t\t\tif limit and nsplit >= limit:\n\t\t\t\trest = self.dump()\n\t\t\t\tret.append(rest.strip() if trim else rest)\n\t\t\t\tbreak\n\t\t\tpreceding, stop = self.until([delimiter], False, wraps, quotes, escape)\n\t\t\tret.append(preceding.strip() if trim else preceding)\n\t\treturn ret\n\n\tdef until(self, words, greedy = True, wraps = None, quotes = '\"\\'`', escape = '\\\\'):\n\t\t\"\"\"\n\t\tGet the string until certain words\n\t\tFor example:\n\t\t```\n\t\ts = LiquidStream.from_string(\"abcdefgh\")\n\t\ts.until([\"f\", \"fg\"]) == \"abcde\", \"fg\"\n\t\t# cursor point to 'h'\n\t\ts.until([\"f\", \"fg\"], greedy = False) == \"abcde\", \"f\"\n\t\t# cursor point to 'g'\n\t\ts.until([\"x\", \"xy\"]) == \"abcdefg\", \"\"\n\t\t# cursor point to eos\n\t\t```\n\t\t@params:\n\t\t\twords (list): A list of words to search\n\t\t\tgreedy (bool): Whether do a greedy search or not\n\t\t\t\t- Only effective when the words have prefices in common. For example\n\t\t\t\t- ['abc', 'ab'], then abc will be matched first\n\t\t\twraps (list): A list of paired wraps to skip of the delimiter is wrapped by them\n\t\t\tquotes (str): A series of quotes to skip of the delimiter is wrapped by them\n\t\t\tescape (str): The escape character to see if any character is escaped\n\t\t@returns:\n\t\t\tstr: The string that has been searched\n\t\t\tstr: The matched word\n\t\t\"\"\"\n\t\t# pylint:disable=too-many-locals,too-many-nested-blocks,too-many-branches\n\t\twraps = ['{}', '[]', '()'] if wraps is None else wraps\n\t\tret = ''\n\t\tmatrix = words_to_matrix(words)\n\t\tlen_matrix = len(matrix)\n\t\tmatched_chars = ''\n\t\tmatched_candidate = None\n\t\twrap_opens = {wrap[0]:i for i, wrap in enumerate(wraps)\n\t\t\tif not any(wrap[0] in mat for mat in matrix) and \\\n\t\t\t\tnot any(wrap[1] in mat for mat in matrix)}\n\t\twrap_closes = {wraps[i][1]:i for wrap_open,i in wrap_opens.items()}\n\t\tquote_index = {quote:i for i, quote in enumerate(quotes)\n\t\t\tif not any(quote in mat for mat in matrix)}\n\t\twrap_flags = [0 for _ in range(len(wraps))]\n\t\tquote_flags = [False for _ in range(len(quotes))]\n\t\tescape_flags = False\n\t\tchar = self.next()\n\t\t#print('START', '-' * 10, matrix)\n\t\twhile True:\n\t\t\t#print(char, end = ' ')\n\t\t\tif not char:\n\t\t\t\treturn ret, matched_candidate\n\t\t\tif char == escape:\n\t\t\t\tif matched_candidate and escape not in matrix[len(matched_candidate)]:\n\t\t\t\t\tself.back()\n\t\t\t\t\treturn ret, matched_candidate\n\t\t\t\tescape_flags = not escape_flags\n\t\t\t\tret += matched_chars + char\n\t\t\t\tmatched_chars = ''\n\t\t\telif not escape_flags: # and char != escape\n\t\t\t\tif char in wrap_opens and not any(quote_flags):\n\t\t\t\t\twrap_flags[wrap_opens[char]] += 1\n\t\t\t\telif char in wrap_closes and not any(quote_flags):\n\t\t\t\t\twrap_flags[wrap_closes[char]] 
-= 1\n\t\t\t\telif char in quote_index and \\\n\t\t\t\t\tnot any(flag for i, flag in enumerate(quote_flags) if i != quote_index[char]):\n\t\t\t\t\t# make sure I am not quoted\n\t\t\t\t\tquote_flags[quote_index[char]] = not quote_flags[quote_index[char]]\n\t\t\t\tif sum(wrap_flags) > 0 or any(quote_flags):\n\t\t\t\t\tif matched_candidate:\n\t\t\t\t\t\tself.back()\n\t\t\t\t\t\treturn ret, matched_candidate\n\t\t\t\t\tret += matched_chars + char\n\t\t\t\t\tmatched_chars = ''\n\t\t\t\telse:\n\t\t\t\t\tlen_matched_chars = len(matched_chars)\n\t\t\t\t\tmatching_dict = matrix[len_matched_chars]\n\t\t\t\t\t#print(' ', len_matched_chars, matching_dict)\n\t\t\t\t\tif char in matching_dict:\n\t\t\t\t\t\tmatched_chars += char\n\t\t\t\t\t\tendings = matching_dict[char]\n\t\t\t\t\t\tif not greedy and endings:\n\t\t\t\t\t\t\treturn ret, matched_chars\n\t\t\t\t\t\tif endings:\n\t\t\t\t\t\t\tmatched_candidate = matched_chars\n\t\t\t\t\t\t\t#print('mm', matched_candidate)\n\t\t\t\t\t\t\tif len_matched_chars + 1 == len_matrix: # we have matched all chars\n\t\t\t\t\t\t\t\treturn ret, matched_chars\n\n\t\t\t\t\telif matched_candidate:\n\t\t\t\t\t\tself.back()\n\t\t\t\t\t\treturn ret, matched_candidate\n\t\t\t\t\telse:\n\t\t\t\t\t\tret += matched_chars + char\n\t\t\t\t\t\tmatched_chars = ''\n\t\t\telse: # char == escape or escape_flags\n\t\t\t\tescape_flags = False\n\t\t\t\tret += matched_chars + char\n\t\t\t\tmatched_chars = ''\n\n\t\t\tchar = self.next()\n","sub_path":"liquid/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":7774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"46013754","text":"import ROOT as root\nfrom rootpy.plotting import Hist, Canvas\nfrom math import log, exp\nimport array, sys\nimport numpy as np\n\nlLimits = [0, 6000]\n\ndTacDelays = {0:{'height':2.68665e-01, 'width':5.28788e-03},\n\t\t\t 8:{'height':3.06615e-01, 'width':5.19475e-03},\n\t\t\t 16:{'height':3.43537e-01, 'width':4.54747e-03},\n\t\t\t 24:{'height':3.82137e-01, 'width':5.52308e-03},\n\t\t\t 32:{'height':4.19476e-01, 'width':5.43046e-03},\n\t\t\t 64:{'height':5.32018e-01, 'width':4.95080e-03},\n\t\t\t 96:{'height':6.82914e-01, 'width':5.30571e-03},\n\t\t\t 112:{'height':7.57700e-01, 'width':4.51142e-03},\n\t\t\t }\n\n\nlDelays = sorted(dTacDelays.keys())\naDelays = array.array('f', lDelays)\naDelaysErr = array.array('f', [0 for i in xrange(len(lDelays))])\naHeight = array.array('f')\naHeightErr = array.array('f')\n\nfor delay in lDelays:\n\theight = dTacDelays[delay]['height']\n\theightErr = dTacDelays[delay]['width']\n\taHeight.append(height)\n\taHeightErr.append(heightErr)\n\n\n\nc1 = Canvas()\n\ngTacHeight = root.TGraphErrors(len(lDelays), aDelays, aHeight, aDelaysErr, aHeightErr)\ngTacHeight.SetTitle('')\ngTacHeight.GetXaxis().SetTitle('Delay [ns]')\ngTacHeight.GetXaxis().SetTitleOffset(1.12)\ngTacHeight.GetXaxis().SetRangeUser(-10, 120)\ngTacHeight.GetYaxis().SetTitle('TAC Height [V]')\n\nfCal = root.TF1('fG1', 'pol1', lLimits[0], lLimits[1])\n#fG1.FixParameter(0, 0)\n\nfCal.SetLineStyle(9)\nfCal.SetLineWidth(1)\n\n\ngTacHeight.Draw('ap')\ngTacHeight.Fit('fG1')\n\nfCal.Draw('same')\n\nslope = fCal.GetParameter(1)*1000\nslopeErr = fCal.GetParError(1)*1000\nintercept = fCal.GetParameter(0)*1000\ninterceptErr = fCal.GetParError(0)*1000\n\nsFitInfo1 = 'slope = %.2f +/- %.2f [mV/ns]' % (slope, slopeErr)\nsFitInfo2 = 'intercept = %.2f +/- %.2f [mV]' % (intercept, interceptErr)\npt1 = root.TPaveText(.2,.65,.6,.75,'blNDC')\ntext1 = 
pt1.AddText(sFitInfo1)\ntext2 = pt1.AddText(sFitInfo2)\npt1.SetTextColor(root.kBlue)\npt1.SetFillStyle(0)\npt1.SetBorderSize(0)\npt1.Draw('same')\n\n\n\n\n\n\n\nc1.Update()\n\n\nraw_input('Press enter to continue...')\n","sub_path":"calibrations/tac_cal/tac_delay.py","file_name":"tac_delay.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"390586015","text":"\n\n\nfrom config import data_path\nfrom config import db, cur\nimport os, traceback\nfrom update_jobs.cal_boardIndex import board_stock\nfrom update_jobs.cal_boardIndex import board_change\n\n\n\n\ndef get_board_new_date():\n file_path = os.path.join(data_path, 'index_data.csv')\n date_list = []\n with open(file_path, 'r', encoding='utf-8') as file:\n for content in file:\n item = content.split(',')\n date = item[0]\n date_list.append(date)\n\n return date_list[-1]\n\n\n\n\ndef get_update_date_list(last_date):\n sql = \"SELECT cal_date from trade_days where \" \\\n \"cal_date > '%s' and is_open = 1 order by cal_date asc\" % last_date\n trade_date = []\n try:\n cur.execute(sql)\n result = cur.fetchall()\n for item in result:\n date = item[0].strftime('%Y%m%d')\n trade_date.append(date)\n return trade_date\n except Exception:\n traceback.print_exc()\n\n\ndef update_board_change():\n last_date = get_board_new_date()\n date_list = get_update_date_list(last_date)\n board_list = board_stock.board_to_stock().keys()\n out_put = data_path + '/index_data.csv'\n with open('%s' % out_put, 'a', encoding='utf-8') as file:\n print(board_list)\n for date in date_list[:5]:\n change_line = []\n for board in board_list:\n change = board_change.board_date_change(board, date)\n print(date, board, change)\n change_line.append(str(change))\n change_str = ','.join(change_line)\n value = date + ',' + change_str + '\\n'\n print(value)\n file.write(value)\n\n\nif __name__ == \"__main__\":\n # r = get_update_date_list('20081010')\n r = update_board_change()\n print(r)","sub_path":"update_jobs/boards_update.py","file_name":"boards_update.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"645096336","text":"import math\n\n# Die n-stelligen Zahlen liegen zwischen 10**(n-1) und 10**n - 1.\n# Die größte n-te Potenz, die wir uns ansehen müssen, ist also 9**n.\n# Aus der Ungleichung 10**(n-1) <= 9**n erhalten wir\n# n <= (log(9) / log(10/9)) + 1 = 21,85434...\n# Also betrachten wir maximal 21-te Potenzen.\n\ndef numberOfDigits(n):\n sn = str(n)\n return len(sn)\n\ntotalCount = 0\n\nfor n in range(1, 21+1):\n count = 0\n for x in range(1, 9 + 1):\n p = x**n\n if numberOfDigits(p) == n:\n count += 1\n totalCount += count\n print(n, count)\n\nprint('Total count is', totalCount)\n","sub_path":"PY/063.py","file_name":"063.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"129804075","text":"import numpy as np\nimport cPickle as pickle\nfrom ..fitter.template import Template\nfrom scipy.interpolate import splev\nfrom ..dir_list import template_path\n\nMsun = 1.9891e33 #unit: gram\nMpc = 3.08567758e24 #unit: cm\nm_H = 1.6726219e-24 #unit: gram\nr0 = 1.1 # pc\n\nfp = open(template_path+\"Cat3d_H.tmplt\")\ntp_cat3d_H = pickle.load(fp)\nfp.close()\ntcat3d_H = Template(**tp_cat3d_H)\nwaveLim = [1.0, 1e4]\n\ndef Cat3d_H(a, h, N0, i, logL, DL, z, wave, frame=\"rest\", t=tcat3d_H, 
waveLim=waveLim):\n \"\"\"\n This function generates the modified CLUMPY torus radiation from Garcia-Gonzalez et al. 2017.\n\n Parameters\n ----------\n a : float\n \tThe index a of the radial dust cloud distribution power law.\n N0 : float\n The number of clouds along an equatorial line-of-sight.\n theta : degree\n The half-opening angle.\n i : degree\n Inclination angle.\n logL : float\n UV luminosity erg/s in log.\n DL : float\n The luminosity distance.\n z : float\n The redshift.\n wave : float array\n The wavelengths of the output flux.\n frame : string\n \"rest\" for the rest frame SED and \"obs\" for the observed frame.\n t : Template object\n The template of DL07 model provided by user.\n waveLim : list\n The min and max of the wavelength covered by the template.\n\n Returns\n -------\n flux : float array\n The flux density of the model.\n\n Notes\n -----\n None.\n \"\"\"\n fltr = (wave > waveLim[0]) & (wave < waveLim[1])\n if np.sum(fltr) == 0:\n return np.zeros_like(wave)\n para = [a, h, N0, i]\n if frame == \"rest\":\n idx = 2.0\n elif frame == \"obs\":\n idx = 1.0\n else:\n raise ValueError(\"The frame '{0}' is not recognised!\".format(frame))\n f0 = (1 + z)**idx * 10**(logL - 46) * (r0 / DL * 1e-6)**2\n flux = np.zeros_like(wave)\n flux[fltr] = f0 * t(wave[fltr], para) * 1e29 # unit: mJy\n return flux\n\n","sub_path":"sedfit/models/model_cat3d_H_onetau.py","file_name":"model_cat3d_H_onetau.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"513412133","text":"\n\n#calss header\nclass _AXIOM():\n\tdef __init__(self,): \n\t\tself.name = \"AXIOM\"\n\t\tself.definitions = [u'a statement or principle that is generally accepted to be true, but need not be so: ', u'a formal statement or principle in mathematics, science, etc., from which other statements can be obtained: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_axiom.py","file_name":"_axiom.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"366181802","text":"\"\"\"Motor Imagery contexts\"\"\"\n\nimport numpy as np\nfrom .base import WithinSubjectContext, BaseContext\nfrom mne import Epochs, find_events\nfrom mne.epochs import concatenate_epochs, equalize_epoch_counts\nfrom sklearn.model_selection import cross_val_score, LeaveOneGroupOut, KFold\n\nclass BaseMotorImagery(BaseContext):\n \"\"\"Base Motor imagery context\n\n\n Parameters\n ----------\n datasets : List of Dataset instances.\n List of dataset instances on which the pipelines will be evaluated.\n pipelines : Dict of pipelines instances.\n Dictionary of pipelines. Keys identifies pipeline names, and values\n are scikit-learn pipelines instances.\n fmin : float | None, (default 7.)\n Low cut-off frequency in Hz. If None the data are only low-passed.\n fmax : float | None, (default 35)\n High cut-off frequency in Hz. 
If None the data are only high-passed.\n\n See Also\n --------\n MotorImageryTwoClasses\n \"\"\"\n\n def __init__(self, datasets, pipelines, fmin=7., fmax=35.):\n self.fmin = fmin\n self.fmax = fmax\n super().__init__(datasets, pipelines)\n\n def _epochs(self, dataset, subjects, event_id):\n \"\"\"epoch data\"\"\"\n raws = dataset.get_data(subjects=subjects)\n raws = raws[0]\n ep = []\n # we process each run independently\n for raw in raws:\n\n # find events\n events = find_events(raw, shortest_event=0, verbose=False)\n\n # pick some channels\n raw.pick_types(meg=True, eeg=True, stim=False,\n eog=False, exclude='bads')\n\n # filter data\n raw.filter(self.fmin, self.fmax, method='iir')\n\n # epoch data\n epochs = Epochs(raw, events, event_id, dataset.tmin, dataset.tmax,\n proj=False, baseline=None, preload=True,\n verbose=False)\n ep.append(epochs)\n return ep\n\n\nclass MotorImageryMultiClasses(BaseMotorImagery, WithinSubjectContext):\n \"\"\"Motor Imagery for multi class classification\n\n Multiclass motor imagery context. Evaluation is done in Randomized KFold or\n LeaveOneGroupOut (depending on the group variable, can be run or session)\n with accuracy as a metric. Epochs count are equalized.\n\n Parameters\n ----------\n datasets : List of Dataset instances.\n List of dataset instances on which the pipelines will be evaluated.\n pipelines : Dict of pipelines instances.\n Dictionary of pipelines. Keys identifies pipeline names, and values\n are scikit-learn pipelines instances.\n fmin : float | None, (default 7.)\n Low cut-off frequency in Hz. If None the data are only low-passed.\n fmax : float | None, (default 35)\n High cut-off frequency in Hz. If None the data are only high-passed.\n\n See Also\n --------\n MotorImageryTwoClasses\n \"\"\"\n\n def prepare_data(self, dataset, subjects):\n \"\"\"Prepare data for classification.\"\"\"\n if len(dataset.event_id) < 3:\n # multiclass, pick two first classes\n raise(ValueError(\"Dataset %s only contains two classes\" %\n dataset.name))\n\n event_id = dataset.event_id\n epochs = self._epochs(dataset, subjects, event_id)\n groups = []\n full_epochs = []\n\n for ii, epoch in enumerate(epochs):\n epochs_list = [epoch[k] for k in event_id]\n # equalize for accuracy\n equalize_epoch_counts(epochs_list)\n ep = concatenate_epochs(epochs_list)\n groups.extend([ii] * len(ep))\n full_epochs.append(ep)\n\n epochs = concatenate_epochs(full_epochs)\n X = epochs.get_data()*1e6\n y = epochs.events[:, -1]\n groups = np.asarray(groups)\n return X, y, groups\n\n def score(self, clf, X, y, groups, n_jobs=1):\n \"\"\"get the score\"\"\"\n if len(np.unique(groups)) > 1:\n # if group as different values, use group\n cv = LeaveOneGroupOut()\n else:\n # else use kfold\n cv = KFold(5, shuffle=True, random_state=45)\n\n auc = cross_val_score(clf, X, y, groups=groups, cv=cv,\n scoring='accuracy', n_jobs=n_jobs)\n return auc.mean()\n\n\nclass MotorImageryTwoClasses(BaseMotorImagery, WithinSubjectContext):\n \"\"\"Motor Imagery for binary classification\n\n Binary motor imagery context. Evaluation is done in Randomized KFold or\n LeaveOneGroupOut (depending on the group variable, can be run or session)\n with AUC as a metric.\n\n Parameters\n ----------\n datasets : List of Dataset instances.\n List of dataset instances on which the pipelines will be evaluated.\n pipelines : Dict of pipelines instances.\n Dictionary of pipelines. 
Keys identifies pipeline names, and values\n are scikit-learn pipelines instances.\n fmin : float | None, (default 7.)\n Low cut-off frequency in Hz. If None the data are only low-passed.\n fmax : float | None, (default 35)\n High cut-off frequency in Hz. If None the data are only high-passed.\n\n See Also\n --------\n MotorImageryTwoClasses\n \"\"\"\n\n def prepare_data(self, dataset, subjects):\n \"\"\"Prepare data for classification.\"\"\"\n\n if len(dataset.event_id) > 2:\n # multiclass, pick two first classes\n raise(ValueError(\"Dataset %s contain more than two classes\" %\n dataset.name))\n\n event_id = dataset.event_id\n epochs = self._epochs(dataset, subjects, event_id)\n groups = []\n for ii, ep in enumerate(epochs):\n groups.extend([ii] * len(ep))\n epochs = concatenate_epochs(epochs)\n X = epochs.get_data()*1e6\n y = epochs.events[:, -1]\n y = np.asarray(y == np.max(y), dtype=np.int32)\n\n groups = np.asarray(groups)\n return X, y, groups\n\n def score(self, clf, X, y, groups, n_jobs=1):\n \"\"\"get the score\"\"\"\n if len(np.unique(groups)) > 1:\n # if group as different values, use group\n cv = LeaveOneGroupOut()\n else:\n # else use kfold\n cv = KFold(5, shuffle=True, random_state=45)\n\n auc = cross_val_score(clf, X, y, groups=groups, cv=cv,\n scoring='roc_auc', n_jobs=n_jobs)\n return auc.mean()\n","sub_path":"moabb/contexts/motor_imagery.py","file_name":"motor_imagery.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"129639445","text":"#!/usr/bin/env python\n\nimport time\nimport logging\nimport threading\n\nimport kubernetes as kub\n\nfrom .multiproc import multiproc\n\nlogger = logging.getLogger(\"Monitor.{}\".format(__name__))\n\n\nclass kubernetes_watch(multiproc):\n\n def __init__(self, settings, **args):\n multiproc.__init__(self, settings, **args)\n\n # pods/nodes\n self.watch = self.getconfig(\"watch\", default='pods')\n self.timeout = self.getconfig(\"timeout\", default=30)\n # {'label_selector': 'app=test'}\n # {'field_selector': 'metadata.namespace=test'}\n self.selectors = self.getconfig(\"selectors\", default={})\n\n self.functions = self.getconfig(\"functions\", default=[{'fnc': 'print',\n 'args': [],\n 'kwargs': {}\n }])\n\n self.data = {}\n self.output = ''\n self.error = ''\n self.status = 0\n self.stop_all_threads = False\n\n def watch_kub(self):\n\n try:\n kub.config.load_incluster_config()\n logger.info('Loaded from in cluster config')\n except Exception:\n kub.config.load_kube_config()\n logger.info('Loaded from kube config')\n\n self.kub_api = kub.client.CoreV1Api()\n\n if self.watch == 'nodes':\n watch = self.kub_api.list_node\n elif self.watch == 'pods':\n watch = self.kub_api.list_pod_for_all_namespaces\n elif self.watch == 'namespaces':\n watch = self.kub_api.list_namespace\n\n for event in kub.watch.Watch().stream(watch, _request_timeout=self.timeout, **self.selectors):\n # et = event['type']\n # node = event['object'].metadata.name\n # raw_object = event['raw_object']\n yield event\n\n def watcher(self):\n\n for fnc in self.functions:\n\n fnc_name = 'kubernetes_{}'.format(fnc['fnc'])\n fnc_path = 'hc.multiproc.kubernetes_fnc.{}'.format(fnc_name)\n\n fnc['fnc_imported'] = vars(__import__(fnc_path, fromlist=['']))[fnc_name]()\n\n for event in self.watch_kub():\n logger.debug(event['type'])\n for fnc in self.functions:\n self.data[fnc.get('name', fnc['fnc'])] = fnc['fnc_imported'].resolve(\n self.kub_api, event, fnc.get('args', []), 
fnc.get('kwargs', {}))\n\n def ticker(self):\n\n while 1:\n self.proc_save(self.status, self.data, self.output, self.error)\n if self.stop_all_threads:\n return\n\n def run_proc(self):\n\n self.proc_init()\n\n threads = []\n for fnc in [self.watcher, self.ticker]:\n thread = threading.Thread(target=fnc)\n threads.append(thread)\n thread.start()\n\n while 1:\n logger.debug([thr.is_alive for thr in threads])\n for thr in threads:\n if not thr.is_alive():\n logger.error('Stopping all threads.')\n self.stop_all_threads = True\n return\n\n time.sleep(self._period)\n","sub_path":"hc/multiproc/kubernetes_watch.py","file_name":"kubernetes_watch.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"511147954","text":"# ------------------------------------------------\n# terminal_effects.py\n#\n# terminal_effects\n# Author: \tEvan Wilde \n# Date:\t\tAug 05 2014\n# ------------------------------------------------\n'''\nGives access to some simple commandline effects\n'''\n\n\nclass TerminalColors:\n '''\n Class conaining some terminal colors\n '''\n WARNING = '\\033[93m'\n FAILURE = '\\033[91m'\n END = '\\033[0m'\n\n def disable(self):\n self.WARNING = ''\n self.FAILURE = ''\n self.END = ''\n\n\nclass CException(Exception):\n '''\n Throws colorful exceptions\n '''\n def __init__(self, message, values, critical=True):\n '''\n Create an exception\n '''\n self.critical = critical\n if critical:\n self.message = TerminalColors.FAILURE + \"\" + message\n else:\n self.message = TerminalColors.WARNING + \"\" + message\n self.values = values\n\n def is_critical(self):\n return self.critical\n\n def __str__(self):\n '''\n Throw message\n '''\n if self.values:\n if isinstance(self.values, str):\n values = self.values\n else:\n values = \"\"\n for elem in self.values:\n values += elem + \" \"\n return self.message + \" : \" + values + \"\" + TerminalColors.END\n else:\n return self.message + TerminalColors.END\n\n\nclass NotImplementedError(CException):\n '''\n Thrown when a function or method is not implemented\n '''\n def __init__(self):\n super().__init__(\"Not Implemented\", None)\n","sub_path":"src/terminal_effects.py","file_name":"terminal_effects.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"445909110","text":"#!/usr/bin/env python3\n\nimport hashlib\nfrom binascii import hexlify\nfrom itertools import product\nfrom sys import argv\nimport time \n\nPOSSIBLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n# Number of characters to consider in the hash function, starting at the\n# beginning. e.g. 6 means we use the first 6 bytes of sha1.\nHASHCHARS = 12\n\n\n# Supported algoritms \nSUPORTED_ALGOS = [\"sha1\",\"sha224\",\"sha256\",\"sha384\",\"sha512\",\"md5\"]\n\n\n# Default ALGO\nALGO = \"sha1\"\n\ndef genhash(s):\n ''' Returns the shortened sha1 hash of s. 
If the input is bytes, they\n will be hashed directly; otherwise they will be encoded to ascii\n before being hashed.\n '''\n if type(s) is bytes:\n # Already encoded, just hash the bytes.\n if ALGO == \"sha1\":\n return hashlib.sha1(s).hexdigest()[:HASHCHARS]\n\n if ALGO == \"sha224\":\n return hashlib.sha224(s).hexdigest()[:HASHCHARS]\n\n if ALGO == \"sha256\":\n return hashlib.sha256(s).hexdigest()[:HASHCHARS]\n\n if ALGO == \"sha384\":\n return hashlib.sha384(s).hexdigest()[:HASHCHARS]\n\n if ALGO == \"sha512\":\n return hashlib.sha512(s).hexdigest()[:HASHCHARS]\n\n if ALGO == \"md5\":\n return hashlib.md5(s).hexdigest()[:HASHCHARS]\n else:\n # Convert it to ascii, then hash.\n if ALGO == \"sha1\":\n return hashlib.sha1(s.encode('ascii')).hexdigest()[:HASHCHARS]\n\n if ALGO == \"sha224\":\n return hashlib.sha224(s.encode('ascii')).hexdigest()[:HASHCHARS]\n\n if ALGO == \"sha256\":\n return hashlib.sha256(s.encode('ascii')).hexdigest()[:HASHCHARS]\n\n if ALGO == \"sha384\":\n return hashlib.sha384(s.encode('ascii')).hexdigest()[:HASHCHARS]\n\n if ALGO == \"sha512\":\n return hashlib.sha512(s.encode('ascii')).hexdigest()[:HASHCHARS]\n\n if ALGO == \"md5\":\n return hashlib.md5(s.encode('ascii')).hexdigest()[:HASHCHARS]\n\n\n\n\ndef show(orig_str, collision_str, duration):\n ''' Print the original string, the collision string, and then recompute\n the hashes of each of them and print those, to prove that we found\n a collision.\n '''\n # Do the encoding to ascii for bytes output\n orig_ascii = orig_str.encode('ascii')\n collision_ascii = collision_str.encode('ascii')\n\n # Print stuff.\n print()\n print('Collision found!')\n\n print()\n print(\"%r bytes collision found using %r algorithm in %r seconds\" % (HASHCHARS,ALGO,duration))\n print (\"Validate it\")\n print ()\n print (\"echo -n %r | %r | cut -c1-%d && echo -n %r | %r | cut -c1-%r\" %(orig_str,ALGO+\"sum\",HASHCHARS,collision_str,ALGO+\"sum\",HASHCHARS))\n\n\n\n\n print()\n print(orig_str \n + ' (bytes: ' + str(hexlify(orig_ascii)) + ')'\n + ' hashes to ' + str(genhash(orig_ascii))\n + ', but ' + collision_str\n + ' (bytes: ' + str(hexlify(collision_ascii)) + ')'\n + ' also hashes to ' + str(genhash(collision_ascii)))\n\ndef is_collision(trial, orig_hash):\n ''' Returns true if the hash of trial is the same as orig_hash.\n '''\n h = genhash(trial)\n return h == orig_hash\n\ndef collide(startnumber):\n ''' Search for collisions in the hash. Start with the possible match\n at index startnumber and look for collisions by searching upward\n from there.\n Note that this means if you choose a large value (e.g. 
400000) this\n will not look for collisions on possibilities 0 <= x <= 400001, so\n choose a low number unless you want this to run for quite a while.\n '''\n start = int(time.time())\n\n # Iterator that yields possible characters.\n possible = product(POSSIBLE, repeat=100)\n\n # Iterate over the product until we reach the specified startnumber\n for i in range(startnumber):\n possible.__next__()\n\n # This is our collision target\n orig = ''.join([e for e in possible.__next__()]).lstrip('0')\n orig_hash = genhash(orig)\n\n # Iterate over the possible options\n for trial in possible:\n\n # Convert the tuple from itertools.product into a string\n trial = ''.join([e for e in trial])\n # Strip the leading zeros (who cares about zeros!)\n trial = trial.lstrip('0')\n \n # Exit if we found a collision\n if is_collision(trial, orig_hash):\n duration = int(time.time()) - start\n show(orig, trial, duration)\n break\n\nif __name__ == '__main__':\n if len(argv) > 1:\n n = int(argv[1])\n if len(argv) > 2:\n HASHCHARS=int(argv[2])\n if len(argv) > 3:\n if argv[3] not in SUPORTED_ALGOS:\n print()\n print (\"Using default algo sha256\")\n ALGO = argv[3]\n collide(n)\n else:\n print('Please pass an integer as the argument to this program.')\n print()\n print('This number will be used as the starting offset on the string')\n print('search function. It is recommended not to use something like')\n print('\"0\", since this will end up searching for collisions on the')\n print('empty string (\"\").')\n print()\n print('e.g.:')\n print(' $ python3 sha.py 300 sha1')\n print(' $ python3 sha.py 300 sha224')\n print(' $ python3 sha.py 300 sha256')\n print(' $ python3 sha.py 300 sha384')\n print(' $ python3 sha.py 300 sha512')\n print(' $ python3 sha.py 300 md5')\n print()\n print('This will search for collisions on the string \"4Q\", the 301st')\n print('iteration of our cartesian product search ALGOrithm on a')\n print('subset of the ASCII alphabet.')\n\n","sub_path":"colide.py","file_name":"colide.py","file_ext":"py","file_size_in_byte":5710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"320263305","text":"import os, json\nfrom os import path as osp\nfrom ffmpy import FFmpeg\nimport csv\n\nclassIndFile=open('soccerdb_class_ind.json')\nclassIndJSON=json.load(classIndFile)\nclassIndFile.close()\nsoccerdbVideosPath='/media/opekta/CopaEurope/SoccerDB/videos'\ntargetFolder='/home/opekta/copaeurope/mmaction2/data/soccernet/extractedFrames'\n\noutputExtension='.jpg'\n\nwith open('soccerdb_seg_info.csv', newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in spamreader:\n if (row[0][:6]=='seg_id'):\n continue\n # print('\\n',row)\n # print(len(row))\n splitted=[]\n for rowPart in row:\n element=rowPart.split(',')\n for x in element:\n splitted.append(x)\n # print(splitted)\n # print(len(splitted))\n if (splitted[1][-4:] != '.mp4'):\n continue\n event_labels=[]\n for i in range(6, len(splitted)-1):\n event_labels.append(splitted[i])\n if (classIndJSON['events'][splitted[i]] == \"Penalty\"):\n print(classIndJSON['events'][splitted[i]])\n start=3600*int(splitted[4][:2])+60*int(splitted[4][3:5])+int(splitted[4][6:8])\n print(start)\n end=3600*int(splitted[5][:2])+60*int(splitted[5][3:5])+int(splitted[5][6:8])\n print(end)\n print(splitted[1][:-4],'\\n')\n for root, dirs, files in os.walk(soccerdbVideosPath):\n for file in files:\n if (file[:30] == splitted[1][:-4]):\n videoPath=osp.join(root,file)\n 
targetVideoFolder=osp.join(targetFolder,classIndJSON['events'][splitted[i]]+'/'+file[:30])\n os.makedirs(targetVideoFolder, exist_ok=True)\n targetImage = osp.join(targetVideoFolder, \"img_%05d\"\n + outputExtension)\n print(videoPath)\n print(targetImage)\n ff = FFmpeg(\n inputs={videoPath: ['-y', '-ss',\n str(start), '-to', str(end)\n ]},\n outputs={targetImage: ['-qmin', '1', '-qscale:v', '2', '-vf', 'scale=398:224']}\n )\n ff.run()\n","sub_path":"tools/data/soccernet/extract_actions_from_soccerdb.py","file_name":"extract_actions_from_soccerdb.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"604301922","text":"import graphene\nfrom graphql_relay.node.node import from_global_id\n\nfrom accounts.decorators import login_required\nfrom accounts.permissions import has_permission\nfrom db.models_graphql import Document\nfrom backend.mutations import Mutation\nfrom .models import Vote as VoteModel\nfrom .models_graphql import Vote, Voting\n\n\ndef _set_vote(input, request, info):\n gid_type, gid = from_global_id(input.get('parent'))\n parent = Document._meta.model.objects.get(pk=gid)\n\n try:\n vote = VoteModel.objects.get(parent=parent,\n author=request.user.document)\n except VoteModel.DoesNotExist:\n vote = VoteModel(parent=parent, author=request.user.document)\n\n vote.value = input.get('value')\n vote.save(request=request)\n\n voting = Voting.get_node(info, id=gid)\n\n return {\n 'vote': vote,\n 'voting': voting\n }\n\n\nclass VoteSet(Mutation):\n class Input:\n parent = graphene.ID(required=True)\n value = graphene.Int(required=True)\n\n vote = graphene.Field(Vote)\n voting = graphene.Field(Voting)\n\n @classmethod\n @login_required\n def mutate_and_get_payload(cls, root, info, **input):\n return VoteSet(**_set_vote(input, info.context, info))\n\n\nclass VoteDelete(Mutation):\n class Input:\n id = graphene.ID(required=True)\n\n voteDeletedID = graphene.ID(required=True)\n voting = graphene.Field(Voting)\n\n @classmethod\n @login_required\n def mutate_and_get_payload(cls, root, info, **input):\n gid_type, gid = from_global_id(input.get('id'))\n vote = VoteModel.objects.get(document_id=gid,\n author=info.context.user.document)\n\n error = has_permission(cls, info.context, vote, 'delete')\n if error:\n return error\n\n parent_id = vote.parent_id\n vote.delete(request=info.context)\n\n voting = Voting.get_node(info, id=parent_id)\n\n return VoteDelete(voteDeletedID=input.get('id'), voting=voting)\n","sub_path":"voting/mutations.py","file_name":"mutations.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"543012981","text":"from random import randint\nfrom time import sleep\n\nhp_dragon = 100\nhp_player = 100\n\ndef health_status():\n global hp_dragon\n global hp_player\n print(f\"You have {hp_player} hp, the dragon now has {hp_dragon} hp.\")\n\ndef dragon():\n global hp_dragon\n global hp_player\n print(\"You now fight the dragon!...\")\n sleep(2)\n print(\"The dragon has three different types of attacks. 
Each is most successfully paraded by a move of yours...\")\n while (hp_dragon > 0 and hp_player > 0):\n attack = randint(0,2)\n if attack == 0:\n bite()\n input(\"> \")\n elif attack == 1:\n hit_w_tail()\n input(\"> \")\n elif attack ==2:\n spit_fire()\n input(\"> \")\n else:\n print(\"Error #1 this is not an attack of the dragon that should be possible\")\n print(f\"{hp_dragon} and {hp_player}\")\n\ndef bite():\n global hp_dragon\n global hp_player\n print(\"The dragon tries to bite you!\")\n choice = input(\"> How do you react? 'leap' backwards or do you 'block' the bite? \")\n if 'leap' in choice:\n print(\"You leap backwards!\")\n effect = randint(0,99)\n if effect < 70:\n print(\"Successfully! The dragon bites in the air, but you are already safely on the side. You can even stab the dragon and damage him!\")\n hp_dragon -= randint(10, 20)\n health_status() \n else:\n print(\"You leap backwards, but the dragon reaches you anyway, unlucky you!\")\n hp_player -= randint(15,20)\n health_status() \n elif 'block' in choice:\n print(\"You try to block the dragon's attack!\")\n effect = randint(0,99)\n if effect < 20:\n print(\"You block successfully! Wow! That is very difficult to achieve! ...You even give him a hit!\")\n hp_dragon -= randint(10, 20)\n health_status()\n else:\n print(\"You want to block the bite, but that's very difficult! You fail this time...The dragon hurts you!\")\n hp_player -= randint(20,30)\n health_status()\n else:\n print(\"You can choose between 'leap' backwards to avoid the bite or you try to 'block' the bite\")\n\ndef hit_w_tail():\n global hp_dragon\n global hp_player\n print(\"The dragon tries hit you with his tail!\")\n choice = input(\"> How do you react? 'stab' the tail with your sword or 'jump' on the tail? \")\n if 'stab' in choice:\n print(\"You try to stab the tail as it flies towards you!\")\n effect = randint(0,99)\n if effect > 49:\n print(\"You stab the tail right in the middle! Dragon blood spills on you as the dragon howls loudly\")\n hp_dragon -= randint(10,20)\n health_status()\n else:\n print(\"You try to stab the tail, but the dragon moves too fast! Instad of stabbing it, he hits you and slamms you against the wall!\")\n hp_player -= randint(10,20)\n health_status()\n elif 'jump' in choice:\n print(\"You try to jump on the tail!\")\n effect = randint(0,99)\n if effect > 85:\n print(\"You jump exactly at the right moment and land the the tail!! The dragon is furious! From there you can bury your sword deep in the beasts flesh!\")\n hp_dragon -= randint(30,35)\n health_status()\n else:\n print(\"You try to jump on the tail, but you slip and miss it...at least you don't get hurt\")\n health_status()\n else:\n print(\"You can 'stab' or 'jump' at this point.\")\n\ndef spit_fire():\n global hp_dragon\n global hp_player\n print(\"The dragon spits fire!\")\n choice = input(\"> How do you react? 'run' away or use a 'magic' spell to block it and throw the fire back at the dragon? \")\n if 'run' in choice:\n print(\"You run away! Smart move, dragon fire burns like hell!\")\n print(\"The dragon looks like he got a little bit weaker through his attack of his\")\n hp_dragon -= randint(1,5)\n health_status()\n elif 'magic' in choice:\n\n print(\"You use the magic spell!\")\n\n effect = randint(0,99)\n\n if effect < 20:\n\n print(\"A powerful white light shoots out of your wand! 
You block the fire and it heads back to the dragon, hurting him badly!\")\n\n hp_dragon -= randint(20,25)\n\n health_status()\n\n else:\n \n print(\"The spell and the fire hit on each other and with a loud 'bang!' both you and the dragon are being thrown backwards!\")\n\n hp_dragon -= 10\n\n hp_player -= 5\n\n health_status()\n\n else:\n print(\"You can 'run' or use a 'magic' spell at this point.\")\n\n#dragon()\n#bite()","sub_path":"monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"414126004","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 08 October, 2018\nTesting suite for Network export_to_shp function\n@author: J. Vicente Perez\n@email: geolovic@hotmail.com\n@date: 23 October, 2018\n\"\"\"\n\nfrom topopy import Network, DEM, Flow, Grid\nimport matplotlib.pyplot as plt\nimport numpy as np\ninfolder = \"../data/in\"\noutfolder = \"../data/out\"\n\ndem = DEM(infolder + \"/morocco.tif\")\nfd = Flow(dem)\nnet = Network(dem, fd, 1500)\n\nstreams = net.get_streams()\nstreams.save(outfolder + \"/canales_orig.tif\")\n\noutlet = np.array([579213, 504282]).reshape(1, 2)\n\nbasin = fd.get_drainage_basins(net.snap_points(outlet), asgrid=False)\nc1 = basin.max(axis=0).argmax()\nr1 = basin.max(axis=1).argmax()\nc2 = basin.shape[1] - np.fliplr(basin).max(axis=0).argmax()\nr2 = basin.shape[0] - np.flipud(basin).max(axis=1).argmax()\nbasin_cl = basin[r1:r2, c1:c2]\n\nnrow = basin_cl.shape[0]\nncol = basin_cl.shape[1]\noutgrid = Grid()\n\noutgrid._size = (ncol, nrow)\noutgrid._dims = (nrow, ncol)\ngeot = net._geot\nULx = geot[0] + geot[1] * c1\nULy = geot[3] + geot[5] * r1\noutgrid._geot = (ULx, geot[1], 0.0, ULy, 0.0, geot[5])\noutgrid._cellsize = geot[1]\noutgrid._proj = net._proj\noutgrid._ncells = basin_cl.size\noutgrid._nodata = 0\noutgrid._array = basin_cl\noutgrid._tipo = str(basin.dtype)\n\nfd.get_drainage_basins(net.snap_points(outlet)).save(outfolder + \"/basin_orig.tif\")\noutgrid.save(outfolder + \"/basin_test.tif\")\n\n\n# GET ONLY POINTS INSIDE BASIN\nbasin = basin.astype(np.bool).ravel()\nI = basin[net._ix]\n\nix = net._ix[I]\nixc = net._ixc[I]\n\n# Get grid channel cells\nw = np.zeros(net._ncells, dtype=np.int8)\nw[ix] = 1\nw[ixc] = 1\nw = w.reshape(net._dims)\n\nplt.imshow(w)\n\n#I = basin.astype(np.bool).ravel()[net._ix]\n#\n#ix = net._ix[I]\n#ixc = net._ixc[I] \n#\nrowix, colix = net.ind_2_cell(ix)\nnrowix = rowix - r1\nncolix = colix - c1\nnewix = outgrid.cell_2_ind(nrowix, ncolix)\n\n#rowixc, colixc = net.ind_2_cell(ixc)\n#rowixc -= r1\n#colixc -= c1\n#newixc = outgrid.cell_2_ind(rowixc, colixc)\n#\nw = np.zeros(basin_cl.size, dtype=np.int8)\nw[newix] = 1\n#w[newixc] = 1\nw = w.reshape(outgrid._dims)\n\nstreams = Grid()\nstreams.copy_layout(outgrid)\nstreams._nodata = 0\nstreams._array = w\nstreams._tipo = str(w.dtype)\n\nstreams.save(outfolder + \"/canales_prueba.tif\")\n\n\n","sub_path":"test/temp/DBasin_class_test.py","file_name":"DBasin_class_test.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"453103687","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 8 20:33:45 2019\n\n@author: xiaoe\n\"\"\"\n\nimport json\nimport random\nimport sys\n\nimport numpy as np\n\n#### Define Cost Function\nclass CrossEntropyCost(object):\n \n # 静态方法\n @staticmethod\n def fn(a, y):\n \"\"\"计算代价函数C\"\"\"\n 
return np.sum(np.nan_to_num(-y * np.log(a) - (1-y) * np.log(1 - a)))\n    \n    @staticmethod\n    def delta(a, y):\n        \"\"\"Compute the output-layer error delta(L).\"\"\"\n        return (a-y)\n    \n#### Main Network Class\nclass Network(object):\n    \n    def __init__(self, sizes, cost = CrossEntropyCost):\n        self.num_layers = len(sizes)\n        self.sizes = sizes\n        self.default_weight_initializer()\n        self.cost = cost\n        self.traincostvalue = []\n        self.evaluationaccuracyvalue = []\n    \n    def default_weight_initializer(self):\n        \"\"\"Initialize the weights and biases.\"\"\"\n        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n        self.weights = [np.random.randn(y, x) * np.sqrt(1/x) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\n        self.velocities = [np.zeros((y, x)) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\n    \n    def large_weight_initializer(self):\n        \"\"\"The original (large) weight initialization method.\"\"\"\n        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n        self.weights = [np.random.randn(y, x) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\n        self.velocities = [np.zeros((y, x)) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\n    \n    def feedforward(self, a):\n        \"\"\"Forward propagation.\"\"\"\n        for b, w in zip(self.biases, self.weights):\n            # z = w * a + b\n            a = sigmoid(np.dot(w, a) + b)\n        # return the final output vector of the forward pass\n        return a\n\n    def SGD(self, training_data, epochs, mini_batch_size, eta, mu,\n            lmbda = 0.0, evaluation_data = None,\n            monitor_evaluation_cost = False, monitor_evaluation_accuracy = False,\n            monitor_training_cost = False, monitor_training_accuracy = False,\n            early_stopping_n = 0):\n        \n        if evaluation_data:\n            evaluation_data = list(evaluation_data)\n            n_data = len(evaluation_data)\n        \n        # number of training examples\n        n = len(training_data)\n        \n        # early stopping functionality\n        best_accuracy = 0\n        no_accuracy_change = 0\n        \n        # store the cost/accuracy histories for evaluation_data and training_data\n        evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = [], [], [], []\n        \n        # split training_data into mini batches, epoch by epoch\n        for j in range(epochs):\n            # shuffle training_data\n            random.shuffle(training_data)\n            # split training_data into mini batches of size mini_batch_size and keep them in a list\n            mini_batches = [training_data[k:k+mini_batch_size] for k in range(0, n, mini_batch_size)]\n            for mini_batch in mini_batches:\n                self.update_mini_batch(mini_batch, eta, mu, lmbda, n)\n            \n            # report the current epoch\n            print(\"Epoch %s training complete\" % j)\n            # when monitoring on training_data:\n            # append the current training_data cost to training_cost\n            if monitor_training_cost:\n                cost = self.total_cost(training_data, lmbda)\n                training_cost.append(cost)\n                self.traincostvalue.append(cost)\n                print(\"Cost on training data: {}\".format(cost))\n            # append the current training_data accuracy to training_accuracy\n            if monitor_training_accuracy:\n                accuracy = self.accuracy(training_data, convert = True)\n                training_accuracy.append(accuracy)\n                print(\"Accuracy on training data: {} / {}\".format(accuracy, n))\n            \n            # when monitoring on evaluation_data:\n            # append the current evaluation_data cost to evaluation_cost\n            if monitor_evaluation_cost:\n                cost = self.total_cost(evaluation_data, lmbda, convert = True)\n                evaluation_cost.append(cost)\n                print(\"Cost on evaluation data: {}\".format(cost))\n            # append the current evaluation_data accuracy to evaluation_accuracy\n            if monitor_evaluation_accuracy:\n                accuracy = self.accuracy(evaluation_data)\n                evaluation_accuracy.append(accuracy)\n                self.evaluationaccuracyvalue.append(accuracy/10000)\n                print(\"Accuracy on evaluation data: {} / {}\".format(self.accuracy(evaluation_data), n_data))\n            \n            # Early stopping:\n            if early_stopping_n > 0:\n                if accuracy > best_accuracy:\n                    best_accuracy = accuracy\n                    no_accuracy_change = 0\n                else:\n                    no_accuracy_change += 1\n                # stop early once the accuracy has failed to improve early_stopping_n times in a row\n                if no_accuracy_change == early_stopping_n:\n                    return evaluation_cost, evaluation_accuracy, training_cost, training_accuracy\n        \n        return evaluation_cost, evaluation_accuracy, training_cost, training_accuracy\n    \n    def update_mini_batch(self, mini_batch, eta, mu, lmbda, n):\n        \"\"\"Update weights and biases using momentum instead of plain gradient descent.\"\"\"\n        nabla_b = [np.zeros(b.shape) for b in self.biases]\n        nabla_w = [np.zeros(w.shape) for w in self.weights]\n        for x, y in mini_batch:\n            delta_nabla_b, delta_nabla_w = self.backprop(x, y, lmbda, n)\n            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n        # momentum\n        # v = mu * v - eta * nabla_C\n        self.velocities = [mu * v - (eta / len(mini_batch)) * nw for v, nw in zip(self.velocities, nabla_w)]\n        # w = (1 - eta * lambda / n) * w + v\n        self.weights = [(1 - eta * lmbda / n) * w + v for w, v in zip(self.weights, self.velocities)]\n        #self.weights = [w + v for w, v in zip(self.weights, self.velocities)]\n        # b = b - eta * nabla_C\n        self.biases = [b - (eta / len(mini_batch)) * nb for b, nb in zip(self.biases, nabla_b)]\n    \n    def backprop(self, x, y, lmbda, n):\n        \"\"\"Backpropagation.\"\"\"\n        nabla_b = [np.zeros(b.shape) for b in self.biases]\n        nabla_w = [np.zeros(w.shape) for w in self.weights]\n        # feedforward\n        # input\n        activation = x\n        # list to store all the activations, layer by layer\n        activations = [x]\n        # list to store all the z vectors, layer by layer\n        zs = []\n        for b, w in zip(self.biases, self.weights):\n            # z = w * a + b\n            z = np.dot(w, activation) + b\n            # append z to the list\n            zs.append(z)\n            # a = sigmoid(z)\n            activation = sigmoid(z)\n            # append the activation to the list\n            activations.append(activation)\n        # backward pass\n        # delta(L) = a - y\n        delta = (self.cost).delta(activations[-1], y)\n        # nabla_b(L) = delta(L)\n        nabla_b[-1] = delta\n        # nabla_w(L) = delta(L) * a(L-1).transpose()\n        nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n        # work backwards from the second-to-last layer\n        for l in range(2, self.num_layers):\n            z = zs[-l]\n            sp = sigmoid_prime(z)\n            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n            nabla_b[-l] = delta\n            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\n        return (nabla_b, nabla_w)\n    \n    def accuracy(self, data, convert = False):\n        \"\"\"Return the number of inputs in ``data`` for which the neural\n        network outputs the correct result. The neural network's\n        output is assumed to be the index of whichever neuron in the\n        final layer has the highest activation.\n        The flag ``convert`` should be set to False if the data set is\n        validation or test data (the usual case), and to True if the\n        data set is the training data. \"\"\"\n        if convert:\n            # training data\n            results = [(np.argmax(self.feedforward(x)), np.argmax(y)) for (x, y) in data]\n        else:\n            # validation or test data\n            results = [(np.argmax(self.feedforward(x)), y) for (x, y) in data]\n        result_accuracy = sum(int(x == y) for (x, y) in results)\n        # return the number of correct answers for this training/evaluation pass\n        return result_accuracy\n    \n    def total_cost(self, data, lmbda, convert = False):\n        \"\"\"Return the total cost for the data set ``data``. The flag\n        ``convert`` should be set to False if the data set is the\n        training data (the usual case), and to True if the data set is\n        the validation or test data. \"\"\"\n        cost = 0.0\n        for x, y in data:\n            # a is the output vector of the forward pass\n            a = self.feedforward(x)\n            if convert:\n                # validation or test data\n                y = vectorized_result(y)\n            # unregularized cost\n            cost += self.cost.fn(a, y) / len(data)\n        # regularization term\n        cost += 1/2 * (lmbda / len(data)) * sum(np.linalg.norm(w) ** 2 for w in self.weights)\n        # return the total cost for this training/evaluation pass\n        return cost\n\n#### Miscellaneous functions\ndef vectorized_result(j):\n    \"\"\"Return a 10-dimensional unit vector with a 1.0 in the j'th position\n    and zeroes elsewhere. This is used to convert a digit (0...9)\n    into a corresponding desired output from the neural network.\n    \"\"\"\n    e = np.zeros((10, 1))\n    e[j] = 1.0\n    return e\n    \ndef sigmoid(z):\n    \"\"\"The sigmoid function.\"\"\"\n    return 1.0/(1.0 + np.exp(-z))\n\ndef sigmoid_prime(z):\n    \"\"\"Derivative of the sigmoid function.\"\"\"\n    return sigmoid(z) * (1 - sigmoid(z))","sub_path":"my_network2_momentum.py","file_name":"my_network2_momentum.py","file_ext":"py","file_size_in_byte":9899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"212400700","text":"# -*- coding: utf-8 -*-\nimport os\nimport requests\n\nfrom django.conf import settings\n\n\ndef upload_file_to_group_album(album_id, caption, download_url, file, filename,\n                               group_id):\n    upload_url = get_upload_url(album_id, group_id)\n    with open(filename, \"wb\") as f:\n        f.write(file)\n    # Prepare the file for upload and send it\n    stream = open(filename, \"rb\")\n    print((f'size: {os.path.getsize(filename)}'\n           f'caption {caption} filename {filename} upload {download_url}'))\n    files = {'file1': stream}\n    response = requests.post(upload_url, files=files).json()\n    # Confirm that the file was saved and get its data back as JSON\n    aid_ = response['aid']\n    result_json = requests.get('https://api.vk.com/method/photos.save',\n                               params={\n                                   'access_token': settings.VK_ACCESS_TOKEN,\n                                   'album_id': aid_,\n                                   'group_id': response['gid'],\n                                   'server': response['server'],\n                                   'photos_list': response['photos_list'],\n                                   'caption': caption,\n                                   'hash': response['hash'],\n                                   'v': '5.92'\n                               }).json()\n    print(result_json)\n\n\ndef get_upload_url(album_id, group_id):\n    response = requests.get(\n        'https://api.vk.com/method/photos.getUploadServer', params={\n            'access_token': settings.VK_ACCESS_TOKEN,\n            'album_id': album_id,\n            'group_id': group_id,\n            'v': '5.52'\n        }).json()\n    server = response['response']['upload_url']\n    return server\n","sub_path":"sync/api/vk.py","file_name":"vk.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"384028433","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 13 10:26:55 2017\n\n@author: 凯风\n\"\"\"\n\nimport numpy as np\n\ndef loadDataSet(filename):\n    # read the data\n    dataMat = []\n    fr = open(filename)\n    for line in fr.readlines():\n        curLine = line.strip().split('\\t')\n        fltLine = list(map(float,curLine))   # map each row to floats\n        dataMat.append(fltLine)\n    return dataMat\n\ndef binSplitDataSet(dataSet,feature,value):\n    # split the data set on a given value of a given feature\n    mat0 = dataSet[np.nonzero(dataSet[:,feature] > value)[0],:]\n    mat1 = dataSet[np.nonzero(dataSet[:,feature] <= value)[0],:]\n    return mat0,mat1\n\ndef regLeaf(dataSet):\n    # generate a leaf node\n    return np.mean(dataSet[:,-1])\n\ndef regErr(dataSet):\n    # error estimate\n    return np.var(dataSet[:,-1]) * np.shape(dataSet)[0]\n\ndef chooseBestSplit(dataSet , leafType=regLeaf , errType=regErr , ops=(1,4)):\n    # core routine: find the best binary split\n    tolS = ops[0]    # minimum error reduction\n    tolN = ops[1]    # minimum number of samples in a split\n    if len(set(dataSet[:,-1].T.tolist()[0])) == 1:     # 
all target values are identical, stop\n        return None , leafType(dataSet)\n    m,n = np.shape(dataSet)\n    S = errType(dataSet)    # error of the whole data set\n    bestS = np.inf\n    bestIndex = 0\n    bestValue = 0\n    for featIndex in range(n-1):        # iterate over every feature\n        for splitVal in set((dataSet[:, featIndex].T.A.tolist())[0]):      # iterate over every distinct value of that feature\n            mat0,mat1 = binSplitDataSet(dataSet,featIndex,splitVal)        # binary split on this feature/value\n            if (np.shape(mat0)[0] < tolN) or (np.shape(mat1)[0] < tolN):   # skip the split if either side has fewer than tolN samples\n                continue\n            newS = errType(mat0) + errType(mat1)        # compute the new error estimate\n            if newS < bestS:\n                bestIndex = featIndex\n                bestValue = splitVal\n                bestS = newS\n    if (S - bestS) < tolS:              # stop if the error reduction is below the threshold\n        return None , leafType(dataSet)\n    mat0,mat1 = binSplitDataSet(dataSet,bestIndex,bestValue)\n    if (np.shape(mat0)[0] < tolN) or (np.shape(mat1)[0] < tolN):        # stop if either side of the split is too small\n        return None , leafType(dataSet)\n    return bestIndex,bestValue\n\n\ndef createTree(dataSet , leafType=regLeaf , errType=regErr , ops=(1,4)):\n    # tree-building function; arguments are the data set, leaf type, error type and extra options\n    feat,val = chooseBestSplit(dataSet,leafType,errType,ops)        # choose the split; for a regression tree val is a constant, for a model tree val is a linear model\n    if feat == None:        # return the leaf value if the stopping condition is met\n        return val\n    retTree = {}            # empty dict used to store the tree structure\n    retTree['spInd'] = feat     # feature index\n    retTree['spVal'] = val      # split value\n    lSet,rSet = binSplitDataSet(dataSet,feat,val)       # split the data set in two using the feature and value\n    retTree['left'] = createTree(lSet,leafType,errType,ops)        # recurse\n    retTree['right'] = createTree(rSet,leafType,errType,ops)\n    return retTree\n\n\ndef isTree(obj):\n    # return True if the object passed in is a dict (i.e. a subtree)\n    return (type(obj).__name__=='dict')\n\ndef getMean(tree):\n    # collapse the tree: return the mean value of its leaves\n    if isTree(tree['right']): tree['right'] = getMean(tree['right'])\n    if isTree(tree['left']): tree['left'] = getMean(tree['left'])\n    return (tree['left']+tree['right'])/2.0\n    \ndef prune(tree, testData):\n    # tree to be pruned and the test data\n    if np.shape(testData)[0] == 0:          # check whether the test data is empty\n        return getMean(tree)\n    if (isTree(tree['right']) or isTree(tree['left'])):     # are the left/right branches subtrees or leaf nodes?\n        lSet, rSet = binSplitDataSet(testData, tree['spInd'], tree['spVal'])\n    if isTree(tree['left']):\n        tree['left'] = prune(tree['left'], lSet)\n    if isTree(tree['right']):\n        tree['right'] = prune(tree['right'], rSet)\n    if not isTree(tree['left']) and not isTree(tree['right']):      # if neither branch is a subtree any more\n        lSet, rSet = binSplitDataSet(testData, tree['spInd'], tree['spVal'])\n        errorNoMerge = sum(np.power(lSet[:,-1] - tree['left'],2)) +\\\n                        sum(np.power(rSet[:,-1] - tree['right'],2))     # merge\n        treeMean = (tree['left']+tree['right'])/2.0\n        errorMerge = sum(np.power(testData[:,-1] - treeMean,2))\n        if errorMerge < errorNoMerge:           # compare the error after merging\n            print(\"merging\")\n            return treeMean\n        else: return tree\n    else: return tree\n    \ndef linearSolve(dataSet):\n    # essentially the same as the linear regression from the previous section\n    m,n = np.shape(dataSet)\n    X = np.mat(np.ones((m,n)))\n    Y = np.mat(np.ones((m,1)))\n    X[:,1:n] = dataSet[:,0:n-1]     # the first column of X stays all ones and acts as the intercept term\n    Y = dataSet[:,-1]\n    xTx = X.T * X\n    if np.linalg.det(xTx) == 0.0:\n        raise NameError('this matrix is singular, cannot do inverse. 
you can try increasing the second value of ops')\n ws = xTx.I * (X.T * Y)\n return ws,X,Y\n\ndef modelLeaf(dataSet):\n # 获取数据集、标签和参数矩阵\n ws,X,Y = linearSolve(dataSet)\n return ws\n\ndef modelErr(dataSet):\n # 计算模型错误\n ws,X,Y = linearSolve(dataSet)\n yHat = X * ws\n return sum(np.power(Y - yHat,2))\n\n\ndef regTreeEval(model,inDat):\n return float(model)\n\ndef modelTreeEval(model,inDat):\n n = np.shape(inDat)[1]\n X = np.mat(np.ones((1,n+1)))\n X[:,1:n+1] = inDat\n return float(X*model)\n\ndef treeForeCast(tree,inData,modelEval=regTreeEval):\n # 预测输入的数据在模型下的预测值\n if not isTree(tree):\n return modelEval(tree,inData)\n if inData[tree['spInd']] > tree['spVal']:\n if isTree(tree['left']):\n return treeForeCast(tree['left'],inData,modelEval)\n else:\n return modelEval(tree['left'],inData)\n else:\n if isTree(tree['right']):\n return treeForeCast(tree['right'],inData,modelEval)\n else:\n return modelEval(tree['right'],inData)\n\ndef createForeCast(tree,testData,modelEval=regTreeEval):\n m = len(testData)\n yHat = np.mat(np.zeros((m,1)))\n for i in range(m):\n yHat[i,0] = treeForeCast(tree,np.mat(testData[i]),modelEval)\n return yHat\n\n\n \n \n \n \n ","sub_path":"Machine_Learning_in_Action/part8_regTree/regTrees.py","file_name":"regTrees.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"639843841","text":"import requests, json, datetime\nfrom bs4 import BeautifulSoup\nimport urllib.request as req\n\nurl = \"https://www.nikkei.com/\"\n\n# URLにアクセスするURLを取得する\nhtml = requests.get(url)\nsoup = BeautifulSoup(html.content, 'html.parser')\n\n# classが\"k-card__title-piece\"に当てはまるspan要素全てを摘出する\nspan = soup.find_all(\"span\", class_='k-card__title-piece')\n\n# 見出しを16行だけ抽出\nnikkei= []\nfor s in span[0:16]:\n nikkei.append(s.getText())\n\nfor a in nikkei[0:8]:\n print(a)\n\nf = open('nikkei.txt', 'a')\nf.truncate(0)\nf.close()\n\nfor s in nikkei[8:16]:\n f = open('nikkei.txt', 'a')\n f.write('{}
'.format(str(s)))\n f.close()\n","sub_path":"nikkei.py","file_name":"nikkei.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"242464461","text":"# Import the necessary libraries\nimport sys\nimport database_access\n\n\n# Command line arguments\ntable = sys.argv[1]\n\n\n# Connect to database\ndb = database_access.connect()\ncursor = db.cursor()\n\n\n# Check for the numbers of entries in table\nsqlstring = \"SELECT * FROM `\" + table + \"`\"\ncursor.execute(sqlstring)\nprint(str(int(cursor.rowcount)))\n","sub_path":"src/scripts/check_sql_table.py","file_name":"check_sql_table.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"495015151","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nfrom sklearn.model_selection import KFold\nfrom model import CreditModelKN\n\ndef main():\n # Load data from disk and split into training and validation sets.\n data = np.loadtxt('data/credit-data.csv', dtype=np.int, delimiter=',', skiprows=1)\n characteristics = data[:, 1:6]\n plate = data[:, 6:12]\n pbil = data[:, 12:18]\n ppay = data[:, 18:24]\n averageborrowed = pbil.mean()\n\n # returnratio = ppay / pbil\n # print(\"Print line 1000 of pay ratio:\", returnratio[1000])\n\n X = data[:, 1:-1]\n y = data[:, -1]\n print(\"Shape of data:\", X.shape)\n\n # Fit the model against the training data.\n model = CreditModelKN()\n\n kfold = KFold(n_splits=10)\n kfold.get_n_splits(X)\n\n for train_index, test_index in kfold.split(X):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n model.fit(X_train, y_train)\n\n # Predict against test data and ensure `y_hat` returns ints.\n y_hat = model.predict(X_test)\n y_hat = np.rint(np.squeeze(y_hat)).astype(int)\n assert len(y_hat) == len(X_test)\n print(\"Accuracy: {:06.3f}%\".format(100 * accuracy_score(y_test, y_hat)))\n print(\"Precision: {:06.3f}%\".format(100 * precision_score(y_test, y_hat)))\n print(\"Recall: {:06.3f}%\".format(100 * recall_score(y_test, y_hat)))\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"543533683","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tuesday Sep 21 17:00:40 2021\n\n@author: sam\n\"\"\"\nimport random as rd# importing random as rd\nimport csv\n\nactive_player = 1 # active player is a global variable which is updated through the player_turn function\n\nprint(\"welcome to pynopoly(v2.0)\")\n\n\n\n#making list from CSV file containing property data property_data3.csv\nwith open('property_data3.csv', newline = '') as f:\n reader = csv.reader(f)\n squares = list(reader)\n \n#defining chance cards and commuity chest, -1 = NA (it will call a function or not do anything) \nchance_cards = [[\"Go Back 3 square spaces\",-1,-1], #####################################################################################################################################\n [\"Advance token to nearest Utility. If unowned, you may buy it from the Bank. 
If owned, throw dice and pay owner a total ten times amount thrown.\",-1,-1], #############\n [\"You have been elected Chairman of the Board. Pay each player £50\",-1,-1], ############################################################################################\n [\"Speeding fine £15\",-15,-1], ##########################################################################################################################################\n [\"Make general repairs on all your property. For each house pay £25. For each hotel pay £100\",-1,-1], ##################################################################\n [\"Get Out of Jail Free\",-1,-1], ########################################################################################################################################\n [\"Your building loan matures. Collect £150\",150,-1], ###################################################################################################################\n [\"Bank pays you dividend of £50\", 50, -1], #############################################################################################################################\n [\"Advance to the nearest Station. If unowned, you may buy it from the Bank. If owned, pay owner twice the rental to which they are otherwise entitled\",-1,-1], #########\n [\"Go to Jail. Go directly to Jail, do not pass Go, do not collect £200\",-1,10], ########################################################################################\n [\"Take a trip to Kings Cross Station. If you pass Go, collect £200\",200, 5], ###########################################################################################\n [\"Advance to Pall Mall. If you pass Go, collect £200\", 200, 11], #######################################################################################################\n [\"Advance to Mayfair\",0,39], ###########################################################################################################################################\n [\"Advance to Go (Collect £200)\",200,0], ################################################################################################################################\n [\"Advance to Trafalgar Square. If you pass Go, collect £200\", 200, 24], ################################################################################################\n [\"Advance to the nearest Station. If unowned, you may buy it from the Bank. If owned, pay owner twice the rental to which they are otherwise entitled\",-1,-1], #########\n ]\n\ncommunity_chest =[[\"You are assessed for street repairs. £40 per house. £115 per hotel\", -1, -1], ######\n [\"It is your birthday. Collect £10 from every player\", 10, -1], #######################\n [\"Pay school fees of £50\", -50, -1], #################################################\n [\"Bank error in your favour. Collect £200\", 200 , -1], ################################\n [\"Advance to Go (Collect £200)\", 200, 0], #############################################\n [\"Go to Jail. Go directly to jail, do not pass Go, do not collect £200\", -50, 10],##### \n [\"Doctor’s fee. Pay £50\", -50, -1], ##################################################\n [\"From sale of stock you get £50\", 50, -1], ##########################################\n [\"Get Out of Jail Free\", -1,-1], #####################################################\n [\"Holiday fund matures. Receive £100\", 100, -1], ######################################\n [\"Income tax refund. 
Collect £20\", 20, -1], ###########################################\n [\"Life insurance matures. Collect £100\", 100, -1], #################################\n [\"Pay hospital fees of £100\", -100, -1], ############################################\n [\"Receive £25 consultancy fee\", -25, -1], ############################################\n [\"You have won second prize in a beauty contest. Collect £10\", 10, -1], ###############\n [\"You inherit £100\", 100, -1] #######################################################\n ]\n\n# cost of houses on each of the property colours. Hotels are the same price as a house but you must have bought 4 houses on each square in the set before buying hotels\nhouse_cost = [[\"brown\", \"purple\", \"light blue\", 50],\n [\"pink\", \"orange\", 100],\n [\"red\", \"yellow\", 150],\n [\"green\", \"dark blue\", 200]\n ]\n\n\n\n#--------------------functions--------------------------\n\n\n# this function runs through the lists in house_cost above and matches one of the colours with the argument set_colour, then finds the appropriate cost per house and multiplies it by the number of houses requested.\n# hotels are the same cost as houses so the house_cost_calc can be used to calculate the cost of hotels.\ndef house_cost_calc(set_colour, no_of_houses):\n cost = 0\n for i in range(len(house_cost)):\n price = house_cost[i][-1]\n length = len(house_cost[i])-1\n print(house_cost[i], length, price)\n \n for j in range(length):\n colour = house_cost[i][j]\n if set_colour == colour:\n cost = price * no_of_houses\n print(j, cost)\n else:\n print(\"The colour you have selected is not eligible for house building.\")\n \n return cost \n\n# function required to establish if the player can buy houses for the set\n# 1. Is the whole set in the name of the player?\n# 2. Does the set contain the maximum no of houses allowed (4 * no of squares in the set)\n# 3. Is the set eligible for houses (eg stations or utilities)\n# if the player is able to buy houses, we then need to return the max number that can be bought for that set. 
If not then message returns which explains why the player cannot buy a house.\n\n\n#rolls two dice and creates the total\ndef roll(player_no):\n die1 = rd.randint(1,6)\n die2 = rd.randint(1,6)\n total = die1 + die2\n print(player[player_no][0]+ \", you rolled a \", die1, \" and a \", die2, \"totaling\", total)\n return die1, die2, total\n\ndef double(die1, die2):\n if die1 == die2:\n print(\"double!!!\")\n \ndef nearest_train_station_calc(player_no):\n nearest_station = 0\n if player[player_no][3] == 7:\n nearest_station = 15\n elif player[player_no][3] == 22:\n nearest_station = 25\n elif player[player_no][3] == 36:\n nearest_station = 5\n print(\"nearest station is\", nearest_station)\n return nearest_station\n\ndef train_station_rental_calc(station, sent_through_chance):\n rent = 0\n station_proprietor = int(squares[station][4])\n print(\"station proprietor\", station_proprietor)\n station_count = 0\n stations = [5,15,25,35]\n for i in stations:\n print(i)\n if int(squares[i][4]) == station_proprietor:\n print(\"station\", i , \"is owned by \", station_proprietor) \n station_count += 1\n if station_count == 1:\n rent = 25 \n elif station_count == 2:\n rent = 50\n elif station_count == 3:\n rent = 100\n elif station_count == 4:\n rent = 200\n else:\n print(\"error in station rent\")\n if sent_through_chance == True:\n rent = rent * 2\n print(\"the rent you owe is \", rent)\n squares[station][5] = rent\n return rent\n\ndef three_spaces_back_calc(player_no, square_no):\n new_square = square_no - 3\n return new_square\n\ndef nearest_utility_calc(player_no):\n nearest_utility = 0\n if player[player_no][3] == 7 or player[player_no][3] == 36:\n nearest_utility = 12\n elif player[player_no][3] == 22:\n nearest_utility = 28\n print(\"nearest utility is\", nearest_utility)\n return nearest_utility\n\ndef utility_rent_calc(square_no, player_no, sent_through_chance):\n rent = 0\n owner_of_utility = int(squares[square_no][4])\n colour_of_utility = squares[square_no][0]\n if sent_through_chance == True:\n \n print(\"the colour of set is \", colour_of_utility)\n if owner_of_utility != player_no and owner_of_utility != 0: # if you don't own the utility yourself and the bank isnt the owner...\n print(\"roll dice and lets make a calculation.......................\")\n roll_dice1 = roll(player_no)\n print(\"...and the dice are.......................\", roll_dice1)\n print(\"The owner of the utility is: \", owner_of_utility)\n rent = 10 * roll_dice1[2]\n elif owner_of_utility != player_no:\n print(\"no rent to pay since you own the utility\")\n rent = 0 \n else:\n print(\"The dice you previously rolled are...\", roll_dice[0], \" and \", roll_dice[1])\n same_ownership = False\n for i in squares:\n if i[0] == colour_of_utility and int(i[4]) == owner_of_utility and i[1] != squares[square_no][1]:\n print(\"The other utility is owned by the same guy\", i[0], i[4])\n same_ownership = True\n print(\"same_ownership \", same_ownership) \n\n if same_ownership == True:\n rent = roll_dice1[2] * 10\n else:\n rent = roll_dice1[2] * 4\n \n return rent\n\n# ------------------------------------------------------temporary funtion that assigns all purchasable properties to player 1\n\ndef prop_4_player2():\n for i in squares:\n if i[4] == '0' and i[0] != \"NA\":\n i[4] = 2\n print(squares)\n\n# ------------------------------------------------------temporary funtion that assigns all purchasable properties to player 1\n\n\n\n#changes the owner of the property to the player no. 
\ndef property_transfer(player_no, prop_name):\n print(\"Player no\", player_no, \"and property no\", prop_name)\n for i in squares:\n if prop_name == i[1]:\n print(i[1],i[4])\n i[4] = player_no\n print(\"the new owner is \", i[4])\n \ndef house_or_hotel_transfer(player_no, set_colour, no_of_units_purchased):\n print(\"we are assigning houses and or hotels to your \", set_colour, \" set.\")\n for i in squares:\n if set_colour == i[0] and player_no == int(i[4]): #checks to see if the set colour matches with the purchase request AND that the player is the registered owner of the property\n print(i[1],i[11])\n new_no_of_units = int(i[11])\n new_no_of_units += no_of_units_purchased\n i[11] = new_no_of_units\n print(i[1],i[11])\n\n#creates a list of player data which is updated each move such as player balance, position on the board and whether the player has a get out of jail free card...\ndef assign_player_data():\n player = []\n \n no_of_players = int(input(\"how many players do you want? \"))\n print(no_of_players)\n no_of_players += 1 #add banker as player0\n \n \n for i in range(no_of_players):\n if i == 0:\n player.append([\"banker\"])\n player[i].append(0)#assigning piece to player\n player[i].append(20580-(1500*(no_of_players-1)))#assigning balance to banker\n player[i].append(0)# position on the board.\n player[i].append(False) #get out of jail free card bool\n player[i].append(False)#in jail bool\n else:\n player.append([input(\"Player's name: \")])\n x = int(input(\"choose a piece 1 = 🚗 | 2 = 👞 \"))\n player[i].append(x)#assigning piece to player\n player[i].append(1500)#assigning balance to player\n player[i].append(0)# position on the board\n player[i].append(True) #get out of jail free card bool\n player[i].append(False)#in jail bool\n \n return player\n\n#gets the card, selects top card reads it and puts it at the bottom of the deck \ndef cards(card_type):\n card = card_type[0]\n print(\"moving top card to bottom of deck\")\n card_type.append(card_type.index(card))\n card_type.pop(0)\n\n\n\n\n\ndef check_balance1(player_no, cost, transaction_type):\n balance = int(player[player_no][2])\n cost = abs(int(cost))\n player_name = player[player_no][0]\n if balance >= cost:\n print(player_name, \"You have £\" + str(balance) + \". You have sufficient funds to continue with this transaction.\")\n owner_square_you_occupy = int(squares[player[player_no][3]][4])\n if transaction_type == \"house_or_hotel_purchase\":\n transaction(player_no, cost, 0, transaction_type) #making payee the bank\n else:\n transaction(player_no, cost, owner_square_you_occupy, transaction_type)\n else:\n print(\"You appear not to have enough funds in your bank account to complete this transaction.\")\n print(\"checking mortgage value of your portfolio\")\n \n # so... if player doesn't have enough cash to pay a debt, the amount of cash must be added to the following\n # scan squares list to establish value of houses and hotels (half value) plus face value of each of the properties that are unmortgaged. Half the value of properties that are mortgaged. \n # If total value is less than debt outstanding, the player loses,\n \n # houses and hotels are given back to bank. Creditor receives half their face value, unmortgaged properties are given to creditor. mortgaged properties are given to creditor who must pay 10% of the prop value (the interest)\n # and then the creditor has the choice to pay off the mortgage there and then or do it later in the game. 
if the latter, the credritor must pay the 10% again at the time of paying off the mortgage. \n # \n # According to the rules: A player is bankrupt, when he owes more than he can pay either to another player or to the Bank. If his debt is to another player, \n # he must turn over to that player all that he has of value and retire from the game. In making this settlement, if he owns houses or hotels, he must return \n # these to the Bank in exchange for money to the extent of one-half the amount paid for them and this cash is given to the creditor. If he has mortgaged \n # property he also turns this property over to his creditor, but the new owner must at once pay the Bank the amount of interest on the loan, which is 10% of \n # the value of the property. After the new owner does this, he may, at his option, pay the principal or hold the property until some later turn at which time \n # he may lift the mortgage. If he holds property in this way until a later turn, he must pay the interest again when he lifts the mortgage. Should a player \n # owe the Bank, instead of another player, more than he can pay (because of taxes and penalties) even by selling his buildings and mortgaging property, he must \n # turn over all his assets to the Bank. In this case, the bank immediately sells by auction all property so taken, except buildings. A bankrupt player must \n # immediately retire from the game.The last player left in the game wins.\n \n # Unimproved properties can be mortgaged through the Bank at any time. Before an improved property can be mortgaged, all the Houses and Hotels on all the \n # properties of its color-group must be sold back to the Bank at half price. The mortgage value is printed on each Title Deed card. Once mortgaged, the deed \n # card is turned face-down, until the mortgage is lifted.\n \n # and the creditor is paid the value of the houses (half what the loser paid) then is given the properties at face value. If properties are already mortgaged the creditor must be 10% of value of the property\n # The creditor now can pay the mortgage off immediately OR can wait. If he waits he must pay another 10% of the value of the property when he rids himself of the mortgage. \n \n \n \n # if the debtor's cash plus other assets is more than the debt outstanding, a suggested order of events is this:\n # mortgage incomplete sets off, starting at the lowest rental value\n # then mortgage sets without houses on them \n # then sell the houses off\n # then hotels\n # then mortgage the remaining properties off\n \n \n \n # if the then mortgage values. 
\n # Suggest the cheapest properties to mortgage off first\n \n \n \n \n\n\n \n \ndef transaction(trans_party1, cost, trans_party2, trans_type):\n if trans_type == \"prop_purchase\":\n purchase = input()\n if purchase == \"y\":\n print(\"you have decided to make the purchase\")\n property_transfer(trans_party1, squares[player[trans_party1][3]][1])\n elif trans_type == \"house_or_hotel_purchase\":\n purchase = input()\n if purchase == \"y\":\n print(\"you have decided to make the house or hotel purchase\")\n ### we need to go back to the portfolio function in order to then call the house_or_hotel_purchase function...\n \n if trans_type == \"receipt\": # if active player is receiving funds\n payer = trans_party2\n payee = trans_party1\n else: # if active player is paying out \n payer = trans_party1\n payee = trans_party2 \n \n transaction_val = abs(cost)\n payer_balance = player[payer][2]\n \n payee_balance = player[payee][2]\n print(\"debit\", transaction_val, \"from\", payer, \"s\", payer_balance)\n print(\"credit\", transaction_val, \"to\", payee, \"s\", payee_balance)\n player[payer][2] = payer_balance - transaction_val\n player[payee][2] = payee_balance + transaction_val\n print(\"new player balance is:\", player[trans_party1][2])\n \n \n \n# Inspects property availability for purchase and if not calls the functions to charge rent, pick chance cards, go to jail etc.\ndef prop_available(square_no, player_no, sent_through_chance_card):\n prop_name = squares[square_no][1]\n print(\"The person who owns the property you are on is \", player[int(squares[square_no][4])][0]) \n if squares[square_no][4] == '0' and squares[square_no][0] == \"NA\": # this means Go, Community Chest, Income Tax, Chance, Just Visiting Jail, Community Chest, Free Parking, Chance, Go To Jail, Community Chest, Chance, Super Tax\n #U nique squares number 8 and are chronologically: Go, community_chest, tax, chance, jail, free_parking, chance, go_to_jail\n if squares[square_no][1] == \"COMMUNITY CHEST\":\n comunity_chest_calc(square_no, player_no)\n elif squares[square_no][1] == \"CHANCE\":\n chance_card_calc(square_no, player_no)\n elif squares[square_no][1] == \"INCOME TAX\" or squares[square_no][1] == \"Super TAX\":\n tax_type = squares[square_no][1]\n input(tax_type + \". Press any key to pay your £200 owed\")\n check_balance1(player_no, 200, \"penalty_pay\")\n elif squares[square_no][1] == \"FREE PARKING\":\n print(\"You landed on Free Parking!\") \n elif squares[square_no][1] == \"GO TO JAIL\":\n go_to_jail(player_no)\n elif squares[square_no][1] == \"Just Visiting Jail\":\n print(\"just visiting jail\")\n else:\n print(\"you are on square: \", squares[square_no][1]) # to help debug\n elif squares[square_no][4] == '0' and squares[square_no][0] != \"NA\": # this means property is owned by the bank and for sale\n print(\"you have landed on property which is owned by the bank and is for sale\")\n prop_price = squares[square_no][3]\n print(prop_name, \"is unowned. It is available for £\", prop_price + \".\")\n check_balance1(player_no, prop_price, \"prop_purchase\")\n \n elif player_no == int(squares[square_no][4]): # you have landed on your own property\n print(\"This property belongs to you. 
Welcome home!\")\n\n elif player_no != int(squares[square_no][4]): # you have landed on someone else's property\n rent_calculator(player_no, square_no, sent_through_chance_card)\n\n return player_no # the player_no needs to be returned so that the active player can be updated.\n\n\ndef repair_bill_calc(player_no, cost_per_house, cost_per_hotel):\n hotel_count = 0\n house_count = 0\n for i in squares:\n if player_no == int(i[4]):\n print(i[1], i[11])\n no_of_houses = int(i[11])\n if no_of_houses == 5:\n hotel_count += 1\n elif no_of_houses < 5:\n house_count += no_of_houses\n else:\n print(\"error counting houses in property: \", [1])\n print(\"no of hotels: \", hotel_count)\n print(\"no of houses: \", house_count)\n total_bill = (hotel_count * cost_per_hotel) + (house_count * cost_per_house)\n print(\"total property repair bill = £\", total_bill)\n check_balance1(player_no, total_bill, \"penalty_pay\")\n \n \ndef rent_calculator(player_no, square, sent_through_chance_card): # sent_through_chance_card is a boolean variable \n rent = 0 \n if square == 12 or square == 28: # if a utility\n print(\"rolling dice\")\n print (\"multiply the dice total by 4 if one property is owned, 10 if both properties are owned\")\n rent = utility_rent_calc(square, player_no, sent_through_chance_card)\n \n elif square == 5 or square == 15 or square == 25 or square == 35: # if a train station\n rent = train_station_rental_calc(square, sent_through_chance_card)\n \n else: # vanilla properties whose rent only changes based on the number of houses and hotels on each\n rent_without_houses = 5\n no_house = int(squares[square][11])\n new_val = rent_without_houses + no_house\n rent = abs(int(squares[square][new_val]))\n print(rent)\n print(no_house)\n print(new_val)\n print(\"square no: \", square)\n print(\"rent! Checking if you have £\" + str(rent)) \n check_balance1(player_no, rent, \"rent\") \n\ndef comunity_chest_calc(square_no, player_no):\n input(\"press any key to pick the COMMUNITY CHEST card\")\n owner_square_you_occupy = int(squares[player[player_no][3]][4]) # this is the owner of the property on which the active player has landed\n print(community_chest[0][0])\n if community_chest[0][2] >= 0 and community_chest[0][2] != 10: # if the community_chest card is an 'advance to'... card but not Go to Jail\n advance_to_new_square(player_no, community_chest[0][2])\n \n elif community_chest[0][2] == 10: # if go to jail\n go_to_jail(player_no)\n \n elif community_chest[0][2] < 0 and community_chest[0][1] > 0: # if no movement of player is required and a payment is to be received...\n if community_chest[0][0] == \"It is your birthday. Collect £10 from every player\":\n for i in range(len(player)-1):\n if player_no != i+1:\n print(\"player\", i+1, \"is paying out to \", player_no)\n transaction(player_no, 10, i+1, \"receipt\")\n else: \n transaction(player_no, community_chest[0][1], owner_square_you_occupy, \"receipt\")\n \n elif community_chest[0][2] < 0 and community_chest[0][1] < 0: # if no movement of player is required and a payment is to be made...\n if community_chest[0][0] == \"You are assessed for street repairs. £40 per house. £115 per hotel\":\n print(\"just checking on your property portfolio. 
This could be expensive...\")\n repair_bill_calc(player_no, 40, 115)\n else:\n check_balance1(player_no, community_chest[0][1], \"penalty_pay\") \n \n elif community_chest[0][0] == \"Get Out of Jail Free\":\n get_out_of_jail_card(player_no)\n \n else:\n print(\"any other community chest cards\")\n cards(community_chest) \n\ndef chance_card_calc(square_no, player_no): \n input(\"press Enter to pick the CHANCE card\")\n owner_square_you_occupy = int(squares[player[player_no][3]][4]) # the owner of the property on which the active player has landed\n print(chance_cards[0][0])\n if chance_cards[0][2] >= 0 and chance_cards[0][2] != 10: # if the chance card is an 'advance to'... card but not Go to Jail\n advance_to_new_square(player_no, chance_cards[0][2])\n \n elif chance_cards[0][0] == \"Advance token to nearest Utility. If unowned, you may buy it from the Bank. If owned, throw dice and pay owner a total ten times amount thrown.\":\n nearest_utility = nearest_utility_calc(player_no)\n print(\"player no: \", player_no, \"utility: \", nearest_utility, \"owner of the utility: \", squares[nearest_utility][4])\n advance_to_new_square(player_no, nearest_utility)\n \n elif chance_cards[0][0] == \"Advance to the nearest Station. If unowned, you may buy it from the Bank. If owned, pay owner twice the rental to which they are otherwise entitled\":\n #station_rent = train_station_rental_calc(nearest_train_station_calc(player_no), True)\n #print(\"station rent is... £\", station_rent)\n nearest_station = nearest_train_station_calc(player_no)\n print(\"nearest_station = \", nearest_station)\n advance_to_new_square(player_no, nearest_station)\n \n elif chance_cards[0][0] == \"You have been elected Chairman of the Board. Pay each player £50\":\n for i in range(len(player)-1):\n if player_no != i+1:\n print(\"player\", i+1, \"is is receiving funds from \", player_no)\n transaction(player_no, 50, i+1, \"penalty_pay\")\n \n elif chance_cards[0][0] == \"Go Back 3 square spaces\":\n print(\"three spaces back calc says we are now on square...\", three_spaces_back_calc(player_no, square_no)) \n new_square = three_spaces_back_calc(player_no, square_no)\n print(\"three spaces back new square is... \", new_square)\n go_backwards(player_no, new_square)\n \n elif chance_cards[0][2] < 0 and chance_cards[0][1] > 0: # if no movement of player is required and a payment is to be received... \n transaction(player_no, chance_cards[0][1], owner_square_you_occupy, \"receipt\")\n \n elif chance_cards[0][2] == 10: # if go to jail\n go_to_jail(player_no)\n \n elif chance_cards[0][0] == \"Get Out of Jail Free\":\n get_out_of_jail_card(player_no)\n \n elif chance_cards[0][2] < 0 and chance_cards[0][1] < 0: # if no movement of player is required and a payment is to be made...\n if chance_cards[0][0] == \"Make general repairs on all your property. For each house pay £25. For each hotel pay £100\":\n print(\"just checking on your property portfolio. 
This could be expensive...\")\n repair_bill_calc(player_no, 25,100)\n else:\n check_balance1(player_no, chance_cards[0][1], \"penalty_pay\")\n \n else:\n print(\"other chance cards exist...\")\n cards(chance_cards) \n \n\n \ndef advance_to_new_square(player_no, new_square_no):\n current_square = player[player_no][3]\n virtual_die = new_square_no - current_square\n if virtual_die < 0:\n virtual_die = 40 - current_square + new_square_no \n else:\n virtual_die = new_square_no - current_square\n \n new_square_no = new_square_cal(player_no, virtual_die, 0)\n \n prop_available(new_square_no, player_no, True)\n\ndef go_backwards(player_no, new_square_no):\n player[player_no][3] = new_square_no\n prop_available(new_square_no, player_no, True)\n \n\n\ndef go_to_jail(player_no):\n in_jail = True\n new_player_pos = 10\n print(\"You are in jail!\")\n player[player_no][3] = new_player_pos\n if player[player_no][4] == True:\n get_out_of_jail_use = input(\"you have a get out of jail card do you wish to use it (y/n)\")\n if get_out_of_jail_use == \"y\":\n in_jail = False\n print(\"Just visiting...\")\n player[player_no][4] = False\n else:\n in_jail = True\n player[player_no][5] = in_jail\n \ndef get_out_of_jail_card(player_no):\n player[player_no][4] == True\n \n# calculates the new square value \ndef new_square_cal(player_no, die1, die2):\n total = die1 + die2\n new_player_pos = player[player_no][3] + total\n \n if new_player_pos > 39:\n subtracter = 40 - player[player_no][3]\n new_pos = total - subtracter\n print(\"new_position:\", new_pos)\n player[player_no][3] = new_pos\n make_go_payment(player_no) # When you go past GO! or when you land on GO! then collect £200\n else:\n print(\"new_position:\", new_player_pos)\n player[player_no][3] = new_player_pos\n \n double(die1, die2)\n return player[player_no][3]\n\n\n\n \n#this function to be called when the active player has completed his move(s)\ndef player_turn(player_no, total_players, die1, die2):\n player_turn = 1\n print(\"total_players: \", total_players)\n if player_no == total_players-1:\n if die1 == die2:\n player_turn = player_no\n print(\"active player is the last player in 'player' list. Double means active player doesn't change\")\n else:\n player_turn = 1\n print(\"active player is the last player in 'player' list. Active player updates to 1\")\n else:\n if die1 == die2:\n player_turn = player_no\n print(\"active player is not last on player list. Double thrown so active player does not change\")\n else:\n player_turn += 1\n print(\"active player is not last on player list. Adds 1 to active player value.\")\n active_player = player_turn\n #print(\"the active player is: \", player[active_player][0])\n return active_player\n\n\n\ndef make_go_payment(player_no):\n if player[player_no][3] == 0:\n print(\"Congratulations you landed on Go. You just collected £200\")\n else:\n print(\"You passed Go. 
You just collected £200\")\n player[player_no][2] += 200 # active customer balance credited\n player[0][2] -= 200 # bank balance debited \n print(\"your new player balance is: £\", player[player_no][2])\n \n \n \ndef move(player_keystroke, player_no):\n \n print(player_keystroke, player_no)\n if player_keystroke == \"p\":\n portfolio(player_keystroke, player_no)\n \n input(\"...and press any key to roll dice\")\n else:\n print(\"not portfolio\")\n \n\n\ndef portfolio(player_keystroke, player_no):\n print(\"Portfolio:\")\n houses = 0\n hotels = 0\n\n for i in squares:\n if int(i[4]) == player_no:\n if int(i[11]) == 5:\n houses = 0\n hotels = 1\n print(i[1], \"part of the \", i[0], \"set. Currently, you have \", hotels, \" hotel\")\n \n else:\n houses = int(i[11])\n hotels = 0\n print(i[1], \"part of the \", i[0], \"set. Currently, you have \", houses, \" houses\")\n \n \n \n \n purchase = input(\"Do you wish to buy any houses or hotels for your portfolio?\")\n \n if purchase == \"y\":\n \n set_colour = input(\"type the set colour for which you wish to buy property...\")\n \n # we need to check if the set is eligible for house purchase ie not stations or utilities...\n # ...and if the whole set is owned...\n # and if so whether there are any houses already on the set, in order to work out the limit on the number of houses which can be placed on the set.\n \n if set_colour == \"transparent\" or set_colour == \"white\":\n print(\"You can't buy houses for stations or utilities...\")\n else: \n set_size_owned = 0\n set_size = 0\n set_houses = 0\n set_hotels = 0\n house_cost = 0\n for i in squares:\n if int(i[4]) == player_no and set_colour == i[0]:\n set_size_owned += 1\n if int(i[11]) == 5:\n houses = 0\n hotels = 1\n set_hotels +=1\n print(i[1], \"part of the \", i[0], \"set. Currently, you have \", hotels, \" hotel\")\n \n else:\n houses = int(i[11])\n set_houses += int(i[11])\n hotels = 0\n print(i[1], \"part of the \", i[0], \"set. Currently, you have \", houses, \" houses\")\n \n if set_colour == i[0]:\n set_size += 1\n \n \n if set_size == set_size_owned:\n max_houses = (set_size_owned * 4) - set_houses\n max_hotels = set_size_owned - set_hotels\n \n if max_houses == 0:\n if max_hotels == 0:\n print(\"you have bought all the houses and hotels for this property\")\n elif max_hotels > 0:\n hotel_option = input(\"you have the maximum no of houses available on this set. Do you want to buy a hotel for each square on the set? Press Y or any other key to carry on without the purchase\")\n if hotel_option == \"y\":\n print(\"working out the cost of hotels for this set...\")\n hotel_cost = house_cost_calc(set_colour, set_size)\n print(\"The cost of buying a hotel on each square of the \", set_colour, \" set is £\", hotel_cost)\n \n # here we need to add check balance then if funds available, transaction function to be called. \n check_balance1(player_no, hotel_cost, \"house_or_hotel_purchase\") # have we got enough funds, if so call transaction function using bank as payee, player_no as payor\n print(\" calling house_or_hotel_transfer function to update the squares list with the correct no of houses or hotels...\")\n house_or_hotel_transfer(player_no, set_colour, 1)\n \n else: \n print(\"set size is \", set_size_owned, \"so you can buy a maximum of \", max_houses, \"houses and \", max_hotels, \"hotel(s)\")\n\n no_of_houses = int(input(\"how many houses do you want to buy on the set coloured \" + set_colour))\n while no_of_houses > max_houses:\n no_of_houses = int(input(\"Too many houses. 
Try again.\"))\n house_cost = house_cost_calc(set_colour, no_of_houses) \n print(\"You wish to buy \", str(no_of_houses), \"on the \", set_colour, \" set. This will cost...\", house_cost)\n # here we need to add check balance then if funds available, transaction function to be called. \n check_balance1(player_no, house_cost, \"house_or_hotel_purchase\") # have we got enough funds, if so call transaction function using bank as payee, player_no as payor\n print(\" calling house_or_hotel_transfer function to update the squares list with the correct no of houses or hotels...\")\n house_per_square = int(no_of_houses/set_size)\n house_or_hotel_transfer(player_no, set_colour, house_per_square)\n \n elif set_size != set_size_owned:\n print(\"In order to buy houses for this property you need to own the whole set...\")\n input(\"press any key to continue\")\n portfolio(player_keystroke, player_no)\n else:\n print(\"Throw exception!!!!!!!!\")\n\n\n#-------------------end of funtions----------------- \n\n#prop_4_player2() # buys all property in name of player 2\n \n#shuffling the chance and community chest cards\n#rd.shuffle(chance_cards)\n#rd.shuffle(community_chest) \nplayer = assign_player_data()#sets a global variable from the assign_player_data function\nprint(player)\n\n\n\n\n\nwhile True:\n #print(player)\n player_name = player[active_player][0]\n player_keystroke = str(input(player_name + \", Enter 'P' to display your properties, or any other key to roll dice...\"))\n move(player_keystroke, active_player)\n \n roll_dice = roll(active_player)\n new_square = new_square_cal(active_player, roll_dice[0], roll_dice[1])\n #new_square = new_square_cal(active_player, 30, 3)\n active_player = player_turn(prop_available(new_square, active_player, False), len(player), roll_dice[0], roll_dice[1])\n print(\"players: \", player)\n ","sub_path":"Pynopoly/pynopolyV3.0.py","file_name":"pynopolyV3.0.py","file_ext":"py","file_size_in_byte":37853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"92907840","text":"import sqlite3\nfrom student import Student\nfrom instructor import Instructor\n\nclass WhoWhatWhy():\n\n def __init__(self):\n self.db_path = \"/Users/MainTechPiece/workspace/python/student_exercises/studentexercises.db\"\n\n def exercises_w_students(self):\n\n exercises = dict()\n\n with sqlite3.connect(self.db_path) as conn:\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select\n e.Id ExerciseId,\n e.Name,\n s.Id,\n s.First_Name,\n s.Last_Name,\n i.Id,\n i.First_Name,\n i.Last_Name\n from Exercise e\n join Assigned_Exercises ae on ae.ExerciseId = e.Id\n join Student s on s.Id = ae.StudentId\n join Instructor i on i.Id = ae.InstructorId;\n \"\"\")\n\n dataset = db_cursor.fetchall()\n\n for row in dataset:\n exercise_name = row[1]\n student = Student(row[2], row[3], row[4], \"\", \"\")\n instructor = Instructor(row[5], row[6], row[7], \"\", \"\", \"\")\n instructor.students = []\n\n if exercise_name not in exercises:\n exercises[exercise_name] = [instructor]\n instructor.students.extend(f'{student.first_name} {student.last_name}')\n else:\n exercises[exercise_name].append(instructor)\n instructor.students.extend(f'{student.first_name} {student.last_name}')\n\n for exercise_name, instructors in exercises.items():\n print(f'{exercise_name}:')\n for instructor in instructors:\n this = f'\\t* {instructor.first_name} {instructor.last_name} assigned this to '\n for each in instructor.students:\n this += each\n print(this)\n 
print(\"\\n\")\n\nreport = WhoWhatWhy()\n\nreport.exercises_w_students()\n","sub_path":"who_what_why.py","file_name":"who_what_why.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"48665472","text":"\"\"\"pypyr step that parses file, does string replacement and writes output.\"\"\"\nfrom functools import reduce\nimport os\nimport logging\n\n# logger means the log level will be set correctly\nlogger = logging.getLogger(__name__)\n\n\ndef run_step(context):\n \"\"\"Parses input file and replaces a search string.\n\n This also does string substitutions from context on the fileReplacePairs.\n It does this before it search & replaces the in file.\n\n Be careful of order. If fileReplacePairs is not an ordered collection,\n replacements could evaluate in any given order. If this is coming in from\n pipeline yaml it will be an ordered dictionary, so life is good.\n\n Args:\n context: pypyr.context.Context. Mandatory.\n The following context keys expected:\n - fileReplaceIn. mandatory. path-like.\n Path to source file on disk.\n - fileReplaceOut. mandatory. path-like. Write output file to\n here. Will create directories in path for you.\n - fileReplacePairs. mandatory. Dictionary where items are:\n 'find_string': 'replace_string'\n\n Returns:\n None.\n\n Raises:\n FileNotFoundError: take a guess\n pypyr.errors.KeyNotInContextError: Any of the required keys missing in\n context.\n pypyr.errors.KeyInContextHasNoValueError: Any of the required keys\n exists but is None.\n \"\"\"\n logger.debug(\"started\")\n context.assert_keys_have_values(__name__,\n 'fileReplaceIn',\n 'fileReplaceOut',\n 'fileReplacePairs')\n\n in_path = context.get_formatted('fileReplaceIn')\n out_path = context.get_formatted('fileReplaceOut')\n\n logger.debug(\"Running subsitutions from context on fileReplacePairs\")\n formatted_replacements = context.get_formatted_iterable(\n context['fileReplacePairs'])\n\n logger.debug(f\"opening source file: {in_path}\")\n with open(in_path) as infile:\n logger.debug(f\"opening destination file for writing: {out_path}\")\n os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)\n with open(out_path, 'w') as outfile:\n outfile.writelines(iter_replace_strings(infile,\n formatted_replacements))\n\n logger.info(f\"Read {in_path}, replaced strings and wrote to {out_path}\")\n logger.debug(\"done\")\n\n\ndef iter_replace_strings(iterable_strings, replacements):\n \"\"\"Generator that yields a formatted string from iterable_strings.\n\n Args:\n iterable_strings: Iterable containing strings. 
E.g a file-like object.\n replacements: Dict containing 'find_string': 'replace_string' pairs\n\n Returns:\n Yields formatted line.\n \"\"\"\n for string in iterable_strings:\n yield reduce((lambda s, kv: s.replace(*kv)),\n replacements.items(),\n string)\n","sub_path":"pypyr/steps/filereplace.py","file_name":"filereplace.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"34441497","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nApartment class of pycity\n\"\"\"\n\nfrom __future__ import division\nimport warnings\n\nimport pycity_base.classes.demand.DomesticHotWater as DHW\nimport pycity_base.classes.demand.ElectricalDemand as ElecDemand\nimport pycity_base.classes.demand.SpaceHeating as SpaceHeat\n\n\nclass Apartment(object):\n \"\"\"\n Apartments potentially contain:\n Electricity, domestic hot water and space heating demand\n \"\"\"\n\n def __init__(self, environment, net_floor_area=None, occupancy=None):\n \"\"\"\n Parameter\n ---------\n environment : Environment object\n Common to all other objects. Includes time and weather instances\n net_floor_area : float, optional\n Net floor area of apartment in m^2 (default: None)\n occupancy : object\n Occupancy object of pycity (default: None)\n \"\"\"\n self.environment = environment\n self._kind = \"apartment\"\n self.net_floor_area = net_floor_area\n self.occupancy = occupancy\n\n # Create empty power curves\n self.power_el = ElecDemand.ElectricalDemand(environment,\n method=0,\n annualDemand=0)\n self.demandDomesticHotWater = DHW.DomesticHotWater(environment,\n tFlow=0,\n method=1,\n dailyConsumption=0,\n supplyTemperature=0)\n self.demandSpaceheating = SpaceHeat.SpaceHeating(environment,\n method=1,\n livingArea=0,\n specificDemand=0)\n self.rooms = []\n\n def addEntity(self, entity):\n \"\"\"\n Add an entity to apartment.\n\n Parameters\n ----------\n entity : object\n Entity. Possible objects:\n - Electrical demand (entity._kind == \"electricaldemand\")\n - Domestic hot water demand (entity._kind == \"domestichotwater\")\n - Space heating demand (entity._kind == \"spaceheating\")\n - Occupancy (entity._kind == 'occupancy')\n - Room (entity._kind == \"room\"\n \n Example\n -------\n >>> myDHW = DomesticHotWater(...)\n >>> myApartment = Apartment(...)\n >>> myApartment.addDevice(myDHW)\n \"\"\"\n\n if entity._kind == \"electricaldemand\":\n self.power_el = entity\n\n elif entity._kind == \"domestichotwater\":\n self.demandDomesticHotWater = entity\n\n elif entity._kind == \"spaceheating\":\n self.demandSpaceheating = entity\n\n elif entity._kind == 'occupancy':\n self.occupancy = entity\n\n elif entity._kind == \"room\": # pragma: no cover\n self.rooms.append(entity)\n\n else: # pragma: no cover\n warnings.warn('Kind of entity is unknown. 
Entity has not been ' +\n 'added')\n\n def addMultipleEntities(self, entities):\n \"\"\"\n Add multiple entities to the existing apartment\n \n Parameter\n ---------\n entities: List-like\n List (or tuple) of entities that are added to the apartment\n \n Example\n -------\n >>> myDHW = DomesticHotWater(...)\n >>> mySH = SpaceHeating(...)\n >>> myApartment = Apartment(...)\n >>> myApartment.addDevice([myDHW, mySH])\n \"\"\"\n for entity in entities:\n self.addEntity(entity)\n\n def get_power_curves(self,\n getElectrical=True,\n getDomesticHotWater=True,\n getSpaceheating=True,\n currentValues=True):\n \"\"\"\n Get apartment's current power curves\n \n Parameters\n ----------\n getElectrical : Boolean, optional\n Also return current electrical demand\n getDomesticHotWater : Boolean, optional\n Also return current domestic hot water demand\n getSpaceheating : Boolean, optional\n Also return current space heating demand\n currentValues : Boolean, optional\n Return the current values (True) or return values for all time \n steps (False).\n \n Return\n ------\n Current power curves. Order: electrical, domestic hot water,\n space heating\n \"\"\"\n result = ()\n if getElectrical:\n result += (self.power_el.get_power(currentValues),)\n if getDomesticHotWater:\n result += (self.demandDomesticHotWater.get_power(currentValues,\n False),)\n if getSpaceheating:\n result += (self.demandSpaceheating.get_power(currentValues),)\n\n return result\n\n def get_total_el_power(self, currentValues=True):\n \"\"\"\n Returns current el. power curve of building (net electric power plus\n electric hot water, if electric hot water device is installed).\n\n Parameters\n ----------\n currentValues : bool, optional\n Return the current values (True) or return values for all time\n steps (False).\n\n Return\n ------\n If dhw is supplied by electrical supply:\n result_tuple : tuple (power_dhw + power_el)\n Result tuple with power curve\n\n else (no dhw via electrical device):\n power_el : np.array\n Electrical power curve of apartment\n \"\"\"\n power_el = self.power_el.get_power(currentValues)\n if not self.demandDomesticHotWater.thermal:\n power_dhw = self.demandDomesticHotWater.get_power(currentValues,\n False)\n return (power_dhw + power_el)\n else:\n return power_el\n\n def get_total_th_power(self,\n currentValues=True,\n returnTemperature=True):\n \"\"\"\n Returns current thermal power curve of building (space heating\n plus thermal hot water, if thermal hot water device is installed).\n\n Parameters\n ----------\n currentValues : bool, optional\n Return the current values (True) or return values for all time\n steps (False).\n (default: True)\n returnTemperature : bool, optional\n Defines, if return temperature should be returned\n (default: True)\n\n Return\n ------\n If returnTemperature is True:\n result_tuple : tuple (power_dhw[0] + demandSpaceHeating, power_dhw[1])\n Result tuple with thermal power curve and return temperature curve\n\n else (returnTemperature is False):\n result_tuple : tuple (power_dhw + demandSpaceHeating)\n Thermal power curve of apartment\n \"\"\"\n demandSpaceHeating = self.demandSpaceheating.get_power(currentValues)\n if self.demandDomesticHotWater.thermal:\n function = self.demandDomesticHotWater.get_power\n power_dhw = function(currentValues, returnTemperature)\n\n if returnTemperature:\n return (power_dhw[0] + demandSpaceHeating, power_dhw[1])\n else:\n return (power_dhw + demandSpaceHeating)\n\n def get_space_heat_power_curve(self, current_values=True):\n \"\"\"\n Returns space 
heating power curve of apartment\n Parameters\n ----------\n currentValues : bool, optional\n Return the current values (True) or return values for all time\n steps (False).\n (default: True)\n Returns\n -------\n space_heat_curve : array-like\n Space heating power curve\n \"\"\"\n\n return self.demandSpaceheating.get_power(currentValues=current_values)\n\n def get_el_power_curve(self, current_values=True):\n \"\"\"\n Returns net electrical power curve of apartment (without space heating\n or hot water demand!)\n Parameters\n ----------\n currentValues : bool, optional\n Return the current values (True) or return values for all time\n steps (False).\n (default: True)\n Returns\n -------\n el_power_curve : array-like\n Electrical power curve\n \"\"\"\n\n return self.power_el.get_power(currentValues=current_values)\n\n def get_dhw_power_curve(self, current_values=True):\n \"\"\"\n Returns hot water power curve of apartment\n Parameters\n ----------\n currentValues : bool, optional\n Return the current values (True) or return values for all time\n steps (False).\n (default: True)\n Returns\n -------\n el_power_curve : array-like\n Electrical power curve\n \"\"\"\n\n return self.demandDomesticHotWater.get_power(\n currentValues=current_values, returnTemperature=False)\n\n def get_max_nb_occupants(self):\n \"\"\"\n Returns maximum number of occupants within apartment\n\n Returns\n -------\n max_nb_occupants : int\n Maximum number of occupants\n \"\"\"\n max_nb_occupants = None\n if self.occupancy is not None:\n max_nb_occupants = self.occupancy.number_occupants\n return max_nb_occupants\n\n def get_occupancy_profile(self):\n \"\"\"\n Returns occupancy profile (if occupancy object exists)\n\n Returns\n -------\n occupancy_profile : array-like\n 1d array-like list with number of occupants per timestep\n \"\"\"\n occupancy_profile = None\n if self.occupancy is not None:\n occupancy_profile = self.occupancy.occupancy\n return occupancy_profile\n","sub_path":"pycity_base/classes/demand/Apartment.py","file_name":"Apartment.py","file_ext":"py","file_size_in_byte":10075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"11449679","text":"import PyPDF2\n\n# Simple pdf's operation using PyPDF2\n\nwith open(\"first.pdf\", \"rb\") as file:\n reader = PyPDF2.PdfFileReader(file)\n print(reader.numPages)\n page =reader.getPage(0)\n page.rotateClockwise(90)\n writer = PyPDF2.PdfFileWriter()\n writer.addPage(page)\n with open(\"rotated.pdf\", \"wb\") as output:\n writer.write(output)\n\n\n\n\n# Merging two pdf's in one\nmerger = PyPDF2.PdfFileMerger()\nfile_names = [\"first.pdf\", \"second.pdf\"]\nfor file_name in file_names:\n merger.append(file_name)\nmerger.write(\"combined.pdf\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"585115441","text":"import os\n\ndef make_dir(dir_name):\n try:\n if not(os.path.isdir(dir_name)):\n os.makedirs(os.path.join(dir_name))\n except OSError as e:\n if e.errno != errno.EEXIST:\n print(\"Failed to create directory!!!!!\")\n raise\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"263272092","text":"\r\nfrom sodapy import Socrata\r\nimport matplotlib.pyplot as plt; plt.rcdefaults()\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom 
flask import request, redirect\r\nimport webbrowser, operator, jinja2, os, cgi, webapp2, logging\r\nJINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\r\n extensions=['jinja2.ext.autoescape'],\r\n autoescape=True)\r\n\r\n# Information at https://dev.socrata.com/foundry/data.seattle.gov/xurz-654a\r\n\r\n\r\n#This program creates content for the Neighborhood Crime Dashboard\r\n#Data created:\r\n# 1. Graph for number of crimes per year\r\n# 2. Graph for Types of Crimes/Frequency\r\n# 3. Graph for Crimes Numbers per month - 2018\r\n# 4. Graph of Type of Crime for 2018\r\n# 5. The most common crime in that neighborhood\r\n# 6. Most common crime that year\r\n\r\n\r\n\r\n# import info\r\n# api username/token/passoword infromation from a file \"info\" not provided, you can access the api without \r\n# a token:\r\n# Unauthenticated client only works with public data sets. Note 'None'\r\n# in place of application token, and no username or password:\r\n# client = Socrata(\"data.seattle.gov\", None)\r\n\r\n\r\n\r\n#Get crimes over the years\r\ndef plotCrime(data):\r\n timeDict = {}\r\n for dict in data:\r\n year = dict['occ_datetime'][0:4]\r\n if year not in timeDict.keys():\r\n timeDict[year] = 0\r\n timeDict[year] += 1\r\n return timeDict\r\n\r\n\r\n\r\n#Get a dictionary of types of crimes and their frequencies\r\ndef plotTypes(data):\r\n typedict = {}\r\n for dict in data:\r\n if \"crime_subcategory\" in dict.keys(): #some dicts don't include a subcategory, these crimes are not included\r\n cat = dict[\"crime_subcategory\"]\r\n if cat not in typedict.keys():\r\n typedict[cat] = 0\r\n typedict[cat] += 1\r\n return typedict\r\n\r\n\r\n\r\n#Get most frequent crime from plotTypes\r\ndef mostCrime(dict):\r\n name = \"\"\r\n highest = 0\r\n for item in dict.keys():\r\n num = dict[item]\r\n if num > highest:\r\n name = item\r\n highest = num\r\n return name\r\n\r\n\r\n\r\n#Get a list of the dictionaries of the crimes for a certain year\r\ndef yearList(data, year):\r\n list = []\r\n for dict in data:\r\n dictyear = dict['occ_datetime'][0:4]\r\n if dictyear == year:\r\n list.append(dict)\r\n return list\r\n\r\n\r\n#Dictionary of months and number of crimes for that month\r\ndef yearDetails(data):\r\n yearDict = {\"01\": 0, \"02\": 0, \"03\": 0, \"04\": 0, \"05\": 0, \"06\": 0, \"07\": 0,\r\n \"08\": 0, \"09\": 0, \"10\": 0, \"11\": 0, \"12\": 0}\r\n for dict in data:\r\n date = dict[\"occ_datetime\"][5:7]\r\n yearDict[date] += 1\r\n return yearDict\r\n\r\n\r\n\r\n#Create a bar graph from a dictionary\r\ndef barGraph(dict_data, label, title, fig, num):\r\n topic_nums = ()\r\n topic_names = ()\r\n if num == \"YES\":\r\n for key in dict_data:\r\n num = dict_data[key]\r\n name = int(key)\r\n del dict_data[key]\r\n dict_data[name] = num\r\n topic_names = sorted(dict_data.keys())\r\n else:\r\n for item in dict_data.keys():\r\n topic_names = topic_names + (item,)\r\n for item in dict_data.keys():\r\n topic_nums = topic_nums + (dict_data[item],)\r\n\r\n y_pos = np.arange(len(topic_names))\r\n\r\n plt.barh(y_pos, topic_nums, align='center', alpha=0.5)\r\n plt.yticks(y_pos, topic_names, fontsize=8)\r\n plt.xlabel('Number of Crimes')\r\n plt.ylabel(label)\r\n plt.title(title)\r\n\r\n plt.tight_layout()\r\n plt.savefig(fig)\r\n #plt.show()\r\n plt.close()\r\n\r\n\r\n \r\n\r\nclass MainHandler(webapp2.RequestHandler):\r\n def get(self):\r\n print (\"Choose a neighborhood from the list\")\r\n return\r\n\r\nclass GetData(webapp2.RequestHandler):\r\n def post(self):\r\n vals = 
{}\r\n        vals['page_title'] = \"Dashboard\"\r\n        name = self.request.get('neighborhood')\r\n        sub = self.request.get('submit')\r\n        logging.info(name)\r\n        logging.info(sub)\r\n        \r\n        neighborhood = name.capitalize()\r\n        \r\n        client = Socrata('data.seattle.gov',app_token = info.token,username=info.username, password=info.password)\r\n        results = client.get(\"xurz-654a\", neighborhood = neighborhood, limit=100000000)\r\n\r\n        # -1-\r\n        yearlist = plotCrime(results)\r\n        barGraph(yearlist, \"Year\", \"Number of Crimes per Year\", \"all.png\", \"NO\")\r\n\r\n        # -2-\r\n        crimelist = plotTypes(results)\r\n        barGraph(crimelist, \"Type\", \"Different Types of Crimes\", \"type.png\", \"NO\")\r\n\r\n        # -3-\r\n        twenty_eighteen = yearList(results, \"2018\")\r\n        yeardict = yearDetails(twenty_eighteen)\r\n        barGraph(yeardict, \"Month\", \"Number of Crimes per Month 2018\", \"18all.png\", \"NO\")\r\n\r\n        # -4-\r\n        crime18dict = plotTypes(twenty_eighteen)\r\n        barGraph(crime18dict, \"Type\", \"Types of Crime in 2018\", \"18type.png\", \"NO\")\r\n\r\n        # -5-\r\n        all = mostCrime(crimelist)\r\n\r\n        # -6-\r\n        now = mostCrime(crime18dict)\r\n\r\n        crime = {\"neighborhood\": neighborhood, \"allTime\": all, \"thisYear\": now}\r\n\r\n        file = open(\"dashboard.html\", \"w\")\r\n        template = JINJA_ENVIRONMENT.get_template('dashboardtemplate.html')\r\n        file.write(template.render(crime))\r\n        file.close()\r\n\r\n\r\n\r\n\r\napplication = webapp2.WSGIApplication([\r\n    ('/getData', GetData),\r\n    ('/.*', MainHandler)\r\n    ],\r\n    debug=True)\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"516373011","text":"def eladuosai(n):\r\n    l = list(range(1,n+1))\r\n    l[0] = 0\r\n    for i in range(2,n+1):\r\n        if l[i-1] != 0 :\r\n            for j in range(i*2,n+1,i):\r\n                l[j-1] = 0\r\n    result = [x for x in l if x != 0]\r\n    return set(result)\r\nprint(eladuosai(10000))\r\n","sub_path":"Test/EraPrimeCalc.py","file_name":"EraPrimeCalc.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"557507792","text":"# Cool Sequence! Always equate to 1 with all numbers!\ntry:\n    num = int(input())\n    if num > 0:\n        while num != 1:\n            print(int(num))\n            if num % 2 == 0:\n                num = num / 2\n            elif num % 2 == 1:\n                num = 3 * num + 1\n            else:\n                break\n        print(int(num))\n    else:\n        print('Input value cannot be zero or negative!')\n\nexcept ValueError:\n    print('Input Error! 
Put a positive integer!')","sub_path":"Projects/AutomateTheBoringStuff/Collaz_seq.py","file_name":"Collaz_seq.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"344493608","text":"__auther__ = 'Ashish Patil'\n\nfrom flask import Flask,request,jsonify\nfrom mysql import connector\nimport mysql\nimport json\nimport directorAPIUtil\nimport MovieUtil\n\ndef searchDirectorAPI():\n\n first_name = request.args.get('first_name')\n last_name = request.args.get('last_name')\n\n con = mysql.connector.connect(\n host='localhost',\n user='root',\n password='Stacker#123',\n database='IMDB'\n )\n cursor = con.cursor()\n\n #query to search director for given director first name and director last name\n cursor.execute(\"select * from directors where first_name = %s and last_name =%s\",(first_name,last_name))\n\n director_data = cursor.fetchall()\n cursor.close()\n\n #check for empty and more that one condition :\n isOneRecord = MovieUtil.validateLenght(director_data)\n\n if not isOneRecord:\n return \"\"\n\n director_id = director_data[0][0]\n\n cursor = con.cursor()\n cursor.execute(\"select count(*) from movies_directors where director_id = %s\", (director_id,))\n\n NumberOfMovies = cursor.fetchall()[0][0]\n cursor.close()\n\n isGreaterThanZero = MovieUtil.validateNumberOfMoviesForDirectorOrActor(NumberOfMovies)\n\n if isOneRecord and isGreaterThanZero:\n cursor = con.cursor()\n cursor.execute(\"select * from movies m join movies_directors md on m.id = md.movie_id \"\n \"where md.director_id = %s order by m.year desc limit 5\",\n (director_id,))\n\n movies_data = cursor.fetchall();\n cursor.close()\n\n SearchDirector_Dict = directorAPIUtil.fillSearchDirectorAPI(first_name,last_name,NumberOfMovies,movies_data)\n SearchDirector_Dict = json.dumps(SearchDirector_Dict)\n return str(SearchDirector_Dict)\n\n return \"\"","sub_path":"searchDirectorAPI.py","file_name":"searchDirectorAPI.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"339929999","text":"#Receives and executes BLINKT commands from rasp pi ALTO\n#Jacob Armstrong - Sept 12\n\nimport paho.mqtt.client as mqtt\nfrom blinkt import set_pixel, set_brightness, show, clear, time\n\nset_brightness(0.05)\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n client.subscribe(\"jacob\")\n# client.subscribe(\"jacob\") #look into why there are two of the same line here\n\ndef on_message(client, userdata, msg):\n print(\"executing received command: \"+str(msg.payload))\n clear()\n exec(str(msg.payload))\n show()\n# time.sleep(3)\n# clear()\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"128.224.56.211\", 1883, 60)\n\nclient.loop_forever()\n","sub_path":"rectest.py","file_name":"rectest.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"627905217","text":"\"\"\"\nThis library provides simplified functions for saving\noptions and values in a simple database file.\n\"\"\"\n# ==============================================\n__author__ = \"Jacopo De Luca\"\n__version__ = \"1.0.2\"\n# ==============================================\n\nimport sqlite3\nfrom Path import path_appdata\n\n\nclass DbPreferences:\n def __init__(self, module_name, optionset):\n self.path = path_appdata() 
+ module_name + \".db\"\n self.optionset = optionset\n self.db = sqlite3.connect(self.path)\n if optionset != str():\n self.create_optionset(optionset)\n\n def create_optionset(self, optionset):\n self.db.execute(\n \"CREATE TABLE IF NOT EXISTS %s (ID CHAR(50) PRIMARY KEY NOT NULL, \"\n \"INTVALUE INTEGER, \"\n \"REALVALUE REAL, \"\n \"STR TEXT);\" % optionset)\n self.optionset = optionset\n\n def select_optionset(self, optionset):\n self.optionset = optionset\n\n def put_int(self, key, value, optionset=str()):\n if optionset == str():\n optionset = self.optionset\n self.db.execute(\"REPLACE INTO %s (ID,INTVALUE) VALUES('%s',%i);\" % (optionset, key,\n DbPreferences.__check_type(value, int)))\n self.db.commit()\n\n def get_int(self, key, defvalue, optionset=str()):\n if optionset == str():\n optionset = self.optionset\n value = self.db.execute(\"SELECT INTVALUE FROM %s WHERE ID='%s';\" % (optionset, key)).fetchone()\n if not value:\n value = DbPreferences.__check_type(defvalue, int)\n else:\n value = value[0]\n return value\n\n def put_float(self, key, value, optionset=str()):\n if optionset == str():\n optionset = self.optionset\n self.db.execute(\"REPLACE INTO %s (ID,REALVALUE) VALUES('%s',%f);\" % (optionset, key,\n DbPreferences.__check_type(value, float)))\n self.db.commit()\n\n def get_float(self, key, defvalue, optionset=str()):\n if optionset == str():\n optionset = self.optionset\n value = self.db.execute(\"SELECT REALVALUE FROM %s WHERE ID='%s';\" % (optionset, key)).fetchone()\n if not value:\n value = DbPreferences.__check_type(defvalue, float)\n else:\n value = value[0]\n return value\n\n def put_string(self, key, value, optionset=str()):\n if optionset == str():\n optionset = self.optionset\n self.db.execute(\"REPLACE INTO %s (ID,STR) VALUES('%s','%s');\" % (optionset, key,\n DbPreferences.__check_type(value, str)))\n self.db.commit()\n\n def get_string(self, key, defvalue, optionset=str()):\n if optionset == str():\n optionset = self.optionset\n value = self.db.execute(\"SELECT STR FROM %s WHERE ID='%s';\" % (optionset, key)).fetchone()\n if not value:\n value = DbPreferences.__check_type(defvalue, str)\n else:\n value = value[0]\n return value\n\n def delete_key(self, key, optionset=str()):\n if optionset == str():\n optionset = self.optionset\n self.db.execute(\"DELETE FROM %s WHERE ID='%s';\" % (optionset, key))\n\n def delete_optionset(self, optionset=str()):\n if optionset == str():\n optionset = self.optionset\n self.db.execute(\"DROP TABLE %s;\" % optionset)\n\n @staticmethod\n def __check_type(value, _type):\n if type(value) != _type:\n raise TypeError(\"Required \" + str(_type) + \" passed: \" + str(type(value)))\n return value\n","sub_path":"System/Lib/DbPreferences.py","file_name":"DbPreferences.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"476394276","text":"import string\nimport time\n\ninstructions = []\ndata = dict()\nlastFreq = 0\npos = -1\nabc = list(string.ascii_lowercase)\ndone = False\n\nwith open(\"input.txt\", \"r\") as f:\n instructions = f.readlines()\n\nfor i in abc:\n data[i] = 0\n\ndef snd(param):\n global lastFreq\n print(\"Playing freq: \", data[param])\n lastFreq = data[param]\n\ndef set(param, param2):\n if param2 in abc:\n num = data[param2]\n data[param] = num\n else:\n data[param] = int(param2)\n\ndef mul(param, param2):\n num = 0\n if param2 in abc:\n num = data[param2]\n else:\n num = int(param2)\n data[param] = data[param] * num\n\ndef 
jgz(param, num):\n global pos\n val = 0\n if param in abc:\n val = data[param]\n else:\n val = int(param)\n if val > 0:\n print(\"Jump: \", int(num))\n pos += int(num)-1\n print(\"After jump: \", pos)\n\ndef mod(param, param2):\n if param2.strip() in abc:\n data[param] = data[param] % data[param2]\n else:\n val = int(param2)\n data[param] = data[param] % val\n\ndef add(param, param2):\n if param2 in abc:\n data[param] = data[param] + data[param2]\n else:\n data[param] = data[param] + int(param2)\n\ndef rcv(param):\n global done\n val = data[param]\n if val != 0:\n print(\"The last freq played was: \", lastFreq)\n done = True\n\nwhile not done:\n pos += 1\n instructions[pos] = instructions[pos].replace(\"\\n\", \"\")\n split = instructions[pos].split(' ')\n\n #print(pos)\n #time.sleep(0.5)\n if split[0] == 'snd':\n snd(split[1])\n\n if split[0] == 'set':\n set(split[1], split[2])\n\n if split[0] == 'mul':\n mul(split[1], split[2])\n\n if split[0] == 'jgz':\n jgz(split[1], split[2])\n\n if split[0] == 'mod':\n mod(split[1], split[2])\n\n if split[0] == 'add':\n add(split[1], split[2])\n\n if split[0] == 'rcv':\n rcv(split[1])\n\"\"\"\n\ndata['a'] = 1\n\ndata['b'] = 2\n\npos = 10\n\njgz('a', -5)\n\nprint(pos)\n\"\"\"\n","sub_path":"Day 18/Part 1.py","file_name":"Part 1.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"433833749","text":"from shutil import which\r\nfrom tkinter import *\r\nimport subprocess\r\n\r\n# import messagebox from tkinter module\r\nimport tkinter.messagebox\r\n\r\n# create a tkinter root window\r\nroot = tkinter.Tk()\r\n\r\n# root window title and dimension\r\nroot.title(\"Software Check\")\r\nroot.geometry('400x100')\r\n\r\n\r\ndef onClick():\r\n\r\n if which(\"docker\"):\r\n docker_install = \"✔ SUCCESS\"\r\n else:\r\n docker_install = \"❌ NOT INSTALLED!!!\"\r\n if which(\"wsl\"):\r\n ret = subprocess.run([\"wsl\", \"test\", \"-f\", \"/etc/os-release\"])\r\n if ret.returncode != 0:\r\n wsl_install = \"❌ NOT INSTALLED!!!\"\r\n else:\r\n wsl_install = \"✔ SUCCESS\"\r\n else:\r\n wsl_install = \"❌ NOT INSTALLED!!!\"\r\n # ubuntu check\r\n check_ubuntu = subprocess.Popen([\"powershell.exe\", \".\\check_ubuntu.ps1\"], stdout=subprocess.PIPE,\r\n stdin=subprocess.PIPE, stderr=subprocess.PIPE)\r\n check_ubuntu = check_ubuntu.stdout.read().decode(\"utf-8\")\r\n check_ubuntu = check_ubuntu.strip()\r\n if check_ubuntu == \"INSTALLED\":\r\n ubuntu_install = \"✔ SUCCESS\"\r\n else:\r\n ubuntu_install = \"❌ NOT INSTALLED!!!\"\r\n message = [\"Docker: \" + docker_install, \"Wsl: \" + wsl_install, \"Ubuntu: \" + ubuntu_install]\r\n tkinter.messagebox.showinfo(\"Results\", \"\\n\".join(message))\r\n\r\n\r\nif __name__ == '__main__':\r\n l = Label(root, text=\"Active Buildings Software Dependency \\n\" \"Installation Check.\", font=(\"Arial\", 15))\r\n l.pack()\r\n # Create a Button\r\n button = Button(root, text=\"Check\", command=onClick, height=2, width=12)\r\n\r\n # Set the position of button on the top of window.\r\n button.pack(side='bottom')\r\n root.mainloop()\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"48015727","text":"import env\nimport unittest\n\nimport hardwarelibrary.communication.serialport as s_ports\nfrom hardwarelibrary.motion.sutterdevice import SutterDevice\n\n\nclass TestConnectSutter(unittest.TestCase):\n def 
testConnectDebugSutter(self):\n sutter = SutterDevice(serialNumber=\"debug\")\n # The port is not open until initializeDevice()\n sutter.doInitializeDevice()\n \n position = sutter.position()\n\n self.assertIsNotNone(position[0])\n self.assertIsNotNone(position[1])\n self.assertIsNotNone(position[2])\n\n def testMoveToWithDebugSutter(self):\n sutter = SutterDevice(\"debug\")\n sutter.doInitializeDevice()\n\n sutter.moveTo((0, 100, 4000))\n position = sutter.position()\n self.assertTrue(position[0] == 0)\n self.assertTrue(position[1] == 100)\n self.assertTrue(position[2] == 4000)\n\n @unittest.skip(\"Must have real sutter connected\")\n def testListStageDevices(self):\n sp = s_ports.SerialPort()\n ports = sp.matchPorts(idVendor=4930, idProduct=1)\n self.assertIsInstance(ports, list)\n self.assertTrue(ports)\n print(ports)\n # then we would try to match a port using the selected index. There is no function for that yet.\n sp.portPath = ports[0]\n sp.open(baudRate=128000)\n self.assertIsNotNone(sp.port) # self.assertTrue(sp.isOpen())\n sp.close()\n\n @unittest.skip(\"Must have real sutter connected\")\n def testConnectRealSutterWithSutterDeviceClass(self):\n sutter = SutterDevice()\n sp = s_ports.SerialPort()\n ports = sp.matchPorts(idVendor=4930, idProduct=1)\n portPath = ports[0]\n sutter.port = sp(portPath=portPath) # we will have to generalize the method sutter.doInitializeDevice\n self.assertIsNotNone(sutter.port)\n sutter.port.open()\n self.assertIsNotNone(sp.port)\n sutter.moveTo((10, 4000, 100))\n position = sutter.position()\n self.assertTrue(position[0] == 10)\n self.assertTrue(position[1] == 4000)\n self.assertTrue(position[2] == 100)\n sutter.doShutdownDevice()\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"hardwarelibrary/tests/testConnectSutter.py","file_name":"testConnectSutter.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"26256144","text":"from flask import Flask, request, jsonify\nimport json\nimport requests\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef index_html():\n return app.send_static_file('index.html')\n\n@app.route('/job/task', methods=['GET','POST'])\ndef new_task():\n if request.method == 'GET':\n return app.send_static_file('new_task.html')\n else:\n global task_id\n new_task = dict()\n new_task['id'] = task_id\n new_task['node'] = task_id % 2\n if new_task['node'] == 0:\n slave = 'http://162.105.175.179:9090/newtask'\n else:\n slave = 'http://162.105.175.61:9090/newtask'\n new_task['status'] = 'Pending'\n new_task['message'] = ''\n task_id = task_id + 1\n\n new_task['taskName'] = request.form.get('taskName')\n new_task['commandLine'] = request.form.get('commandLine')\n new_task['packagePath'] = request.form.get('packagePath','default')\n new_task['timeout'] = request.form.get('timeout','default')\n new_task['packagePath'] = request.form.get('packagePath','default')\n new_task['maxRetryCount'] = request.form.get('maxRetryCount','default')\n new_task['imageId'] = request.form.get('imageId','default')\n new_task['cpu'] = request.form.get('cpu','default')\n new_task['memory'] = request.form.get('memory','default')\n with open('../gfs/'+new_task['taskName']+'.json', 'w+') as task_info:\n json.dump(new_task, task_info)\n requests.post(slave, data={'taskName':new_task['taskName']})\n return jsonify(\"{'task_id':\"+str(task_id)+\"}\")\n\n@app.route('/job/status', methods=['GET','POST'])\ndef task_status():\n if request.method == 
'GET':\n return app.send_static_file('task_status.html')\n else:\n taskName = request.form.get('taskName')\n task_status = dict()\n with open('../gfs/'+taskName+'.json', 'r') as task_info_json:\n task_info = json.load(task_info_json)\n task_status['taskName'] = task_info['taskName']\n task_status['node'] = task_info['node']\n task_status['status'] = task_info['status']\n with open('../gfs/'+taskName+'.log', 'r') as task_output:\n task_status['message'] = task_output.read(1000)\n return jsonify(task_status)\n\n@app.route('/job/kill', methods=['GET','POST'])\ndef kill_task():\n if request.method == 'GET':\n return app.send_static_file('kill_task.html')\n else:\n taskName = request.form.get('taskName')\n with open('../gfs/'+taskName+'.json', 'r') as task_info_json:\n task_info = json.load(task_info_json)\n node = task_info['node']\n if node == 0:\n slave = 'http://162.105.175.179:9090/killtask'\n else:\n slave = 'http://162.105.175.61:9090/killtask'\n requests.post(slave, data={'taskName':taskName})\n task_status = dict()\n with open('../gfs/'+taskName+'.json', 'r') as task_info_json:\n task_info = json.load(task_info_json)\n task_status['taskName'] = task_info['taskName']\n task_status['node'] = task_info['node']\n task_status['status'] = task_info['status']\n\n return jsonify(task_status)\n\n\n\n\nif __name__ == '__main__':\n task_id = 0\n app.run(host='0.0.0.0', port=8080, debug=True)\n","sub_path":"第6次作业/src/Master/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"116705875","text":"#!/usr/bin/env python\n\"\"\"\nUnimodal distribution (Aniso Shielding Lineshape Inversion)\n===========================================================\n\"\"\"\n# %%\n# The following example demonstrates the statistical learning based determination of\n# the nuclear shielding tensor parameters from a one-dimensional cross-section of a\n# magic-angle flipping (MAF) spectrum. In this example, we use a synthetic MAF\n# lineshape from a unimodal tensor distribution.\n#\n# Before getting started\n# ----------------------\n#\n# Import all relevant packages.\nimport csdmpy as cp\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom mrinversion.kernel.nmr import ShieldingPALineshape\nfrom mrinversion.linear_model import SmoothLasso, SmoothLassoCV, TSVDCompression\nfrom mrinversion.utils import get_polar_grids\n\n# Setup for the matplotlib figures\n\n\n# function for 2D x-y plot.\ndef plot2D(ax, csdm_object, title=\"\"):\n # convert the dimension coordinates of the csdm_object from Hz to pmm.\n _ = [item.to(\"ppm\", \"nmr_frequency_ratio\") for item in csdm_object.dimensions]\n\n levels = (np.arange(9) + 1) / 10\n ax.contourf(csdm_object, cmap=\"gist_ncar\", levels=levels)\n ax.grid(None)\n ax.set_title(title)\n get_polar_grids(ax)\n ax.set_aspect(\"equal\")\n\n\n# %%\n# Dataset setup\n# -------------\n#\n# Import the dataset\n# ''''''''''''''''''\n#\n# Load the dataset. Here, we import the dataset as a CSDM data-object.\n\n# the 1D MAF cross-section data in csdm format\ndomain = \"https://sandbox.zenodo.org/record/1065347/files\"\nfilename = f\"{domain}/puxfgdh25rru1q3li124anylkgup8rdp.csdf\"\ndata_object = cp.load(filename)\n\n# convert the data dimension from `Hz` to `ppm`.\ndata_object.dimensions[0].to(\"ppm\", \"nmr_frequency_ratio\")\n\n# %%\n# The variable ``data_object`` holds the 1D MAF cross-section. 
For comparison, let's\n# also import the true tensor parameter distribution from which the synthetic 1D pure\n# anisotropic MAF cross-section line-shape is simulated.\ndatafile = f\"{domain}/s5wpm26w4cv3w64qjhouqu458ch4z0nd.csdf\"\ntrue_data_object = cp.load(datafile)\n\n# %%\n# The plot of the 1D MAF cross-section along with the 2D true tensor parameter\n# distribution of the synthetic dataset is shown below.\n\n# the plot of the 1D MAF cross-section dataset.\n_, ax = plt.subplots(1, 2, figsize=(9, 3.5), subplot_kw={\"projection\": \"csdm\"})\nax[0].plot(data_object)\nax[0].invert_xaxis()\n\n# the plot of the true tensor distribution.\nplot2D(ax[1], true_data_object, title=\"True distribution\")\nplt.tight_layout()\nplt.show()\n\n# %%\n# Linear Inversion setup\n# ----------------------\n#\n# Dimension setup\n# '''''''''''''''\n#\n# **Anisotropic-dimension:** The dimension of the dataset that holds the pure\n# anisotropic frequency contributions, which in this case, is the only dimension.\nanisotropic_dimension = data_object.dimensions[0]\n\n# %%\n# **x-y dimensions:** The two inverse dimensions corresponding to the `x` and\n# `y`-axis of the `x`-`y` grid.\ninverse_dimension = [\n cp.LinearDimension(count=25, increment=\"370 Hz\", label=\"x\"), # the `x`-dimension.\n cp.LinearDimension(count=25, increment=\"370 Hz\", label=\"y\"), # the `y`-dimension.\n]\n\n# %%\n# Generating the kernel\n# '''''''''''''''''''''\n#\n# For MAF datasets, the line-shape kernel corresponds to the pure nuclear shielding\n# anisotropy line-shapes. Use the\n# :class:`~mrinversion.kernel.nmr.ShieldingPALineshape` class to generate a\n# shielding line-shape kernel.\nlineshape = ShieldingPALineshape(\n anisotropic_dimension=anisotropic_dimension,\n inverse_dimension=inverse_dimension,\n channel=\"29Si\",\n magnetic_flux_density=\"9.4 T\",\n rotor_angle=\"90 deg\",\n rotor_frequency=\"14 kHz\",\n number_of_sidebands=4,\n)\n\n# %%\n# Here, ``lineshape`` is an instance of the\n# :class:`~mrinversion.kernel.nmr.ShieldingPALineshape` class. The required\n# arguments of this class are the `anisotropic_dimension`, `inverse_dimension`, and\n# `channel`. We have already defined the first two arguments in the previous\n# sub-section. The value of the `channel` argument is the nucleus observed in the MAF\n# experiment. In this example, this value is '29Si'.\n# The remaining arguments, such as the `magnetic_flux_density`, `rotor_angle`,\n# and `rotor_frequency`, are set to match the conditions under which the spectrum\n# was acquired. The value of the `number_of_sidebands` argument is the number of\n# sidebands calculated for each line-shape within the kernel.\n#\n# Once the ShieldingPALineshape instance is created, use the\n# :meth:`~mrinversion.kernel.nmr.ShieldingPALineshape.kernel` method of the\n# instance to generate the MAF line-shape kernel.\nK = lineshape.kernel(supersampling=1)\n\n# %%\n# Data Compression\n# ''''''''''''''''\n#\n# Data compression is optional but recommended. It may reduce the size of the\n# inverse problem and, thus, further computation time.\nnew_system = TSVDCompression(K, data_object)\ncompressed_K = new_system.compressed_K\ncompressed_s = new_system.compressed_s\n\nprint(f\"truncation_index = {new_system.truncation_index}\")\n\n# %%\n# Solving the inverse problem\n# ---------------------------\n#\n# Smooth-LASSO problem\n# ''''''''''''''''''''\n#\n# Solve the smooth-lasso problem. You may choose to skip this step and proceed to the\n# statistical learning method. 
Usually, the statistical learning method is a\n# time-consuming process that solves the smooth-lasso problem over a range of\n# predefined hyperparameters.\n# If you are unsure what range of hyperparameters to use, you can use this step for\n# a quick look into the possible solution by giving a guess value for the :math:`\\alpha`\n# and :math:`\\lambda` hyperparameters, and then decide on the hyperparameters range\n# accordingly.\n\n# guess alpha and lambda values.\ns_lasso = SmoothLasso(alpha=5e-5, lambda1=5e-6, inverse_dimension=inverse_dimension)\ns_lasso.fit(K=compressed_K, s=compressed_s)\nf_sol = s_lasso.f\n\n# %%\n# Here, ``f_sol`` is the solution corresponding to hyperparameters\n# :math:`\\alpha=5\\times10^{-5}` and :math:`\\lambda=5\\times 10^{-6}`. The plot of this\n# solution is\n_, ax = plt.subplots(1, 2, figsize=(9, 3.5), subplot_kw={\"projection\": \"csdm\"})\n\n# the plot of the guess tensor distribution solution.\nplot2D(ax[0], f_sol / f_sol.max(), title=\"Guess distribution\")\n\n# the plot of the true tensor distribution.\nplot2D(ax[1], true_data_object, title=\"True distribution\")\nplt.tight_layout()\nplt.show()\n\n# %%\n# Predicted spectrum\n# ''''''''''''''''''\n#\n# You may also evaluate the predicted spectrum from the above solution following\nresiduals = s_lasso.residuals(K, data_object)\npredicted_spectrum = data_object - residuals\n\nplt.figure(figsize=(4, 3))\nplt.subplot(projection=\"csdm\")\nplt.plot(data_object, color=\"black\", label=\"spectrum\") # the original spectrum\nplt.plot(predicted_spectrum, color=\"red\", label=\"prediction\") # the predicted spectrum\nplt.gca().invert_xaxis()\nplt.legend()\nplt.tight_layout()\nplt.show()\n\n# %%\n# As you can see from the predicted spectrum, our guess isn't far from the optimum\n# hyperparameters. Let's create a search grid about the guess hyperparameters and run\n# a cross-validation method for selection.\n#\n# Statistical learning of the tensors\n# -----------------------------------\n#\n# Smooth LASSO cross-validation\n# '''''''''''''''''''''''''''''\n#\n# Create a guess range of values for the :math:`\\alpha` and :math:`\\lambda`\n# hyperparameters.\n# The following code generates a range of :math:`\\lambda` and :math:`\\alpha` values\n# that are uniformly sampled on the log scale.\nlambdas = 10 ** (-5.2 - 1 * (np.arange(6) / 5))\nalphas = 10 ** (-4 - 2 * (np.arange(6) / 5))\n\n# set up cross validation smooth lasso method\ns_lasso_cv = SmoothLassoCV(\n alphas=alphas,\n lambdas=lambdas,\n inverse_dimension=inverse_dimension,\n sigma=0.005,\n folds=10,\n)\n# run the fit using the compressed kernel and compressed signal.\ns_lasso_cv.fit(compressed_K, compressed_s)\n\n# %%\n# The optimum hyper-parameters\n# ''''''''''''''''''''''''''''\n#\n# Use the :attr:`~mrinversion.linear_model.SmoothLassoCV.hyperparameters` attribute of\n# the instance for the optimum hyper-parameters, :math:`\\alpha` and :math:`\\lambda`,\n# determined from the cross-validation.\nprint(s_lasso_cv.hyperparameters)\n\n# %%\n# The cross-validation surface\n# ''''''''''''''''''''''''''''\n#\n# Optionally, you may want to visualize the cross-validation error curve/surface. Use\n# the :attr:`~mrinversion.linear_model.SmoothLassoCV.cross_validation_curve` attribute\n# of the instance, as follows. 
The cross-validation metric is the mean square error\n# (MSE).\ncv_curve = s_lasso_cv.cross_validation_curve\n\n# plot of the cross-validation curve\nplt.figure(figsize=(5, 3.5))\nax = plt.subplot(projection=\"csdm\")\nax.contour(np.log10(s_lasso_cv.cross_validation_curve), levels=25)\nax.scatter(\n -np.log10(s_lasso_cv.hyperparameters[\"alpha\"]),\n -np.log10(s_lasso_cv.hyperparameters[\"lambda\"]),\n marker=\"x\",\n color=\"k\",\n)\nplt.tight_layout(pad=0.5)\nplt.show()\n\n# %%\n# The optimum solution\n# ''''''''''''''''''''\n#\n# The :attr:`~mrinversion.linear_model.SmoothLassoCV.f` attribute of the instance holds\n# the solution.\nf_sol = s_lasso_cv.f\n\n# %%\n# The corresponding plot of the solution, along with the true tensor distribution, is\n# shown below.\n_, ax = plt.subplots(1, 2, figsize=(9, 3.5), subplot_kw={\"projection\": \"csdm\"})\n\n# the plot of the tensor distribution solution.\nplot2D(ax[0], f_sol / f_sol.max(), title=\"Optimum distribution\")\n\n# the plot of the true tensor distribution.\nplot2D(ax[1], true_data_object, title=\"True distribution\")\nplt.tight_layout()\nplt.show()\n","sub_path":"examples/synthetic/plot_1D_3_MAF.py","file_name":"plot_1D_3_MAF.py","file_ext":"py","file_size_in_byte":9489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"144423980","text":"# Copyright 2013 – present by the SalishSeaCast Project contributors\n# and The University of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n\"\"\"\nimport os\nfrom collections import namedtuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytz\nfrom matplotlib.dates import DateFormatter\nfrom salishsea_tools import data_tools, places, nc_tools, teos_tools\n\nimport nowcast.figures.website_theme\nfrom nowcast.figures import shared\n\n\ndef make_figure(\n node_name,\n grid_T_hr,\n dev_grid_T_hr,\n timezone,\n mesh_mask,\n dev_mesh_mask,\n figsize=(8, 10),\n theme=nowcast.figures.website_theme,\n):\n \"\"\"Plot the temperature and salinity time series of observations and model\n results at an ONC VENUS node.\n\n :arg str node_name: Ocean Networks Canada (ONC) VENUS node name;\n must be a key in\n :py:obj:`salishsea_tools.places.PLACES`.\n\n :arg grid_T_hr: Hourly tracer results dataset from production NEMO run.\n :type grid_T_hr: :class:`netCDF4.Dataset`\n\n :arg dev_grid_T_hr: Hourly tracer results dataset from development NEMO run.\n :type dev_grid_T_hr: :class:`netCDF4.Dataset`\n\n :arg str timezone: Timezone to use for display of model results.\n\n :arg mesh_mask: NEMO mesh mask for production NEMO run.\n :type mesh_mask: :class:`netCDF4.Dataset`\n\n :arg dev_mesh_mask: NEMO mesh mask for development NEMO run.\n :type dev_mesh_mask: :class:`netCDF4.Dataset`\n\n :arg 2-tuple figsize: Figure size (width, height) in inches.\n\n :arg theme: Module-like object that defines the style elements for the\n figure. 
See :py:mod:`nowcast.figures.website_theme` for an\n example.\n\n :returns: :py:class:`matplotlib.figure.Figure`\n \"\"\"\n plot_data = _prep_plot_data(\n node_name, grid_T_hr, dev_grid_T_hr, timezone, mesh_mask, dev_mesh_mask\n )\n fig, (ax_sal, ax_temp) = _prep_fig_axes(figsize, theme)\n _plot_salinity_time_series(ax_sal, node_name, plot_data, theme)\n _plot_temperature_time_series(ax_temp, plot_data, timezone, theme)\n _attribution_text(ax_temp, theme)\n return fig\n\n\ndef _prep_plot_data(\n place, grid_T_hr, dev_grid_T_hr, timezone, mesh_mask, dev_mesh_mask\n):\n try:\n j, i = places.PLACES[place][\"NEMO grid ji\"]\n except KeyError as e:\n raise KeyError(\n \"place name or info key not found in salishsea_tools.places.PLACES: {e}\"\n )\n node_depth = places.PLACES[place][\"depth\"]\n station_code = places.PLACES[place][\"ONC stationCode\"]\n # Production model results\n model_time = nc_tools.timestamp(\n grid_T_hr, range(grid_T_hr.variables[\"time_counter\"].size)\n )\n try:\n # NEMO-3.4 mesh mask\n gdept = mesh_mask.variables[\"gdept\"]\n except KeyError:\n # NEMO-3.6 mesh mask\n gdept = mesh_mask.variables[\"gdept_0\"]\n tracer_depths = gdept[..., j, i][0]\n tracer_mask = mesh_mask.variables[\"tmask\"][..., j, i][0]\n try:\n # NEMO-3.4 mesh mask\n gdepw = mesh_mask.variables[\"gdepw\"]\n except KeyError:\n # NEMO-3.6 mesh mask\n gdepw = mesh_mask.variables[\"gdepw_0\"]\n w_depths = gdepw[..., j, i][0]\n salinity_profiles = grid_T_hr.variables[\"vosaline\"][..., j, i]\n temperature_profiles = grid_T_hr.variables[\"votemper\"][..., j, i]\n model_salinity_ts = _calc_results_time_series(\n salinity_profiles,\n model_time,\n node_depth,\n timezone,\n tracer_depths,\n tracer_mask,\n w_depths,\n )\n model_temperature_ts = _calc_results_time_series(\n temperature_profiles,\n model_time,\n node_depth,\n timezone,\n tracer_depths,\n tracer_mask,\n w_depths,\n )\n # Development model results\n dev_model_time = nc_tools.timestamp(\n dev_grid_T_hr, range(grid_T_hr.variables[\"time_counter\"].size)\n )\n tracer_depths = dev_mesh_mask.variables[\"gdept_0\"][..., j, i][0]\n tracer_mask = dev_mesh_mask.variables[\"tmask\"][..., j, i][0]\n w_depths = dev_mesh_mask.variables[\"gdepw_0\"][..., j, i][0]\n salinity_profiles = dev_grid_T_hr.variables[\"vosaline\"][..., j, i]\n temperature_profiles = dev_grid_T_hr.variables[\"votemper\"][..., j, i]\n dev_model_salinity_ts = _calc_results_time_series(\n salinity_profiles,\n dev_model_time,\n node_depth,\n timezone,\n tracer_depths,\n tracer_mask,\n w_depths,\n )\n dev_model_temperature_ts = _calc_results_time_series(\n temperature_profiles,\n dev_model_time,\n node_depth,\n timezone,\n tracer_depths,\n tracer_mask,\n w_depths,\n )\n # Observations\n onc_data = data_tools.get_onc_data(\n \"scalardata\",\n \"getByStation\",\n os.environ[\"ONC_USER_TOKEN\"],\n station=station_code,\n deviceCategory=\"CTD\",\n sensors=\"salinity,temperature\",\n dateFrom=data_tools.onc_datetime(model_time[0], \"utc\"),\n dateTo=data_tools.onc_datetime(model_time[-1], \"utc\"),\n )\n plot_data = namedtuple(\n \"PlotData\",\n \"model_salinity_ts, model_temperature_ts, \"\n \"dev_model_salinity_ts, dev_model_temperature_ts, \"\n \"ctd_data\",\n )\n return plot_data(\n model_salinity_ts=model_salinity_ts,\n model_temperature_ts=model_temperature_ts,\n dev_model_salinity_ts=dev_model_salinity_ts,\n dev_model_temperature_ts=dev_model_temperature_ts,\n ctd_data=data_tools.onc_json_to_dataset(onc_data),\n )\n\n\ndef _calc_results_time_series(\n tracer,\n model_time,\n node_depth,\n 
timezone,\n tracer_depths,\n tracer_mask,\n w_depths,\n psu_to_teos=False,\n):\n time_series = namedtuple(\"TimeSeries\", \"var, time\")\n if psu_to_teos:\n var = teos_tools.psu_teos(\n [\n shared.interpolate_tracer_to_depths(\n tracer[i, :], tracer_depths, node_depth, tracer_mask, w_depths\n )\n for i in range(tracer.shape[0])\n ]\n )\n else:\n var = [\n shared.interpolate_tracer_to_depths(\n tracer[i, :], tracer_depths, node_depth, tracer_mask, w_depths\n )\n for i in range(tracer.shape[0])\n ]\n return time_series(var=var, time=[t.to(timezone) for t in model_time])\n\n\ndef _prep_fig_axes(figsize, theme):\n fig, (ax_sal, ax_temp) = plt.subplots(\n 2,\n 1,\n figsize=figsize,\n sharex=True,\n facecolor=theme.COLOURS[\"figure\"][\"facecolor\"],\n )\n fig.autofmt_xdate()\n ax_sal.set_facecolor(theme.COLOURS[\"axes\"][\"background\"])\n ax_temp.set_facecolor(theme.COLOURS[\"axes\"][\"background\"])\n return fig, (ax_sal, ax_temp)\n\n\ndef _plot_salinity_time_series(ax, place, plot_data, theme):\n ctd_data = plot_data.ctd_data\n qaqc_mask = ctd_data.salinity.attrs[\"qaqcFlag\"] == 1\n ax.plot(\n ctd_data.salinity.sampleTime[qaqc_mask],\n ctd_data.salinity[qaqc_mask],\n linewidth=2,\n label=\"Observations\",\n color=theme.COLOURS[\"time series\"][\"VENUS CTD salinity\"],\n )\n ax.plot(\n [t.datetime for t in plot_data.model_salinity_ts.time],\n plot_data.model_salinity_ts.var,\n linewidth=2,\n label=\"Model\",\n color=theme.COLOURS[\"time series\"][\"VENUS node model salinity\"],\n alpha=0.7,\n )\n ax.plot(\n [t.datetime for t in plot_data.dev_model_salinity_ts.time],\n plot_data.dev_model_salinity_ts.var,\n linewidth=2,\n label=\"Dev Model\",\n color=theme.COLOURS[\"time series\"][\"VENUS node dev model salinity\"],\n alpha=0.5,\n )\n _salinity_axis_labels(ax, place, plot_data, theme)\n\n\ndef _salinity_axis_labels(ax, place, plot_data, theme):\n first_model_day = plot_data.model_salinity_ts.time[0]\n last_model_day = plot_data.model_salinity_ts.time[-1]\n title_dates = first_model_day.format(\"DD-MMM-YYYY\")\n if first_model_day.day != last_model_day.day:\n title_dates = \" and \".join((title_dates, last_model_day.format(\"DD-MMM-YYYY\")))\n ax.set_title(\n f'VENUS {place.title()} {places.PLACES[place][\"depth\"]}m {title_dates}',\n fontproperties=theme.FONTS[\"axes title\"],\n color=theme.COLOURS[\"text\"][\"axes title\"],\n )\n ax.set_xlim(\n plot_data.model_salinity_ts.time[0].datetime,\n plot_data.model_salinity_ts.time[-1].datetime,\n )\n ax.set_ylabel(\n \"Salinity [g/kg]\",\n fontproperties=theme.FONTS[\"axis\"],\n color=theme.COLOURS[\"text\"][\"axis\"],\n )\n ymin, ymax = ax.get_ylim()\n ax.set_ylim(np.floor(ymin) - 1, np.ceil(ymax) + 1)\n ax.legend(loc=\"best\")\n ax.grid(axis=\"both\")\n theme.set_axis_colors(ax)\n\n\ndef _plot_temperature_time_series(ax, plot_data, timezone, theme):\n ctd_data = plot_data.ctd_data\n qaqc_mask = ctd_data.salinity.attrs[\"qaqcFlag\"] == 1\n ax.plot(\n ctd_data.temperature.sampleTime[qaqc_mask],\n ctd_data.temperature[qaqc_mask],\n linewidth=2,\n label=\"Observations\",\n color=theme.COLOURS[\"time series\"][\"VENUS CTD temperature\"],\n )\n ax.plot(\n [t.datetime for t in plot_data.model_temperature_ts.time],\n plot_data.model_temperature_ts.var,\n linewidth=2,\n label=\"Model\",\n color=theme.COLOURS[\"time series\"][\"VENUS node model temperature\"],\n alpha=0.7,\n )\n ax.plot(\n [t.datetime for t in plot_data.dev_model_temperature_ts.time],\n plot_data.dev_model_temperature_ts.var,\n linewidth=2,\n label=\"Dev Model\",\n 
color=theme.COLOURS[\"time series\"][\"VENUS node dev model temperature\"],\n alpha=0.5,\n )\n tzname = plot_data.model_temperature_ts.time[0].datetime.tzname()\n _temperature_axis_labels(ax, plot_data, timezone, tzname, theme)\n\n\ndef _temperature_axis_labels(ax, plot_data, timezone, tzname, theme):\n ax.set_xlabel(\n f\"Date and Time [{tzname}]\",\n fontproperties=theme.FONTS[\"axis\"],\n color=theme.COLOURS[\"text\"][\"axis\"],\n )\n ax.set_xlim(\n plot_data.model_temperature_ts.time[0].datetime,\n plot_data.model_temperature_ts.time[-1].datetime,\n )\n ax.xaxis.set_major_formatter(\n DateFormatter(\"%d-%b %H:%M\", tz=pytz.timezone(timezone))\n )\n ax.set_ylabel(\n \"Temperature [°C]\",\n fontproperties=theme.FONTS[\"axis\"],\n color=theme.COLOURS[\"text\"][\"axis\"],\n )\n ymin, ymax = ax.get_ylim()\n ax.set_ylim(np.floor(ymin) - 1, np.ceil(ymax) + 1)\n ax.legend(loc=\"best\")\n ax.grid(axis=\"both\")\n theme.set_axis_colors(ax)\n\n\ndef _attribution_text(ax, theme):\n ax.text(\n 1,\n -0.35,\n \"Observations from Ocean Networks Canada\",\n horizontalalignment=\"right\",\n verticalalignment=\"top\",\n transform=ax.transAxes,\n fontproperties=theme.FONTS[\"figure annotation small\"],\n color=theme.COLOURS[\"text\"][\"figure annotation\"],\n )\n","sub_path":"nowcast/figures/comparison/compare_venus_ctd.py","file_name":"compare_venus_ctd.py","file_ext":"py","file_size_in_byte":11447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"164087221","text":"from __future__ import absolute_import\n\nimport os\nimport shutil\nimport sys\nfrom distutils.dir_util import copy_tree\nfrom zipfile import ZipFile\n\nimport requests\n\nWORKER_URL = \"https://github.com/ianfab/fishtest/archive/master.zip\"\n\n\ndef restart(worker_dir):\n \"\"\"Restarts the worker, using the same arguments\"\"\"\n args = sys.argv[:]\n args.insert(0, sys.executable)\n if sys.platform == \"win32\":\n args = ['\"{}\"'.format(arg) for arg in args]\n\n os.chdir(worker_dir)\n os.execv(sys.executable, args) # This does not return !\n\n\ndef update():\n worker_dir = os.path.dirname(os.path.realpath(__file__))\n update_dir = os.path.join(worker_dir, \"update\")\n if not os.path.exists(update_dir):\n os.makedirs(update_dir)\n\n worker_zip = os.path.join(update_dir, \"wk.zip\")\n with open(worker_zip, \"wb+\") as f:\n f.write(requests.get(WORKER_URL).content)\n\n zip_file = ZipFile(worker_zip)\n zip_file.extractall(update_dir)\n zip_file.close()\n prefix = os.path.commonprefix([n.filename for n in zip_file.infolist()])\n worker_src = os.path.join(update_dir, os.path.join(prefix, \"worker\"))\n copy_tree(worker_src, worker_dir)\n # the worker runs games from the \"testing\" folder so change the folder\n os.chdir(worker_dir)\n shutil.rmtree(update_dir)\n testing_dir = os.path.join(worker_dir, \"testing\")\n if os.path.exists(testing_dir):\n shutil.rmtree(testing_dir)\n\n restart(worker_dir)\n","sub_path":"worker/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"437769859","text":"from __future__ import division\nimport pickle\nimport io\nimport os, sys\nimport csv\nimport re\nimport nltk\nfrom string import punctuation\n\ndef removePunctuation(text):\n '''\n Removes punctuation, changes to lower case and strips leading and trailing\n spaces.\n\n Args:\n text (str): Input string.\n\n Returns:\n (str): The cleaned up string.\n '''\n text.strip()\n return 
''.join(c for c in text.encode('ascii', 'ignore') if c not in punctuation or c in ['#','@','.','\\n'])\n\ndef serialize(obj, ser_filename, isSerializingList=True):\n '''\n Saves a python object to disk.\n\n If the object being dealt with is a list, the contents of thenew list get\n added to the existing serialized list. Otherwise, the new object ovewrites\n the old one.\n\n Args:\n obj: Object to save.\n ser_filename: Filename to save object with on disk.\n isSerializingList: Boolean denoting whether object to be saved is a list\n or not.\n '''\n if(isSerializingList):\n # If pre-existing serialization, get its list representation and add new one to it\n if(os.path.isfile(ser_filename)):\n stored_list = unserialize(ser_filename)\n stored_list.extend(obj)\n f = open(ser_filename, 'wb')\n pickle.dump(stored_list, f)\n f.close()\n else:\n f = open(ser_filename, 'wb')\n pickle.dump(obj, f)\n f.close()\n else:\n f = open(ser_filename, 'wb')\n pickle.dump(obj, f)\n f.close()\n\ndef unserialize(ser_filename):\n '''\n Loads a python object from disk.\n\n Returns:\n The python object at the specified path or None if none is found.\n '''\n if(not os.path.isfile(ser_filename)):\n return None\n else:\n f = open(ser_filename, 'rb')\n obj = pickle.load(f)\n f.close()\n return obj\n\ndef map_node_to_dataset_id(content_embedding_data):\n '''\n This functions returns a dictionary which maps a graph node id (which is\n just its position in a list) to the contract or supplier id for that node.\n i.e. The dictionary is of the form {0:'', 1:'', ... , :''}\n\n @param adjmat adjacency matrix as produced by the \"adjacency_matrix(similarity_matrix, threshold)\"\n function and seen in the corresponding output csv.\n @param content_embedding_data content embeddings as produced by either the \"get_train_embeddings()\"\n or \"get_test_embeddings()\" and the corresponding csv files.\n @return mapping dict\n '''\n mapping = {}\n for idx in range(len(content_embedding_data)):\n mapping[idx] = content_embedding_data[idx]['id']\n\n return mapping\n","sub_path":"Supplier-Contract Embedding/Helper.py","file_name":"Helper.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"360575882","text":"__author__ = 'mesh'\nimport re\nimport pymorphy2\n\nmorph = pymorphy2.MorphAnalyzer()\n\n# text = open('input.txt', encoding='utf8').read()\ntext = 'Это зависит от обстоятельств. 
' \\\n 'Вася много работает на заводе.'\n\ndef tokzr_WORD(txt): return (re.findall(r'(?ms)\\W*(\\w+)', txt)) # split words\ndef tokzr_SENT(txt): return (re.findall(r'(?ms)\\s*(.*?(?:\\.|\\?|!))', txt)) # split sentences\ns = 0\nsentences = tokzr_SENT(text)\nfor sentence in sentences:\n w = 0\n words = tokzr_WORD(sentence)\n for word in words:\n # print(s, w, word)\n pos = morph.parse(word)\n # print(pos)\n for guess in pos:\n if 'VERB' in guess.tag:\n print(guess)\n\n w += 1\n s += 1\n\n\n","sub_path":"machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"167436317","text":"'''给出 n 代表生成括号的对数,请你写出一个函数,使其能够生成所有可能的并且有效的括号组合。\n例如,给出 n = 3,生成结果为:\n[\n \"((()))\",\n \"(()())\",\n \"(())()\",\n \"()(())\",\n \"()()()\"\n]\n'''\n'''采用回溯法,当'('的数量小于n时,向S中添加;当')'小于'('的数量时,S中添加')' '''\nclass Solution:\n def generateParenthesis(self,n):\n list1 = list()\n self.backtracks(n,list1, S='', l=0, r =0)\n return list1\n\n def backtracks(self,n,list1,S, l,r):\n if l < n:\n self.backtracks(n,list1,S+'(',l+1,r)\n if r < l :\n self.backtracks(n,list1,S+')',l,r+1)\n if len(S) == 2*n:\n list1.append(S)\n return\n\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.generateParenthesis(3))\n\n\n\n\n\n","sub_path":"LeetCode刷题/22 括号生成.py","file_name":"22 括号生成.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"61678844","text":"import argparse\nimport sys\nimport subprocess\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument('inputfile', type=str, help='input file')\nparser.add_argument('folder_output', type=str, help='folder output')\nparser.add_argument('max_peptide', type=int, help='how much peptides u want to use in the clustring')\nargs = parser.parse_args()\n\ninputfile = args.inputfile\noutputfolder = args.folder_output.rstrip('/') + '/'\nmax_peptide = args.max_peptide #how much peptides u want to use in the clustring\nseqs = \"seqs/\"\nthreshhold = 0.5 # identity\nn = 2\nl = 1\n\noutputfilename = inputfile[:inputfile.find('.')]\nwhile '/' in outputfilename:\n outputfilename = outputfilename[outputfilename.find('/')+1:]\nprint(outputfilename)\nfoldername = inputfile[:inputfile.find('/' + outputfilename)]\nwhile '/' in foldername:\n foldername = foldername[foldername.find('/')+1:]\n\n\n### clustring\noutputfile = outputfolder + outputfilename\nif not os.path.exists(outputfolder): os.mkdir(outputfolder)\ncmd = \"cd-hit -c \" + str(threshhold) + \" -n \" + str(n) + \" -l \" + str(l) + ' -i ' + inputfile + \" -o \" + outputfile\nsubprocess.call(cmd, shell=True)\n\n#what seqs in each cluster \nif not os.path.exists(outputfolder + seqs): os.mkdir(outputfolder + seqs)\n\nheader_prefix_to_seq = {} #short header to seq\nheader_prefix_to_header = {} #short header to header\nwith open(inputfile[:-len(\"TrimmedCysLoop.fas\")] + \"fs\", 'r') as f:\n for line in f:\n if line.startswith('>'):\n header_prefix = line[:line.find(\"Len\")] # e.g >448_Length_8_Repeats_309.9084062045127_Type_8\n header_prefix_to_header[header_prefix] = line\n header_prefix_to_seq[header_prefix] = \"\"\n else:\n header_prefix_to_seq[header_prefix] += line\n\n\nwith open(outputfile + \".clstr\", 'r') as clstrf:\n cluster_size = 0\n for line in clstrf:\n if line.startswith('>') or line == '':\n if cluster_size:\n filename = foldername + \"_Members_\" + str(pepcount) + \"_ClusterSize_\" + str(cluster_size) + 
filename\n with open(outputfolder + seqs + filename, 'w') as f:\n f.write(data)\n filename = '_' + line.lstrip('>').rstrip().replace(' ','_') + \".fas\" # e.g \">Cluster 0\" -> \"_Cluster_0.fs\" # + \n data = \"\"\n cluster_size = 0\n pepcount = 0\n else:\n if pepcount == max_peptide:\n continue\n header_prefix = line[line.find('>'):line.find(\"Len\")] # e.g 0\t14aa, >1423_Length_14_Repe... *\n pepcount += 1\n header = header_prefix_to_header[header_prefix]\n data += header + header_prefix_to_seq[header_prefix]\n cluster_size += float(header.split('_')[4]) # \">6365_Length_12_Repeats_9.15083876588128_Type_12\" -> 9.15083876588128","sub_path":"src/clustring.py","file_name":"clustring.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"468359611","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 4, kernel_size=5) # Conv2d(in_channels, out_channels, kernel_size, ...)\n self.pool = nn.MaxPool2d(2, 2) # MaxPool2d(kernel_size, stride=None, padding=0, ...)\n self.conv2 = nn.Conv2d(4, 4, kernel_size=5)\n self.fc = nn.Linear(4 * 4 * 4, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n # 28 * 28 -> 4 * 24 * 24 -> 4 * 12 * 12\n x = self.pool(F.relu(self.conv2(x)))\n # 4 * 12 * 12 -> 8 * 8 * 8 -> 8 * 4 * 4\n x = x.view(-1, 4 * 4 * 4)\n x = self.fc(x)\n return x\n\n\nif __name__ == '__main__':\n net = Net()\n input_ = torch.randn(10, 1, 28, 28)\n # print(input_)\n out = net(input_)\n print(out)","sub_path":"my_nn.py","file_name":"my_nn.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"388313581","text":"# Your code here\nimport os\n\n\n\ndef finder(files, queries):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n mapping = {}\n\n for filename in files:\n basename = os.path.basename(os.path.normpath(filename))\n if basename in mapping:\n mapping[basename].append(filename)\n else:\n mapping[basename] = [filename]\n \n result = []\n\n for query in queries:\n if query in mapping:\n result += mapping[query]\n\n return result\n\n\nif __name__ == \"__main__\":\n files = [\n '/bin/foo',\n '/bin/bar',\n '/usr/bin/baz'\n ]\n queries = [\n \"foo\",\n \"qux\",\n \"baz\"\n ]\n print(finder(files, queries))\n","sub_path":"hashtables/ex5/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"95728698","text":"def main():\n\t\"\"\"main method asks for user input and calls the \n\tgrammar checker method with user input\"\"\"\n\tinit_sentence = input(\"Please insert a sentence: \")\n\tsentence = init_sentence.split(\" \")\n\n\tfollow = isGrammarlyCorrect(sentence)\n\tprint(follow)\n\treturn follow\n\ndef isGrammarlyCorrect(s):\n\t\"\"\"This method recieves a list and compares it with the \n\tpossible combinations to form a sentence. 
if no possible rule is \n\tfollowed or the list has less than 3 words, the method returns False.\n\telse, returns true.\"\"\"\n\tperson_word = ['Nao', 'Shila']\n\tposition_word = ['above', 'below', 'left', 'right']\n\tobject_words = ['car', 'person', 'dog', 'diamond']\n\tbehaviour_words = ['recognise', 'detect', 'avoid', 'replace']\n\tpronoun_words = ['I', 'you', 'they']\n\ttexture_words = ['smooth', 'dimpled', 'rough']\n\n\tif (len(s) < 3):\n\t\treturn False\n\n\tif(s[0] in person_word):\n\t\t\n\t\tif(s[1] in behaviour_words):\n\t\t\t\n\t\t\tif(s[2] in object_words):\n\t\t\t\treturn True\n\n\t\t\telif(s[2] in texture_words and s[3] in object_words):\n\t\t\t\treturn True\n\n\t\t\telif(s[2] in position_word):\n\t\t\t\treturn True\n\t\t\t\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\n\t\telse:\n\t\t\treturn False\n\n\telif(s[0] in pronoun_words):\n\n\t\tif(s[1] in behaviour_words):\n\t\t\t\n\t\t\tif(s[2] in position_word or s[2] in object_words):\n\t\t\t\treturn True\n\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\n\t\telse:\n\t\t\treturn False\n\n\n\telse:\n\t\treturn False\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n\n\n\n\n","sub_path":"Portfolio 1/Lab1b.py","file_name":"Lab1b.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"456444283","text":"\nimport gym\nimport os\nimport custom_ant\nfrom PlotCallBack import PlotCallBack\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom stable_baselines3 import A2C\nfrom stable_baselines3.common.evaluation import evaluate_policy\n\nfrom stable_baselines.bench import Monitor\nfrom stable_baselines3.common import results_plotter\nfrom stable_baselines3.common.results_plotter import load_results, ts2xy\nfrom stable_baselines3.common.callbacks import BaseCallback\n\n## User Parameters ##\nENV_ID='Block-v2'\nTOTAL_TIMESTEPS = 5e5\nTRAIN_MODE='DEFAULT' # Choose from OPTUNA, DEFAULT, or BOTH\nEVALUATE = True # False will skip the evaluation step\n#####################\n\n\nbag_dir = 'bag/'\nos.makedirs(bag_dir, exist_ok=True)\nscores = []\nresults = []\n\ndef train_model(optuna, env, bag_dir):\n \n if optuna:\n print(\"Training with Optuna-optimized hyperparameters\")\n prefix = \"optuna/\"\n log_dir = bag_dir + prefix\n env, callback = setup_callback(log_dir, env)\n model = A2C('MlpPolicy', env, \n gamma = 0.9949651748245363,\n n_steps = 2,\n ent_coef = 1.4874340288559667e-06,\n max_grad_norm = 0.7428181598534858,\n learning_rate = 4.7667230267782784e-05,\n gae_lambda = 0.9868067509887242, \n verbose=0,\n\n )\n else:\n print(\"Training with stable-baselines3 default hyperparameters\")\n prefix = \"default/\"\n log_dir = bag_dir + prefix\n env, callback = setup_callback(log_dir, env)\n model = A2C('MlpPolicy', env, \n gamma = 0.99,\n n_steps = 5,\n ent_coef = 0.0,\n max_grad_norm = 0.5,\n learning_rate = 0.0007,\n gae_lambda = 1.0,\n verbose=0\n )\n \n # Train the agent\n model.learn(total_timesteps=int(TOTAL_TIMESTEPS), callback=callback)\n best_model = A2C.load(os.getcwd() + \"/\" + log_dir + \"/best_model\", verbose=1)\n \n if EVALUATE: eval(best_model, prefix)\n env.close()\n return\n\n# Evaluate the trained agent\ndef eval(model, prefix):\n print(\"Evaluating the best \" + prefix + \" model\")\n mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)\n scores.append(prefix + \": mean_reward = \" + str(mean_reward) + \" +/- \" + str(std_reward))\n\ndef setup_callback(log_dir, env):\n if os.path.isfile(log_dir + 'monitor.csv'):\n 
print(\"A monitor.csv already exists. Deleting it.\")\n os.remove(log_dir + 'monitor.csv')\n if not os.path.isfile(log_dir + 'monitor.csv'):\n print(\"Old monitor.csv successfully deleted\")\n os.makedirs(log_dir, exist_ok=True)\n env = Monitor(env, log_dir)\n callback = PlotCallBack(check_freq=1000, log_dir=log_dir)\n return env, callback\n\nlegend_names = []\n\nif TRAIN_MODE != 'OPTUNA':\n results.append(bag_dir + \"default/\")\n env = gym.make(ENV_ID)\n train_model(False, env, bag_dir)\n legend_names.append('Default')\nif TRAIN_MODE != 'DEFAULT':\n results.append(bag_dir + \"optuna/\") \n env = gym.make(ENV_ID)\n train_model(True, env, bag_dir)\n legend_names.append('Optuna')\n\nfor score in scores:\n print(score)\n\nresults_plotter.plot_results(results, TOTAL_TIMESTEPS, results_plotter.X_TIMESTEPS, ENV_ID)\nplt.legend(legend_names)\nplt.show()\n","sub_path":"custom_env/Block-v2/train_blockv2.py","file_name":"train_blockv2.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"189927618","text":"from datetime import datetime, timedelta\nfrom dateutil import tz\nimport re\nimport time\nimport yaml\n\nDATE_PATTERN = r'(^\\d{4}-\\d{2}-\\d{2}).(\\d{2}:\\d{2}:\\d{2}).*'\nDATETIME_PATTERN = r'(^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}).*'\nDATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'\n\n\ndef get_quarter_daterange(year, quarter):\n # Q1 starts in April, start of tax year\n num_firebreak = quarter - 1\n start_week_num = quarter * 13 + num_firebreak\n\n if start_week_num > 52:\n start_week_num -= 53\n year += 1\n\n start_week = \"{}-W{}\".format(year, start_week_num)\n start_date = datetime.strptime(start_week + '-1', \"%Y-W%W-%w\")\n\n end_week_num = start_week_num + 12\n if end_week_num > 52:\n end_week_num -= 53\n year += 1\n\n end_week = \"{}-W{}\".format(year, end_week_num)\n\n end_date = datetime.strptime(end_week + '-5', \"%Y-W%W-%w\")\n\n return start_date, end_date\n\n\ndef get_bi_weekly_sprint_dates(q_start, q_end):\n sprints = []\n sprint_start = q_start\n while sprint_start < q_end:\n sprint_end = sprint_start + timedelta(weeks=2) - timedelta(seconds=1)\n sprint = {\n 'started_on': str(sprint_start),\n 'ended_on': str(sprint_end)\n }\n sprints.append(sprint)\n sprint_start += timedelta(weeks=2)\n\n last_sprint = sprints[-1]\n if last_sprint['ended_on'] > str(q_end):\n last_sprint['ended_on'] = str(q_end - timedelta(seconds=1))\n\n return sprints\n\n\ndef get_datetime(datetime_str):\n matched_datetime = re.search(DATETIME_PATTERN, datetime_str)\n if matched_datetime:\n return datetime.strptime(matched_datetime.group(1), DATETIME_FORMAT)\n\n\ndef get_date_string(date_and_time):\n matched_date = re.search(DATE_PATTERN, str(date_and_time))\n\n if matched_date:\n date_str = matched_date.group(1)\n if matched_date.group(2) == \"23:00:00\":\n utc_time = datetime.strptime(f\"{date_str}T{matched_date.group(2)}\", DATETIME_FORMAT)\n\n from_zone = tz.gettz('UTC')\n to_zone = tz.gettz('Europe/London')\n\n # Tell the datetime object that it's in UTC time zone since\n # datetime objects are 'naive' by default\n utc = utc_time.replace(tzinfo=from_zone)\n\n # Convert time zone\n london = utc.astimezone(to_zone)\n\n matched_date = re.search(DATE_PATTERN, str(london))\n date_str = matched_date.group(1)\n\n return date_str\n\n\ndef get_time_diff(started_at, ended_at=None):\n def count_days_on_weekends(start, end):\n weekend_count = 0\n while start <= end:\n if start.weekday() in {5, 6}:\n weekend_count += 1\n start 
+= timedelta(days=1)\n return weekend_count\n\n if ended_at:\n # if start or end is on a weekend should the day be pushed to Friday / Monday?\n # As otherwise that day will be lost as it lands on the weekend\n start = get_datetime(started_at) if type(started_at) is str else started_at\n end = get_datetime(ended_at) if type(ended_at) is str else ended_at\n weekend_days = count_days_on_weekends(start, end)\n return end - start - timedelta(days=weekend_days)\n\n\ndef get_process_cycle_efficiency(cycle_time, blocked_time=None):\n if blocked_time:\n return (cycle_time - blocked_time) / cycle_time\n\n return 1 # no blocked time so 100% efficient\n\n\nclass Base:\n def get_metrics(self, last_num_weeks=None):\n pass\n","sub_path":"app/source/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"171013127","text":"\n#task21 = TaskClass.Task(20,4,[40, 45, 32, 14],[5, 9, 8, 7])\n#taskL.append(task21)\n\ndef argmax(T,Solution,C):\n max_t=0\n max_sum=0\n for t in T:\n S=0\n for i in Solution[t]:\n S=S+C[i]\n if S>max_sum:\n max_sum=S\n max_t=t\n\n return max_t\n\ndef knapsack_dylp(A,B,C):\n T={0:0} #Hash: biggest value of set for weight - {weight:value}\n Solution={0:[]}\n #Cicle for all targes $\\frac{c_i}{a_i}$\n for i in range(0,A.shape[0]):\n # print C[i],\"/\",A[i],\":\",\n T_old=dict(T) #copy $T_{k-1}$ into $T_{old}$\n # print T\n #Cicle for all partial summ\n for x in T_old:\n if (x+A[i])<=B:\n if (not i in Solution[x]):\n if ( (x+A[i] not in T) or (T[x+A[i]]\",T\n ResultCost = max(T.values())\n Result = Solution[argmax(T,Solution,C)]\n return (Result, ResultCost)\n\n","sub_path":"task_solver.py","file_name":"task_solver.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"349527831","text":"# -*- coding: utf-8 -*-\n\n\ndef _i(prefix, output_string, postfix=\"\\n\"):\n \"\"\" 行先頭に \"ok\" 等の文字が出力されないようにフォーマットする\n \"\"\"\n return \"\\n\".join([prefix + line for line in output_string.split(\"\\n\")]) + \"\\n\"\n\n\nclass Tap(object):\n def __init__(self, writer):\n self.writer = writer\n self.test_count = 0\n self.succeeded = 0\n self.failed = 0\n self.errored = 0\n self.not_ok_list = []\n self.current = None\n\n def total_elapsed(self):\n return self.succeeded + self.failed + self.errored\n\n def debug(self, debug_str):\n writer = self.writer\n writer.write(_i(\" # \", unicode(debug_str)))\n\n def start(self, scenario):\n self.current = scenario\n\n def succeed(self):\n writer = self.writer\n\n self.succeeded += 1\n writer.write(\"ok {0} - {1}\\n\".format(\n self.total_elapsed(), self.current.doc))\n\n def fail(self, exception):\n \"\"\"\n @param exception AssertionFailed\n \"\"\"\n writer = self.writer\n\n self.failed += 1\n self.not_ok_list.append(self.total_elapsed())\n writer.write(\"not ok {0} - {1}\\n\".format(\n self.total_elapsed(), self.current.doc))\n writer.write(\" ------------------------------------------------------------\\n\")\n writer.write(_i(\" \", \"filename: {0}\".format(self.current.setting.filename)))\n writer.write(_i(\" \", \"action_index: {0}\".format(exception.action_index)))\n writer.write(_i(\" \", \"assert_index: {0}\".format(exception.assert_index)))\n writer.write(_i(\" \", \"assertion: {0}\".format(exception.assertion)))\n writer.write(_i(\" \", \"compared: \"))\n for i, items in enumerate(exception.compared):\n for j, item in 
enumerate(items):\n writer.write(_i(u\" \", \"{0}-{1}: {2}\".format(i, j, item)))\n writer.write(\" ------------------------------------------------------------\\n\")\n\n def error(self, exception):\n \"\"\"\n @param exception ActionException\n \"\"\"\n writer = self.writer\n\n self.errored += 1\n self.not_ok_list.append(self.total_elapsed())\n writer.write(\"not ok {0} - {1}\\n\".format(\n self.total_elapsed(), self.current.doc))\n writer.write(\" ------------------------------------------------------------\\n\")\n writer.write(_i(\" \", \"filename: {0}\".format(self.current.setting.filename)))\n writer.write(_i(\" \", \"action_index: {0}\".format(exception.action_index)))\n writer.write(_i(\" \", exception.stack_trace))\n writer.write(\" ------------------------------------------------------------\\n\")\n\n def start_test(self, test_count):\n writer = self.writer\n\n self.test_count = test_count\n writer.write(\"1..{0}\\n\".format(test_count))\n\n def end_test(self, elapsed_time):\n writer = self.writer\n\n writer.write(\"\\n\")\n if self.not_ok_list:\n writer.write(\"FAILED tests {0}\\n\".format(\n \", \".join([str(e) for e in self.not_ok_list])))\n writer.write(\"Failed {0}/{1} tests, {2:.2f}% ok ({3:.3f} sec elapsed)\\n\".format(\n len(self.not_ok_list), self.test_count,\n float(self.succeeded) / self.test_count * 100,\n elapsed_time))\n else:\n if self.test_count == 1:\n writer.write(\"1 test succeeded ({0:.3f} sec elapsed)\\n\".format(\n elapsed_time))\n else:\n writer.write(\"{0} tests all succeeded ({1:.3f} sec elapsed)\\n\".format(\n self.test_count, elapsed_time))\n\n","sub_path":"zaffy/formatter/tap.py","file_name":"tap.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"606896897","text":"#!/bin/python\nprint(\"Mary had a little lamb.\")\nprint(\"Its fleece was white as %s.\" % 'snow')\nprint(\"And everywhere that Mary went.\")\nprint(\".\" * 10) # what'd that do?\n\nend1 = \"C\"\nend2 = \"h\"\nend3 = \"e\"\nend4 = \"e\"\nend5 = \"s\"\nend6 = \"e\"\nend7 = \"B\"\nend8 = \"u\"\nend9 = \"r\"\nend10 = \"g\"\nend11 = \"e\"\nend12 = \"r\"\n\n# if we use comma at the end of 'end6', then it will not go to new line.\nprint(end1 + end2 + end3 + end4 + end5 + end6, end='$', sep='-')\nprint(end7 + end8 + end9 + end10 + end11 + end12)\nprint(\"\\n \\n\")\n\n# Look for python inbuilt new line character.\n\"\"\" \\n is used for new line in python\"\"\"\n","sub_path":"Basic Programs/more_printing.py","file_name":"more_printing.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"52994061","text":"import pandas as pd\nimport xml.etree.ElementTree as ET\nimport requests\nimport re\nfrom requests.exceptions import ConnectionError, HTTPError\n\ndef get_ndec(strfloat):\n tmplist = strfloat.strip().split('.') \n return 0 if len(tmplist) == 1 else len(tmplist[1])\n\nclass Config:\n\n XML_URI = \"https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist-90d.xml\"\n CUBE_NS = \"{http://www.ecb.int/vocabulary/2002-08-01/eurofxref}\"\n DB_EXCH_CURRENCY = pd.DataFrame()\n CURRENCY_NDEC = {'EUR': 2}\n STARTUP_STATUS = \"NOK\"\n HTTP_ERROR = \"\"\n DATE_REGEX = re.compile('\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d')\n\n @classmethod\n def init_app(cls, app):\n try:\n response = requests.get(cls.XML_URI)\n response.raise_for_status()\n except HTTPError as he:\n cls.HTTP_ERROR = f'Error on startup: requests will not be fulfilled: 
{he}'\n except ConnectionError as ce:\n cls.HTTP_ERROR = f'Error on startup: requests will not be fulfilled: {ce}'\n else:\n xml_eurofxref = response.content\n root = ET.XML(xml_eurofxref)\n root_cube = root.find(f'{cls.CUBE_NS}Cube')\n\n if root_cube is None:\n app.config['HTTP_ERROR'] = f'Error on startup: unknown namespace {cls.CUBE_NS}'\n return\n \n for cube_time in root_cube:\n time_val = cube_time.attrib['time']\n cls.DB_EXCH_CURRENCY.loc[time_val, 'EUR'] = 1.00 \n for cube in cube_time:\n curr = cube.attrib['currency'] \n rate = cube.attrib['rate'] \n cls.DB_EXCH_CURRENCY.loc[time_val, curr] = float(cube.attrib['rate']) \n ndec = get_ndec(cube.attrib['rate'])\n if curr not in cls.CURRENCY_NDEC or cls.CURRENCY_NDEC[curr] < ndec:\n cls.CURRENCY_NDEC[curr] = ndec\n \n cls.STARTUP_STATUS = \"OK\" \n\n app.config['HTTP_ERROR'] = cls.HTTP_ERROR\n app.config['STARTUP_STATUS'] = cls.STARTUP_STATUS\n \nclass DevelopmentConfig(Config):\n DEBUG = True\n\n\nclass TestingConfig(Config):\n TESTING = True\n\nclass ProductionConfig(Config):\n\n @classmethod\n def init_app(cls, app):\n Config.init_app(app)\n\n import logging\n from logging import StreamHandler\n file_handler = StreamHandler()\n file_handler.setLevel(logging.ERROR)\n app.logger.addHandler(file_handler)\n\nconfig = {\n 'development': DevelopmentConfig,\n 'testing': TestingConfig,\n 'production': ProductionConfig,\n 'default': DevelopmentConfig\n}\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"551783393","text":"#!/usr/bin/env python\n\n'''\nOnline example from :\nhttp://www.vtk.org/Wiki/VTK/Examples/Python/Graphs/EdgeWeights\n\nGoing to heavly comment out the steps to understand how to edit\nthis into what we need to make other graphs\n''' \n\nimport vtk \t\t\t\t\t\t\t# Manditory on all python VTK\nimport random\n \n\ng = vtk.vtkMutableUndirectedGraph() \t \t# Sets up empty data structure\n\n# Create a list of 1000 Vertexs\nvertex_list = []\nfor i in range(1000):\n\tvertex_list.append(g.AddVertex())\n\n# Create a fully connected graph \t\t# Adding in the edges between them\nfor i in range(1000):\n\twhile(True):\n\t\tv1 = random.choice(vertex_list)\n\t\tv2 = random.choice(vertex_list)\n\t\tif(v1 != v2):\n\t\t\tg.AddGraphEdge(v1,v2)\n\t\t\tbreak\n\n# Making the layout view\n# Which is an awsome class that handles a lot of the backend\n# stuff for interaction with the graph we are about to make\ngraphLayoutView = vtk.vtkGraphLayoutView()\n\n# Toss in your graph into this view\ngraphLayoutView.AddRepresentationFromInput(g)\n\n# General Set Up Stuff\ngraphLayoutView.SetLayoutStrategy(\"Simple 2D\")\n\n\n# Recenters the camera around our points\ngraphLayoutView.ResetCamera()\n\n# Opens and renders the window\ngraphLayoutView.Render()\n \n# I have no idea what the hell this does\ngraphLayoutView.GetLayoutStrategy().SetRandomSeed(0)\n \n# Sets up the interaction widets/event_listeners\ngraphLayoutView.GetInteractor().Start()","sub_path":"pysrc/DisconnectedGraph.py","file_name":"DisconnectedGraph.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"327901375","text":"import time\nimport asyncio\nfrom aiohttp import ClientSession\n\nfrom utils import (\n sec_to_mcs,\n url, params, headers\n)\nfrom test_cases import SecondTestCase, FirstTestCase\n\n\nasync def fetch(session):\n start_time = 
time.time()\n\n async with session.get(\n url=url, params=params, headers=headers\n ) as response:\n status_code = response.status\n response = await response.read()\n\n end_time = time.time()\n elapsed = end_time-start_time\n\n test = FirstTestCase(\n response={\n 'content': response,\n 'microseconds': sec_to_mcs(elapsed),\n 'status_code': status_code,\n 'end_time': end_time,\n 'start_time': start_time,\n },\n methods=[\n 'test_content_size',\n 'test_response_time',\n 'test_response_code',\n 'test_information_volume'\n ]\n )\n test.run()\n return test\n\n\nasync def run(count_test):\n tasks = []\n\n async with ClientSession() as session:\n for i in range(count_test):\n task = asyncio.ensure_future(fetch(session))\n tasks.append(task)\n\n tests = await asyncio.gather(*tasks)\n SecondTestCase(\n methods=[\n 'test_all_subtest_passed',\n 'test_rps',\n 'test_latency',\n ],\n tests=tests\n\n ).run()\n\nif __name__ == '__main__':\n count_test = 8\n glob_time = time.time()\n\n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(run(count_test))\n loop.run_until_complete(future)\n\n print('Glob end %s' % (time.time() - glob_time))\n","sub_path":"test_2_async.py","file_name":"test_2_async.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"484116642","text":"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2017/12/16 10:44\n# @Author : Shiyu Li\n# @Software: PyCharm\n# https://github.com/ritchieng/wideresnet-tensorlayer\n\n\nimport tensorlayer as tl\nfrom tensorlayer.layers import *\nimport numpy as np\nimport time\nimport os\n\n## Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```\nX_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(\n shape=(-1, 32, 32, 3), plotable=False)\n\n# X_train = np.asarray(X_train, dtype=np.float32)\n# y_train = np.asarray(y_train, dtype=np.int64)\n# X_test = np.asarray(X_test, dtype=np.float32)\n# y_test = np.asarray(y_test, dtype=np.int64)\n\nprint('X_train.shape', X_train.shape) # (50000, 32, 32, 3)\nprint('y_train.shape', y_train.shape) # (50000,)\nprint('X_test.shape', X_test.shape) # (10000, 32, 32, 3)\nprint('y_test.shape', y_test.shape) # (10000,)\nprint('X %s y %s' % (X_test.dtype, y_test.dtype))\n\n\ndef data_to_tfrecord(images, labels, filename):\n \"\"\" Save data into TFRecord \"\"\"\n if os.path.isfile(filename):\n print(\"%s exists\" % filename)\n return\n print(\"Converting data into %s ...\" % filename)\n cwd = os.getcwd()\n writer = tf.python_io.TFRecordWriter(filename)\n for index, img in enumerate(images):\n img_raw = img.tobytes()\n ## Visualize a image\n # tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)\n label = int(labels[index])\n # print(label)\n ## Convert the bytes back to image as follow:\n # image = Image.frombytes('RGB', (32, 32), img_raw)\n # image = np.fromstring(img_raw, np.float32)\n # image = image.reshape([32, 32, 3])\n # tl.visualize.frame(np.asarray(image, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)\n example = tf.train.Example(features=tf.train.Features(feature={\n \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),\n 'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),\n }))\n writer.write(example.SerializeToString()) # Serialize To String\n writer.close()\n\n\ndef read_and_decode(filename, is_train=None):\n \"\"\" Return tensor to read from 
TFRecord \"\"\"\n filename_queue = tf.train.string_input_producer([filename])\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(serialized_example,\n features={\n 'label': tf.FixedLenFeature([], tf.int64),\n 'img_raw': tf.FixedLenFeature([], tf.string),\n })\n # You can do more image distortion here for training data\n img = tf.decode_raw(features['img_raw'], tf.float32)\n img = tf.reshape(img, [32, 32, 3])\n img = tf.cast(img, tf.float32) # * (1. / 255) - 0.5\n # if is_train == True:\n # # 1. Randomly crop a [height, width] section of the image.\n # img = tf.random_crop(img, [24, 24, 3])\n # # 2. Randomly flip the image horizontally.\n # img = tf.image.random_flip_left_right(img)\n # # 3. Randomly change brightness.\n # img = tf.image.random_brightness(img, max_delta=63)\n # # 4. Randomly change contrast.\n # img = tf.image.random_contrast(img, lower=0.2, upper=1.8)\n # # 5. Subtract off the mean and divide by the variance of the pixels.\n # try: # TF 0.12+\n # img = tf.image.per_image_standardization(img)\n # except: # earlier TF versions\n # img = tf.image.per_image_whitening(img)\n #\n # elif is_train == False:\n # # 1. Crop the central [height, width] of the image.\n # img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)\n # # 2. Subtract off the mean and divide by the variance of the pixels.\n # try: # TF 0.12+\n # img = tf.image.per_image_standardization(img)\n # except: # earlier TF versions\n # img = tf.image.per_image_whitening(img)\n # elif is_train == None:\n # img = img\n\n label = tf.cast(features['label'], tf.int32)\n return img, label\n\n\n# ## Save data into TFRecord files\n# data_to_tfrecord(images=X_train, labels=y_train, filename=\"train.cifar10\")\n# data_to_tfrecord(images=X_test, labels=y_test, filename=\"test.cifar10\")\n\nbatch_size = 64\n# For generator\nnum_examples = X_train.shape[0]\nindex_in_epoch = 0\nepochs_completed = 0\n\n# For wide resnets\nblocks_per_group = 4\nwidening_factor = 4\n\n# Basic info\nbatch_num = 64\nimg_row = 32\nimg_col = 32\nimg_channels = 3\nnb_classes = 10\n\nwith tf.device('/cpu:0'):\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n # prepare data in cpu\n x_train_, y_train_ = read_and_decode(\"train.cifar10\", True)\n x_test_, y_test_ = read_and_decode(\"test.cifar10\", False)\n\n x_train_batch, y_train_batch = tf.train.shuffle_batch([x_train_, y_train_],\n batch_size=batch_size, capacity=2000, min_after_dequeue=1000,\n num_threads=32) # set the number of threads here\n # for testing, uses batch instead of shuffle_batch\n x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_],\n batch_size=batch_size, capacity=50000, num_threads=32)\n\n\n def zero_pad_channels(x, pad=0):\n \"\"\"\n Function for Lambda layer\n \"\"\"\n pattern = [[0, 0], [0, 0], [0, 0], [pad - pad // 2, pad // 2]]\n return tf.pad(x, pattern)\n\n\n def residual_block(x, count, nb_filters=16, subsample_factor=1):\n prev_nb_channels = x.outputs.get_shape().as_list()[3]\n\n if subsample_factor > 1:\n subsample = [1, subsample_factor, subsample_factor, 1]\n # shortcut: subsample + zero-pad channel dim\n name_pool = 'pool_layer' + str(count)\n shortcut = tl.layers.PoolLayer(x,\n ksize=subsample,\n strides=subsample,\n padding='VALID',\n pool=tf.nn.avg_pool,\n name=name_pool)\n\n else:\n subsample = [1, 1, 1, 1]\n # shortcut: identity\n shortcut = x\n\n if nb_filters > prev_nb_channels:\n name_lambda = 'lambda_layer' + str(count)\n shortcut = tl.layers.LambdaLayer(\n 
shortcut,\n zero_pad_channels,\n fn_args={'pad': nb_filters - prev_nb_channels},\n name=name_lambda)\n\n name_norm = 'norm' + str(count)\n y = tl.layers.BatchNormLayer(x,\n decay=0.999,\n epsilon=1e-05,\n is_train=True,\n name=name_norm)\n\n name_conv = 'conv_layer' + str(count)\n y = tl.layers.Conv2dLayer(y,\n act=tf.nn.relu,\n shape=[3, 3, prev_nb_channels, nb_filters],\n strides=subsample,\n padding='SAME',\n name=name_conv)\n\n name_norm_2 = 'norm_second' + str(count)\n y = tl.layers.BatchNormLayer(y,\n decay=0.999,\n epsilon=1e-05,\n is_train=True,\n name=name_norm_2)\n\n prev_input_channels = y.outputs.get_shape().as_list()[3]\n name_conv_2 = 'conv_layer_second' + str(count)\n y = tl.layers.Conv2dLayer(y,\n act=tf.nn.relu,\n shape=[3, 3, prev_input_channels, nb_filters],\n strides=[1, 1, 1, 1],\n padding='SAME',\n name=name_conv_2)\n\n name_merge = 'merge' + str(count)\n out = tl.layers.ElementwiseLayer([y, shortcut],\n combine_fn=tf.add,\n name=name_merge)\n return out\n\n\n def model_batch_norm(x_crop, y_, reuse):\n \"\"\" Batch normalization should be placed before rectifier. \"\"\"\n with tf.variable_scope(\"model\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n net = InputLayer(x_crop, name='input')\n net = Conv2dLayer(net,\n act=tf.nn.relu,\n shape=[3, 3, 3, 16],\n strides=[1, 1, 1, 1],\n padding='SAME',\n name='cnn_layer_first')\n for i in range(0, blocks_per_group):\n nb_filters = 16 * widening_factor\n count = i\n net = residual_block(net, count, nb_filters=nb_filters, subsample_factor=1)\n\n for i in range(0, blocks_per_group):\n nb_filters = 32 * widening_factor\n if i == 0:\n subsample_factor = 2\n else:\n subsample_factor = 1\n count = i + blocks_per_group\n net = residual_block(net, count, nb_filters=nb_filters, subsample_factor=subsample_factor)\n\n for i in range(0, blocks_per_group):\n nb_filters = 64 * widening_factor\n if i == 0:\n subsample_factor = 2\n else:\n subsample_factor = 1\n count = i + 2 * blocks_per_group\n net = residual_block(net, count, nb_filters=nb_filters, subsample_factor=subsample_factor)\n\n net = tl.layers.BatchNormLayer(net,\n decay=0.999,\n epsilon=1e-05,\n is_train=True,\n name='norm_last')\n\n net = tl.layers.PoolLayer(net,\n ksize=[1, 8, 8, 1],\n strides=[1, 8, 8, 1],\n padding='VALID',\n pool=tf.nn.avg_pool,\n name='pool_last')\n\n net = tl.layers.FlattenLayer(net, name='flatten')\n\n net = tl.layers.DenseLayer(net,\n n_units=nb_classes,\n act=tf.identity,\n name='fc')\n\n y = net.outputs\n\n ce = tl.cost.cross_entropy(y, y_, name='cost')\n # L2 for the MLP, without this, the accuracy will be reduced by 15%.\n # L2 = 0\n # for p in tl.layers.get_variables_with_name('relu/W', True, True):\n # L2 += tf.contrib.layers.l2_regularizer(0.004)(p)\n cost = ce #+ L2\n\n correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)\n acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n return net, cost, acc\n\n\n with tf.device('/gpu:0'):\n network, cost, acc, = model_batch_norm(x_train_batch, y_train_batch, None)\n _, cost_test, acc_test = model_batch_norm(x_test_batch, y_test_batch, True)\n\n ## train\n n_epoch = 5000\n learning_rate = 0.01\n print_freq = 1\n n_step_epoch = int(len(y_train) / batch_size)\n n_step = n_epoch * n_step_epoch\n\n with tf.device('/gpu:0'):\n # train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999,\n # epsilon=1e-08, use_locking=False).minimize(cost)\n train_op = tf.train.GradientDescentOptimizer(\n learning_rate, use_locking=False).minimize(cost, 
var_list=network.all_params)\n\n tl.layers.initialize_global_variables(sess)\n\n network.print_params(False)\n network.print_layers()\n\n print(' learning_rate: %f' % learning_rate)\n print(' batch_size: %d' % batch_size)\n print(' n_epoch: %d, step in an epoch: %d, total n_step: %d' % (n_epoch, n_step_epoch, n_step))\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n step = 0\n for epoch in range(n_epoch):\n start_time = time.time()\n train_loss, train_acc, n_batch = 0, 0, 0\n for s in range(n_step_epoch):\n ## You can also use placeholder to feed_dict in data after using\n # val, l = sess.run([x_train_batch, y_train_batch])\n # tl.visualize.images2d(val, second=3, saveable=False, name='batch', dtype=np.uint8, fig_idx=2020121)\n # err, ac, _ = sess.run([cost, acc, train_op], feed_dict={x_crop: val, y_: l})\n err, ac, _ = sess.run([cost, acc, train_op])\n step += 1\n train_loss += err\n train_acc += ac\n n_batch += 1\n\n if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:\n print(\"Epoch %d : Step %d-%d of %d took %fs\" % (\n epoch, step, step + n_step_epoch, n_step, time.time() - start_time))\n print(\" train loss: %f\" % (train_loss / n_batch))\n print(\" train acc: %f\" % (train_acc / n_batch))\n\n test_loss, test_acc, n_batch = 0, 0, 0\n for _ in range(int(len(y_test) / batch_size)):\n err, ac = sess.run([cost_test, acc_test])\n test_loss += err\n test_acc += ac\n n_batch += 1\n print(\" test loss: %f\" % (test_loss / n_batch))\n print(\" test acc: %f\" % (test_acc / n_batch))\n\n coord.request_stop()\n coord.join(threads)\n sess.close()\n","sub_path":"DeepSteganalysis/WideResNet/WideResNet_cifar10.py","file_name":"WideResNet_cifar10.py","file_ext":"py","file_size_in_byte":13936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"179268175","text":"height = 500\r\nwidth = 500\r\nnx = 6\r\nny = 6\r\nl = 60\r\nslide = 0\r\n\r\ndef setup():\r\n size(width, height)\r\n stroke(0, 102, 0) #цвет рамки\r\n fill(0, 102, 0) #цвет закраски \r\n frameRate(5) #скорость прокрутки\r\n \r\ndef draw():\r\n global slide\r\n background(0)\r\n for i in range(ny):\r\n for k in range(nx): \r\n x = ((k+1)*width/nx)-((width/nx)/2)\r\n y = ((i+1)*height/ny)-((height/ny)/2)\r\n if slide == 0:\r\n line(x, y-(l/2), x, y+(l/2))\r\n if slide == 1:\r\n rect(x-l/2, y-l/2, l, l)\r\n if slide == 2:\r\n ellipse(x, y, l, l)\r\n #triangle(x-2, y-2, 100+x, 20+y, 40+x, 90+y)\r\n slide = (slide + 1)%3\r\n","sub_path":"PrimitivePShap____yut.pyde","file_name":"PrimitivePShap____yut.pyde","file_ext":"pyde","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"182103157","text":"class Doraemon:\n def __init__(self,w,h,c):\n self.weight = w\n self.height = h\n self.color = c\n self.pocket_num = 3\n print(\"__Creating Now__\\n\" +\n \"僕、ドラえもんです.\")\n \n def pocket(self,nobi):\n self.nobi_ask = nobi\n self.pocket_num *= nobi ** 2\n \n \n\nDora1 = Doraemon(129.3, 129.3, \"blue\")\nprint(Dora1.pocket_num)\nhelp = int(input(\"のび太くんのドラえもーんの掛け声は何回ですか?\"))\nDora1.pocket(help)\nprint(\"以上より\" + str(Dora1.pocket_num) + \"個です\")\n\n\"\"\"print(\"僕の体重は\" + str(Dora1.weight) + \"kgだよ。意外に重いねテクノロジーだね\")\nprint(\"なので、ダイエットします。\")\n\nmomentum = int(input(\"何キロぐらい減らしたい?\"))\nDora1.weight -= momentum\nprint(\"えい!\\n\" + str(round(Dora1.weight,1)) + 
\"kgになったよ\")\"\"\"\n\n\n\n\n\n","sub_path":"dora_object.py","file_name":"dora_object.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"149094835","text":"import random\nimport time\n\nfrom card import Card\nfrom nemesisCard import NemesisCard\nfrom nemesis import Nemesis\nfrom player import Player\n\n\nclass Game:\n\n characterArray = {\n \"Mist\": {\n \"power\": 1, \"maxCharge\": 4, \"starterBreach\": [0,0,3,4], \"deck\": [],\"hand\" : []\n }\n }\n\n def __init__(self, nbPlayers):\n self.nbPlayers = nbPlayers\n self.nemesisName = \"empty\"\n self.nemesis = None\n self.mainMenu = True\n self.characterSelect = False\n self.characterList = [\"Mist\", \"Jian\",\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\"]\n self.characterSelected = []\n self.playerList = []\n self.deckOrder = []\n self.discardDeckOder = []\n self.gemDeck = []\n self.relicDeck = []\n self.spellDeck = []\n self.reserveDeck = []\n self.reserveDeckCount = [7, 7, 7, 5, 5, 5, 5, 5, 5]\n self.isPlaying = True\n self.win = None\n self.chooseEffect = []\n self.isPossibleChooseEffect = []\n self.waitingForPlayerChoice = False\n\n\n def setNbPlayers(self,stringPlayers):\n self.nbPlayers = stringPlayers.split(\"-\")[1]\n\n def setNemesisName(self,nemesis):\n self.nemesisName = nemesis.split(\"-\")[1]\n\n def setupNemesis(self):\n self.nemesis = Nemesis(self.nemesisName)\n self.nemesis.setupNemesis(self.nbPlayers)\n self.mainMenu = False\n\n def setPlayer(self,index,name):\n self.characterSelected[index] = name\n\n def launchGame(self):\n #self.createDeckOrder()\n self.createTestDeckOrder()\n self.createAllCards()\n self.createReserve()\n self.createPlayerSetup()\n self.nemesis.additionnalSetup()\n\n def gameIsOver(self, boolean):\n self.win = boolean\n self.isPlaying = False\n\n def doNemesisTurn(self):\n self.doNemesisBoard()\n self.nemesisDrawCard()\n\n def nemesisDrawCard(self):\n if not self.nemesis.nemesisDeck:\n if not self.nemesis.nemesisBoard:\n self.gameIsOver(True)\n else:\n card = self.nemesis.drawNemesisCard()\n self.doNemesisCard(False, card)\n\n def doNemesisBoard(self):\n if not self.nemesis.nemesisBoard:\n for card in self.nemesis.nemesisBoard:\n self.doNemesisCard(True, card)\n\n def doNemesisCard(self, boolean, nemesisCard):\n if nemesisCard.cardType == \"Serviteur\":\n self.doServiteurEffect(boolean,nemesisCard)\n elif nemesisCard.cardType == \"Pouvoir\":\n self.doCardPowerEffect(boolean,nemesisCard)\n elif nemesisCard.cardType == \"Attaque\":\n self.doAttaqueEffect(nemesisCard)\n\n def checkEffet(self, effect):\n self.isPossibleChooseEffect = [False, False]\n isPossible = False\n if effect ==\"065\":\n for player in self.playerList:\n if player.charge > 0:\n isPossible = True\n self.isPossibleChooseEffect[1] = True\n if player.spellBoard:\n self.isPossibleChooseEffect[0] = True\n isPossible = True\n return isPossible\n\n def doServiteurEffect(self,alreadyOnBoard,nemesisCard):\n if alreadyOnBoard:\n effectNumber = nemesisCard.cardEffect\n #Souffle Du Labyrinthe\n if effectNumber == \"065\":\n if self.checkEffect(\"065\"):\n self.chooseNemesisEffect(1, 2)\n\n def doChoosenEffect(self,codeEffect,player):\n #1 - Discard Prepared Spell\n #2 - Loose 1 charge\n if codeEffect == 1:\n player.discardSpell(1)\n elif codeEffect == 2:\n player.looseCharge(1)\n\n self.waitingForPlayerChoice = False\n\n\n def chooseNemesisEffect(self,codeFirstEffect,codeSecondEffect):\n self.chooseEffect = 
[codeFirstEffect, codeSecondEffect]\n self.waitingForPlayerChoice = True\n while self.waitingForPlayerChoice:\n time.sleep(1)\n\n def createPlayerSetup(self):\n i = 1\n for char in self.characterSelected:\n player = Player(char, i, Game.characterArray[char][\"power\"], Game.characterArray[char][\"maxCharge\"], Game.characterArray[char][\"starterBreach\"], Game.characterArray[char][\"deck\"], Game.characterArray[char][\"hand\"])\n self.playerList.extend(player)\n i += 1\n\n\n def createReserve(self):\n random.shuffle(self.gemDeck)\n random.shuffle(self.relicDeck)\n random.shuffle(self.spellDeck)\n firstGem = self.getReserveCard(\"Gemme\",0,3)\n secondGem = self.getReserveCard(\"Gemme\",3,4)\n thirdGem = self.getReserveCard(\"Gemme\",5,6)\n firstRelic = self.getReserveCard(\"Relic\",1,4)\n secondRelic = self.getReserveCard(\"Relic\",4,8)\n firstSpell = self.getReserveCard(\"Spell\",1,3)\n secondSpell = self.getReserveCard(\"Spell\",3,4)\n thirdSpell = self.getReserveCard(\"Spell\",4,5)\n fourthSpell = self.getReserveCard(\"Spell\",6,9)\n self.reserveDeck.extend([firstGem,secondGem,thirdGem,firstRelic,secondRelic,firstSpell,secondSpell,thirdSpell,fourthSpell])\n for card in self.reserveDeck:\n print(card.cardName)\n\n\n def getReserveCard(self,type,min,max):\n keepLooping = True\n print(\"GetReserveCard \" + type + \" Min \" + str(min) + \" Max \" + str(max))\n while keepLooping:\n if type == \"Gemme\":\n card = self.gemDeck[random.randint(0, len(self.gemDeck) - 1)]\n print(\"Checking \" + card.cardName)\n if card.cardCost >= min and card.cardCost <= max:\n print(\"Accepted \" + card.cardName)\n self.gemDeck.remove(card)\n keepLooping = False\n elif type == \"Relic\":\n card = self.relicDeck[random.randint(0,len(self.relicDeck) - 1)]\n if card.cardCost >= min and card.cardCost <= max:\n self.relicDeck.remove(card)\n keepLooping = False\n elif type == \"Spell\":\n card = self.spellDeck[random.randint(0,len(self.spellDeck) - 1)]\n if card.cardCost >= min and card.cardCost <= max:\n self.spellDeck.remove(card)\n keepLooping = False\n\n return card\n\n def createDeckOrder(self):\n if int(self.nbPlayers) == 1:\n self.deckOrder = [1,1,1,0,0]\n elif int(self.nbPlayers) == 2:\n self.deckOrder = [1, 1, 2, 2, 0, 0]\n elif int(self.nbPlayers) == 3:\n self.deckOrder = [1, 2, 3, 5, 0, 0]\n elif int(self.nbPlayers) == 4:\n self.deckOrder = [1, 2, 3, 4, 0, 0]\n\n random.shuffle(self.deckOrder)\n\n def createTestDeckOrder(self):\n self.deckOrder = [0, 0, 1, 2]\n\n\n def createAllCards(self):\n self.createGemCards()\n self.createRelicCards()\n self.createSpellCards()\n\n def createGemCards(self):\n elementExtraterrestre = Card(\"Gemme\", \"Élément Extraterrestre\", \"004\", 4)\n beryliteHantee = Card(\"Gemme\", \"Beylite Hantée\", \"011\",3)\n pierreDeDouleur = Card(\"Gemme\", \"Pierre De Douleur\", \"018\", 6)\n opaleBrulante = Card(\"Gemme\", \"Opale Brûlante\", \"56\", 5)\n saphirNuageux = Card(\"Gemme\", \"Saphir Nuageux\", \"62\", 6)\n agregatDeDiamants = Card(\"Gemme\", \"Agrégat De Diamants\", \"69\", 4)\n jade = Card(\"Gemme\",\"Jade\",\"76\",2)\n rubisFulgurant = Card(\"Gemme\", \"Rubis Fulgurant\", \"83\", 4)\n perleFiltrante = Card(\"Gemme\", \"PerleFiltrante\", \"90\", 3)\n ambreDeBRisbois = Card(\"Gemme\", \"Ambre De V'Risbois\", \"9788\",3)\n agateSangsue = Card(\"Gemme\", \"Agate Sangsue\", \"N02\",3)\n scarabeeFossilise = Card(\"Gemme\", \"Scarabée Fossilisé\", \"V03\",3)\n pierreDeSang = Card(\"Gemme\", \"Pierre De Sang\", \"W56\", 6)\n mineraiDeLaBreche = Card(\"Gemme\", \"Minerai De La 
Brèche\", \"W63\", 4)\n diamantInquietant = Card(\"Gemme\", \"Diamant Inquiétant\", \"W70\",3)\n magnetiteCongelee = Card(\"Gemme\", \"Mangétite Congelée\", \"W84\",3)\n bouquetDeScories = Card(\"Gemme\", \"Bouquet De Scories\", \"W91\", 4)\n verrePyroclastique = Card(\"Gemme\", \"Verre Pyroclastique\", \"W98\",3)\n self.gemDeck.extend([elementExtraterrestre,beryliteHantee,pierreDeDouleur,opaleBrulante,saphirNuageux,agregatDeDiamants,jade,\n rubisFulgurant,perleFiltrante,ambreDeBRisbois,agateSangsue,scarabeeFossilise,pierreDeSang,mineraiDeLaBreche,\n diamantInquietant,magnetiteCongelee,bouquetDeScories,verrePyroclastique])\n\n def createRelicCards(self):\n sphereDesSecrets = Card(\"Relic\", \"Sphère Des Secrets\", \"030\", 3)\n batonDexplosion = Card(\"Relic\", \"Bâton D'explosion\", \"104\", 3)\n vortexEnBouteille = Card(\"Relic\", \"Vortex En Bouteille\", \"109\", 3)\n dagueFlechissante = Card(\"Relic\", \"Dague Fléchissante\", \"114\", 2)\n orbeDeStabilisation = Card(\"Relic\", \"Orbe De Stabilisation\", \"119\", 4)\n talismanDeMage = Card(\"Relic\", \"Talisman De Mage\", \"124\", 5)\n cubeAstral = Card(\"Relic\", \"Cube Astral\", \"025\", 5)\n prismeInstable = Card(\"Relic\", \"Prisme Instable\", \"129\", 3)\n marteauEnFusion = Card(\"Relic\", \"Marteau En Fusion\", \"N09\", 5)\n spiraleTemporelle = Card(\"Relic\", \"Spirale Temporelle\", \"N14\", 7)\n cleDimensionnelle = Card(\"Relic\", \"Clé Dimensionnelle\", \"V10\", 8)\n cachetDeLeternite = Card(\"Relic\", \"Cachet De L'éternité\", \"V15\", 3)\n lingotErratique = Card(\"Relic\", \"Lingot Erratique\", \"W77\", 5)\n boussoleDeCairn = Card(\"Relic\", \"Boussole De Cairn\", \"W105\", 4)\n parcheminDuConclave = Card(\"Relic\", \"Parchemin Du Conclave\", \"W110\", 3)\n attrapeurDeDemons = Card(\"Relic\", \"Attrapeur De Démons\", \"W115\", 3)\n totemDeMage = Card(\"Relic\", \"Totem De Mage\", \"W120\", 2)\n fetichePrimordial = Card(\"Relic\", \"Fétiche Primordial\", \"W125\", 4)\n ganteletDeVorticite = Card(\"Relic\", \"Gantelet De Vorticité\", \"W130\", 6)\n\n self.relicDeck.extend([sphereDesSecrets, dagueFlechissante, batonDexplosion, vortexEnBouteille, orbeDeStabilisation, talismanDeMage, cubeAstral, prismeInstable, marteauEnFusion, spiraleTemporelle, cleDimensionnelle,\n cachetDeLeternite, lingotErratique, boussoleDeCairn, parcheminDuConclave, attrapeurDeDemons, totemDeMage, fetichePrimordial, ganteletDeVorticite])\n\n def createSpellCards(self):\n\n carboniser = Card(\"Spell\", \"Carboniser\", \"035\", 8)\n catalyseur = Card(\"Spell\", \"Catalyseur\", \"040\", 6)\n auraDeReaction = Card(\"Spell\", \"Aura De Réaction\", \"045\", 5)\n conduitDuNeant = Card(\"Spell\", \"Conduit Du Néant\", \"050\", 7)\n pyromancie = Card(\"Spell\", \"Pyromancie\", \"055\", 7)\n calciner = Card(\"Spell\", \"Calciner\", \"060\", 5)\n visionAplifiée = Card(\"Spell\", \"Vision Amplifiée\", \"134\", 4)\n nexusDesArcanes = Card(\"Spell\", \"Nexus Des Arcanes\", \"139\", 7)\n arcChaotique = Card(\"Spell\", \"Arc Chaotique\", \"144\", 6)\n videDévorant = Card(\"Spell\", \"Vide Dévorant\", \"149\", 7)\n feuObscur = Card(\"Spell\", \"Feu Obscur\", \"154\", 5)\n volDessence = Card(\"Spell\", \"Vol D'Essence\", \"159\", 5)\n eclairEnrage = Card(\"Spell\", \"Éclair Enragé\", \"164\", 5)\n miseAFeu = Card(\"Spell\", \"Mise à Feu\", \"169\", 4)\n tentaculeDeLave = Card(\"Spell\", \"Tentacule De Lave\", \"174\", 4)\n vagueDoubli = Card(\"Spell\", \"Vague D'Oubli\", \"179\", 5)\n flammeDuPhenix = Card(\"Spell\", \"Flamme Du Phénix\", \"184\", 3)\n apercuPlanaire = 
Card(\"Spell\", \"Aperçu Planaire\", \"189\", 6)\n echoSpectral = Card(\"Spell\", \"Écho Spectral\", \"194\", 3)\n fouetArdent = Card(\"Spell\", \"Fouet Ardent\", \"199\", 6)\n flamber = Card(\"Spell\", \"Flamber\", \"N19\", 4)\n radiance = Card(\"Spell\", \"Radiance\", \"N24\", 8)\n laMarqueDuSage = Card(\"Spell\", \"La Marque Du Sage\", \"N29\", 7)\n eclairDeProphetie = Card(\"Spell\", \"Éclair De Prophétie\", \"N34\", 6)\n resonance = Card(\"Spell\", \"Résonance\", \"V20\", 6)\n embrassement = Card(\"Spell\", \"Embrassement\", \"V25\", 2)\n fulminer = Card(\"Spell\", \"Fulminer\", \"V30\", 5)\n feuInterieur = Card(\"Spell\", \"Feu Intérieur\", \"V36\", 2)\n flechetteThermique = Card(\"Spell\", \"Fléchette Thermique\", \"V40\", 4)\n aurore = Card(\"Spell\", \"Aurore\", \"W135\", 5)\n carbonisation = Card(\"Spell\", \"Carbonisation\", \"W140\", 4)\n conjurationDesOublies = Card(\"Spell\", \"Conjuration Des Oubliés\", \"W145\", 6)\n flecheCeleste = Card(\"Spell\", \"Flèche Céleste\", \"W150\", 5)\n champDeConvection = Card(\"Spell\", \"Champ De Convection\", \"W155\", 5)\n cristallisation = Card(\"Spell\", \"Cristallisation\", \"W160\", 8)\n equilibre = Card(\"Spell\", \"Équilibre\", \"W165\", 7)\n torrentArdent = Card(\"Spell\", \"Torrent Ardent\", \"W170\", 5)\n eclairDechirant = Card(\"Spell\", \"Éclair Déchirant\", \"W175\", 4)\n meche = Card(\"Spell\", \"Mèche\", \"W180\", 4)\n forgeNova = Card(\"Spell\", \"Forge Nova\", \"W185\", 6)\n affluxPyrotechnique = Card(\"Spell\", \"Afflux Pyrotechnique\", \"W190\", 4)\n retourALaPoussiere = Card(\"Spell\", \"Retour À La Poussière\", \"W195\", 7)\n familierImaginaire = Card(\"Spell\", \"Familier Imaginaire\", \"W200\", 3)\n\n self.spellDeck.extend([carboniser, catalyseur, auraDeReaction, conduitDuNeant, pyromancie, calciner, visionAplifiée, nexusDesArcanes, arcChaotique, videDévorant, feuObscur, volDessence,\n eclairEnrage, miseAFeu, tentaculeDeLave, vagueDoubli, flammeDuPhenix, apercuPlanaire, echoSpectral, fouetArdent, flamber, radiance, laMarqueDuSage, eclairDeProphetie,\n resonance, embrassement, fulminer, feuInterieur, flechetteThermique, aurore, carbonisation, conjurationDesOublies, flecheCeleste, champDeConvection, cristallisation,\n equilibre, torrentArdent, eclairDechirant, meche, forgeNova, affluxPyrotechnique, retourALaPoussiere, familierImaginaire])\n\n\n\n\n\n\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":14051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"283687321","text":"import numpy as np\nimport scipy as sp\nfrom scipy.integrate import odeint\nfrom scipy import linalg as la\nfrom math import *\nimport cmath \n\nclass Lorenz:\n\n def __init__(self, coordinaten, sigma = 10.0, rho = 28, beta = 8/3):\n self.s = float(sigma)\n self.r = float(rho)\n self.b = float(beta)\n self.xs = coordinaten[0]\n self.ys = coordinaten[1]\n self.zs = coordinaten[2]\n \n\n def solve(self, T, dt):\n T = int(T)\n dt = float(dt)\n looptijd = [0]*ceil(T/dt)\n for i in range(len(looptijd)):\n looptijd[i] = i*dt\n \n return odeint(self.lorenz, [self.xs, self.ys, self.zs], looptijd)\n\n def lorenz(self, lijst, t):\n x = lijst[0]\n y = lijst[1]\n z = lijst[2]\n x_dot = self.s*(y - x)\n y_dot = x*(self.r - z) - y\n z_dot = x*y - self.b*z\n return [x_dot, y_dot, z_dot]\n \n def df(self, u):\n \n jacobi = np.array([[0,0,0],[0,0,0],[0,0,0]])\n jacobi[0][0] = -1*self.s\n jacobi[0][1] = self.s\n jacobi[1][0] = self.r - float(u[2])\n jacobi[1][1] = -1\n jacobi[1][2] = 
- float(u[0])\n jacobi[2][0] = float(u[1])\n jacobi[2][1] = float(u[0])\n jacobi[2][2] = -1*self.b\n print(jacobi)\n\n return jacobi\n\n def isStable(self, u):\n res = self.df(u)\n w = la.eigvals(res)\n\n if (w[0].real<0 and w[1].real<0 and w[2].real<0):\n return True\n else:\n return False\n\n","sub_path":"Opdracht6/lorenz.py","file_name":"lorenz.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"9593611","text":"import tensorflow as tf\nimport time\n\nfrom bot_code.trainer.base_classes.default_model_trainer import DefaultModelTrainer\nfrom bot_code.trainer.utils import random_packet_creator\nfrom bot_code.trainer.utils import controller_statistics\nfrom bot_code.utils.dynamic_import import get_class\nfrom tqdm import tqdm\n\n\n\nclass RandomPacketTrainer(DefaultModelTrainer):\n total_batches = None\n save_step = None\n teacher_package = None\n teacher_class_name = None\n teacher = None\n controller_stats = None\n start_time = None\n model_save_time = None\n frame_per_file = 20000\n\n def __init__(self):\n super().__init__()\n\n def get_random_data(self, packet_generator, input_formatter):\n state_object = packet_generator.get_random_array()\n output_array = input_formatter.create_input_array(state_object, state_object.time_diff)\n return output_array, state_object\n\n def get_config_name(self):\n return 'randomised_trainer.cfg'\n\n def get_event_filename(self):\n return 'random_packet'\n\n def load_config(self):\n super().load_config()\n # Obtaining necessary data for training from the config\n config = self.create_config()\n self.total_batches = config.getint('Randomised Trainer Configuration', 'total_batches')\n self.save_step = config.getint('Randomised Trainer Configuration', 'save_step')\n # Over here the model data is obtained\n self.teacher_package = config.get('Randomised Trainer Configuration', 'teacher_package')\n self.teacher_class_name = config.get('Randomised Trainer Configuration', 'teacher_class_name')\n\n def setup_trainer(self):\n super().setup_trainer()\n self.teacher = self.teacher_package.split('.')[-1]\n\n def instantiate_model(self, model_class):\n return model_class(self.sess, self.action_handler.get_logit_size(),\n action_handler=self.action_handler, is_training=True,\n optimizer=self.optimizer,\n config_file=self.create_model_config(), teacher=self.teacher)\n\n def setup_model(self):\n super().setup_model()\n teacher_class = get_class(self.teacher_package, self.teacher_class_name)\n teacher = teacher_class(self.batch_size)\n packet_generator = random_packet_creator.TensorflowPacketGenerator(self.batch_size)\n input_state, state_object = self.get_random_data(packet_generator, self.input_formatter)\n\n real_output = teacher.get_output_vector_model(state_object)\n real_indexes = self.action_handler.create_action_indexes_graph(tf.stack(real_output, axis=1))\n self.model.create_model(input_state)\n self.model.create_copy_training_model(model_input=input_state, taken_actions=real_indexes)\n self.model.create_savers()\n self.model.initialize_model()\n\n # Print out what the model uses\n self.model.printParameters()\n\n # Initialising statistics and printing them before training\n self.controller_stats = controller_statistics.OutputChecks(self.sess, self.action_handler,\n self.batch_size, self.model.smart_max,\n state_object=state_object,\n bot=teacher)\n self.controller_stats.create_model()\n\n def _run_trainer(self):\n self.start_time = time.time()\n 
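# Print the controller output statistics once before any training batches run, presumably as a baseline for the per-save printouts inside the loop below.\n        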
self.controller_stats.get_amounts()\n\n total_batches = self.total_batches\n batch_size = self.batch_size\n save_step = 100.0 / self.save_step\n sess = self.sess\n model = self.model\n\n # Percentage to print statistics (and also save the model)\n save_step = int((total_batches * batch_size) / save_step)\n print('training on the equivalent of', self.total_batches * self.batch_size / self.frame_per_file, 'games')\n print('Prints at this percentage:', 100.0 / ((total_batches * batch_size) / save_step))\n model_counter = 0\n self.model_save_time = 0\n\n # Running the model\n for i in tqdm(range(total_batches)):\n model.run_train_step(True, None, i)\n\n if ((i + 1) * batch_size) % save_step == 0:\n print('\\nStats at', (i + 1) * batch_size, 'frames (', i + 1, 'batches): ')\n self.controller_stats.get_amounts()\n print('Saving model', model_counter)\n start_saving = time.time()\n model.save_model(model.get_model_path(model.get_default_file_name() + str(model_counter)),\n global_step=i, quick_save=True)\n # print('saved model in', time.time() - start_saving, 'seconds')\n self.model_save_time += time.time() - start_saving\n model_counter += 1\n\n def finish_trainer(self):\n print('trained on the equivalent of', self.total_batches * self.batch_size / self.frame_per_file, 'games')\n start_saving = time.time()\n self.model.save_model()\n print('saved model in', time.time() - start_saving, 'seconds')\n self.model_save_time += time.time() - start_saving\n\n total_time = time.time() - self.start_time\n print('Total time:', total_time)\n print('Time per batch:', (total_time - self.model_save_time) / (float(self.total_batches)))\n print('Time spent saving', self.model_save_time)\n\n print('Final stats:')\n self.controller_stats.get_amounts()\n self.controller_stats.get_final_stats()\n super().finish_trainer()\n\n\nif __name__ == '__main__':\n RandomPacketTrainer().run()\n","sub_path":"bot_code/trainer/random_packet_trainer.py","file_name":"random_packet_trainer.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"226747716","text":"# Sprite classes\nimport pygame\nfrom pkg.level1.settings import *\nfrom pkg.foundation.display import *\nfrom .interactionmark import *\nfrom .jaildoor import *\nfrom .mob import *\nfrom .speechballoon import *\nfrom .spritesheet import *\nfrom .toilet import *\nfrom .text import *\n\nclass Walls(pygame.sprite.Sprite):\n def __init__(self, x, y, width, height):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((width, height))\n #self.image.fill(RED)\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\nclass Exits(pygame.sprite.Sprite):\n def __init__(self, x, y, width, height):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((width, height))\n self.image.fill(BLACK)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\nclass Bed(pygame.sprite.Sprite):\n def __init__(self, x, y, width, height):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((width, height))\n #self.image.fill(GREEN)\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\nclass Bars(pygame.sprite.Sprite):\n def __init__(self, x, y, width, height):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((width, height))\n #self.image.fill(GREEN)\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n 
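# With the surface left unfilled and BLACK set as its colorkey, this sprite presumably acts as an invisible collision rectangle; its rect is simply positioned at the given coordinates.\n        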
self.rect.x = x\n self.rect.y = y\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, visible_speech_balloon):\n self.visible_speech_balloon = visible_speech_balloon\n self.spritesheet = Spritesheet('pkg/level1/images/spritesheet.png')\n pygame.sprite.Sprite.__init__(self)\n self.walking = False\n self.can_walk = True\n self.current_frame = 0\n self.last_update = 0\n self.load_images()\n self.image = self.walk_down_frames[0]\n self.rect = self.image.get_rect()\n self.rect.x = (width / 2)\n self.rect.y = (height / 2)\n self.speedx = 0\n self.speedy = 0\n\n def load_images(self):\n # Player walk sprites\n self.walk_up_frames = [self.spritesheet.get_image(771, 324, 57, 82),\n self.spritesheet.get_image(771, 407, 57, 82),\n self.spritesheet.get_image(771, 324, 57, 82),\n self.spritesheet.get_image(877, 83, 57, 82)]\n for frame in self.walk_up_frames:\n frame.set_colorkey(BLACK)\n\n\n self.walk_down_frames = [ self.spritesheet.get_image(889, 263, 57, 82),\n self.spritesheet.get_image(780, 174, 57, 82),\n self.spritesheet.get_image(889, 263, 57, 82),\n self.spritesheet.get_image(759, 84, 57, 82)]\n for frame in self.walk_down_frames:\n frame.set_colorkey(BLACK)\n\n\n self.walk_left_frames = [ self.spritesheet.get_image(664, 0, 57, 82),\n self.spritesheet.get_image(659, 246, 57, 82),\n self.spritesheet.get_image(664, 0, 57, 82),\n self.spritesheet.get_image(697, 164, 57, 82)]\n for frame in self.walk_left_frames:\n frame.set_colorkey(BLACK)\n\n\n self.walk_right_frames = [ self.spritesheet.get_image(635, 164, 57, 82),\n self.spritesheet.get_image(602, 82, 57, 82),\n self.spritesheet.get_image(635, 164, 57, 82),\n self.spritesheet.get_image(602, 0, 57, 82)]\n for frame in self.walk_right_frames:\n frame.set_colorkey(BLACK)\n\n def update(self):\n self.animate()\n self.speedx = 0\n self.speedy = 0\n self.keys = pygame.key.get_pressed()\n if (self.keys[pygame.K_UP] or self.keys[pygame.K_w]) and self.can_walk:\n if not self.visible_speech_balloon:\n self.speedy = -6\n self.speedx = 0\n if (self.keys[pygame.K_DOWN] or self.keys[pygame.K_s]) and self.can_walk:\n if not self.visible_speech_balloon:\n self.speedy = 6\n self.speedx = 0\n if (self.keys[pygame.K_LEFT] or self.keys[pygame.K_a]) and self.can_walk:\n if not self.visible_speech_balloon:\n self.speedx = -6\n self.speedy = 0\n if (self.keys[pygame.K_RIGHT] or self.keys[pygame.K_d]) and self.can_walk:\n if not self.visible_speech_balloon:\n self.speedx = 6\n self.speedy = 0\n\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n\n def animate(self):\n now = pygame.time.get_ticks()\n if self.speedx | self.speedy != 0:\n self.walking = True\n else:\n self.walking = False\n # walk animation\n if self.walking:\n if now - self.last_update > 200:\n self.last_update = now\n self.current_frame = (self.current_frame + 1) % len(self.walk_left_frames)\n if self.speedx > 0:\n self.image = self.walk_right_frames[self.current_frame]\n if self.speedx < 0:\n self.image = self.walk_left_frames[self.current_frame]\n if self.speedy < 0:\n self.image = self.walk_up_frames[self.current_frame]\n if self.speedy > 0:\n self.image = self.walk_down_frames[self.current_frame]\n","sub_path":"pkg/level1/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":5717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"253315999","text":"from tkinter import Label, Frame, Button, Checkbutton, Radiobutton, Listbox,\\\n\tTk, StringVar, IntVar, Entry, END, X, BOTH\n\n#create a window\nwindow = Tk()\n\n# 
create a label (text line)\ntext_label = Label(window, text=\"hello tkinter !\")\n# display the label in the windows\ntext_label.pack()\n\n# create a text zone\nvar_text = StringVar()\nline_text = Entry(window, textvariable=var_text, width=30)\nline_text.pack()\n\n# create a check zone\nvar_case = IntVar()\ncase = Checkbutton(window, text=\"don't ask this question\",\n\tvariable=var_case)\ncase.pack()\n\n# create a radio button\nvar_choice = StringVar()\n\nred_choice = Radiobutton(window, text=\"red\", variable=var_choice,\n\tvalue=\"red\")\nblue_choice = Radiobutton(window, text=\"blue\", variable=var_choice,\n\tvalue=\"blue\")\ngreen_choice = Radiobutton(window, text=\"green\", variable=var_choice,\n\tvalue=\"green\")\n\nred_choice.pack()\nblue_choice.pack()\ngreen_choice.pack()\n\n# create a listbox\nliste = Listbox(window)\n\nliste.insert(END, \"hy\")\nliste.insert(END, \"i'm\")\nliste.insert(END, \"rock\")\nliste.insert(END, \"baby\")\n\nliste.pack()\n\n# create a frame\nframe = Frame(window, width=768, height=576, borderwidth=1)\nframe.pack(fill=BOTH)\n\nmsg = Label(frame, text=\"our window\")\nmsg.pack(side=\"top\", fill=X)\n\n# create a button to quit the window\nexit_button = Button(window, text=\"exit\", command=window.quit)\nexit_button.pack()\n\n# begin the loop Tkinter\nwindow.mainloop()\n","sub_path":"tkinter/hello_tkinter.py","file_name":"hello_tkinter.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"271533938","text":"from plone.app.layout.viewlets.common import ViewletBase\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zope.component import getMultiAdapter\n\nclass ViewOptions(ViewletBase):\n render = ViewPageTemplateFile('viewoptions.pt')\n\n def _isType(self, type):\n return self.context.portal_type == type\n\n def _endsWith(self, path):\n return self.context.request.URL.endswith(path)\n\n def getTabs(self):\n\n project = self.context.restrictedTraverse('@@unep_utils').projectParentURL()\n if project:\n tabs = [\n (project, \"Overview\",\n self._isType('Project') and self._endsWith('/base_view')),\n (\"%s/project_general_info\" % project,\"General\",\n self._isType('ProjectGeneralInformation')),\n (\"%s/fmi_folder\" % project, \"Financial\",\n self._isType('FMIFolder')),\n (\"%s/milestones\" % project, \"Milestones\",\n self._isType('Milestone')),\n (\"%s/mne_folder\" % project, \"Monitoring & Evaluation\",\n self._isType('MandEfolder')),\n (\"%s/@@reports\" % project, \"Reports\",\n self._isType('Project') and self._endsWith('/@@reports')),\n (\"%s/documents\" % project, \"Documents\",\n self._isType('Folder')),\n ]\n elif self._isType('ProjectDatabase'):\n tabs = [\n (self.context.absolute_url(), \"Projects A-Z\",\n self._endsWith('/base_view')),\n (\"%s/@@reports\" % self.context.absolute_url(), \"Reports\",\n self._endsWith('/@@reports')),\n ]\n else:\n return []\n\n return [dict(url=i, title=j, selected=k) for i,j,k in tabs]\n","sub_path":"unep.theme/tags/0.6/unep/theme/browser/viewlets/viewoptions.py","file_name":"viewoptions.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"280570558","text":"'''\nCreated on Jul 24, 2013\n\n@package: ally core\n@copyright: 2012 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProvides the indexing for content definitions.\n'''\n\nfrom ally.api.type import 
Type\nfrom ally.container.ioc import injected\nfrom ally.design.processor.attribute import requires, defines\nfrom ally.support.util import firstOf\nfrom ally.support.util_context import listing, iterate\nfrom ally.design.processor.context import Context\nfrom ally.design.processor.handler import HandlerProcessor\n\n# --------------------------------------------------------------------\n\nclass Decoding(Context):\n '''\n The decoding context.\n '''\n # ---------------------------------------------------------------- Defined\n contentDefinitions = defines(dict, doc='''\n @rtype: dictionary{string: Context}\n The definition context for the content decoding indexed by category.\n ''')\n # ---------------------------------------------------------------- Required\n parent = requires(Context)\n name = requires(str)\n children = requires(dict)\n \nclass Definition(Context):\n '''\n The definition context.\n '''\n # ---------------------------------------------------------------- Defined\n name = defines(str, doc='''\n @rtype: string\n The definition name.\n ''')\n types = defines(list, doc='''\n @rtype: list[Type]\n The definition types in the normal order of the appearance.\n ''')\n references = defines(list, doc='''\n @rtype: list[Context]\n The definition references that directly linked with this definition.\n ''')\n # ---------------------------------------------------------------- Required\n category = requires(str)\n\n# --------------------------------------------------------------------\n\n@injected\nclass DefinitionContentHandler(HandlerProcessor):\n '''\n Implementation for a handler that provides the indexing for content definitions.\n '''\n \n separator = None\n # The separator to use for content names, if not provided the names will be placed as simple names.\n \n def __init__(self):\n assert self.separator is None or isinstance(self.separator, str), 'Invalid separator %s' % self.separator\n super().__init__(Definition=Definition)\n \n def process(self, chain, decoding:Decoding, definition:Context=None, **keyargs):\n '''\n @see: HandlerProcessor.process\n \n Index the definition for content.\n '''\n assert isinstance(decoding, Decoding), 'Invalid decoding %s' % decoding\n \n if not definition: return\n assert isinstance(definition, Definition), 'Invalid definition %s' % definition\n \n if decoding.contentDefinitions is None: decoding.contentDefinitions = {}\n assert isinstance(definition.category, str), 'Invalid definition category %s' % definition.category\n assert definition.category not in decoding.contentDefinitions, 'Already a definition for \\'%s\\'' % definition.category\n \n decoding.contentDefinitions[definition.category] = definition\n if self.separator: definition.name = self.separator.join(reversed(listing(decoding, Decoding.parent, Decoding.name)))\n else: definition.name = decoding.name\n \n if decoding.children:\n for child in decoding.children.values():\n assert isinstance(child, Decoding), 'Invalid decoding %s' % child\n if child.contentDefinitions and definition.category in child.contentDefinitions:\n if definition.references is None: definition.references = []\n definition.references.append(child.contentDefinitions[definition.category])\n \n if not definition.types:\n for child in iterate(decoding, lambda decoding: firstOf(decoding.children.values())\n if decoding.children and len(decoding.children) == 1 else None):\n assert isinstance(child, Decoding), 'Invalid decoding %s' % child\n \n if child.type and child.type.isPrimitive:\n assert isinstance(child.type, 
Type), 'Invalid decoding type %s' % child.type\n if definition.types is None: definition.types = []\n definition.types.append(child.type)\n break\n\n return definition\n \n","sub_path":"components/ally-core/ally/core/impl/processor/decoder/content/definition_content.py","file_name":"definition_content.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"427761059","text":"from torch import nn\r\nimport torch.nn.functional as F\r\n#from options_FedMA import add_fit_args\r\n\r\n\r\n\r\nclass CNN(nn.Module):\r\n def __init__(self, args):\r\n super(CNN, self).__init__()\r\n self.conv1 = nn.Conv2d(3, 64, 5)\r\n #self.norm = nn.BatchNorm2d(64)\r\n self.pool = nn.MaxPool2d(2, 2)\r\n self.conv2 = nn.Conv2d(64, 64, 5)\r\n #self.dropout = nn.Dropout(0.2)\r\n self.fc1 = nn.Linear(64*5*5, 384)\r\n self.fc2 = nn.Linear(384, 192)\r\n self.fc3 = nn.Linear(192, args.num_classes)\r\n\r\n def forward(self, x):\r\n x = self.pool(F.relu(self.conv1(x)))\r\n x = self.pool(F.relu(self.conv2(x)))\r\n #x = self.norm(x)\r\n x = x.view(-1, 64*5*5)\r\n #x = self.dropout(x)\r\n x = F.relu(self.fc1(x))\r\n x = F.relu(self.fc2(x))\r\n x = self.fc3(x)\r\n return F.log_softmax(x, dim=1)\r\n\r\nclass CNNContainer(nn.Module):\r\n def __init__(self, input_channel,num_filters, kernel_size, input_dim, hidden_dims, output_dim):\r\n super(CNNContainer, self).__init__()\r\n self.conv1 = nn.Conv2d(input_channel, num_filters, kernel_size, 1) #3,64\r\n self.conv2 = nn.Conv2d(num_filters, num_filters, kernel_size, 1) #new number,64\r\n\r\n self.fc1 = nn.Linear(input_dim, hidden_dims[0]) #1600, 384\r\n self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])#384,192\r\n self.fc3 = nn.Linear(hidden_dims[1], output_dim)#192,10\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = F.max_pool2d(x, 2, 2)\r\n x = F.relu(x)\r\n x = self.conv2(x)\r\n x = F.max_pool2d(x, 2, 2)\r\n x = F.relu(x)\r\n x = x.view(-1, x.size()[1] * x.size()[2] * x.size()[3])\r\n x = self.fc1(x)\r\n x = self.fc2(x)\r\n x = self.fc3(x)\r\n\r\n return x\r\n\r\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"308451514","text":"# -*- coding: UTF-8 -*-\r\n\r\nimport io\r\nimport sys\r\n\r\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')\r\n\r\nimport scrapy\r\n\r\nfrom scrapy.http import Request\r\n\r\nfrom gg.items import DangdangItem\r\n\r\nclass dangSpider(scrapy.Spider):\r\n name = 'dangdang'\r\n allowed_domins = ['dangdang.com']\r\n start_urls = ['http://category.dangdang.com/pg1-cid4002590.html']\r\n\r\n def parse(self,response):\r\n url_list = response.xpath('//a[@class=\"pic\"]/@href').extract()\r\n for url in url_list:\r\n yield Request(url, callback=self.parse_name)\r\n for i in range(2,5):\r\n page_url = 'http://category.dangdang.com/pg{}-cid4002590.html'.format(i)\r\n print(page_url)\r\n yield Request(page_url, callback=self.parse)\r\n\r\n def parse_name(self,response):\r\n items = DangdangItem()\r\n items['title'] = response.xpath('//div[@class=\"name_info\"]/h1/@title').extract()\r\n items['link'] = response.url\r\n items['price'] = response.xpath('//p[@id=\"dd-price\"]/text()').extract()\r\n\r\n yield items\r\n","sub_path":"scrapy 
crawl/dangdang.com/spiders/DangSpider.py","file_name":"DangSpider.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"507637788","text":"import sacred\nimport numpy as np\n\nfrom sounds_deep.contrib.sacred_ingredients.load_data_ingredient import load_data_ingredient, load_data\nfrom sounds_deep.contrib.sacred_ingredients.define_svae_ingredient import define_svae_ingredient, define_model, write_verbose_ops\nfrom sounds_deep.contrib.sacred_ingredients.train_ingredient import train_ingredient, run_training\n\nex = sacred.Experiment(\n 'svae_experiment',\n ingredients=[\n load_data_ingredient, define_svae_ingredient, train_ingredient\n ])\n\nex.observers.append(sacred.observers.TinyDbObserver.create('svae_results'))\n\n\n@ex.automain\ndef run():\n train_gen, _, batches_per_epoch, data_shape = load_data()\n _, input_ph, train_op, verbose_ops_dict = define_model(data_shape)\n output = run_training(write_verbose_ops, train_op, train_gen, input_ph,\n verbose_ops_dict, batches_per_epoch)\n\n max_mean = lambda out_dict_list, metric: float(np.max([np.mean(out_dict[metric]) for out_dict in out_dict_list]))\n\n return {\n 'best_elbo': max_mean(output, 'elbo'),\n 'best_iw_elbo': max_mean(output, 'iw_elbo')\n }\n","sub_path":"sounds_deep/contrib/sacred_ingredients/svae_experiment.py","file_name":"svae_experiment.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"146171042","text":"# -*- coding: utf-8 -*- \nfrom tkinter import *\nimport webbrowser\nimport pyperclip\nimport threading as mp\nimport ctypes\nimport time\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport json\n\n#https://www.lfd.uci.edu/~gohlke/pythonlibs/#pyqt4\n#https://segmentfault.com/a/1190000005165656\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtWebKit import *\n\n\nclass MyBrowser(QWidget):\n\n def __init__(self, parent = None):\n super(MyBrowser, self).__init__(parent)\n self.createLayout()\n self.createConnection()\n\n def say_codehtml(self, getkey, webnum, openpdf = 0):\n self.flag = True\n count = 0;\n urlf = ''\n url = 'http://so.szlcsc.com/global.html?c=&k='+getkey\n r = requests.get(url)\n html = r.text\n #html = urlopen(url).read().decode('gbk')\n soup = BeautifulSoup(html, features='lxml')\n \n all_href = ''\n for link in soup.find_all('a'):\n all_href = link.get('href')\n if(type(all_href) == str):\n if 'item' in all_href:\n urlf = all_href\n \n print(all_href)\n self.flag = False\n break\n \n for link in soup.find_all('a'):\n all_href = link.get('href') \n if(type(all_href) == str):\n if 'item' in all_href:\n count = count + 1 \n if(count > 4):\n self.flag = True\n break\n \n if(self.flag): \n pass\n elif(openpdf == 0):\n urlf = QUrl(urlf)\n if (webnum == 0):\n self.webView1.load(urlf)\n else:\n self.webView2.load(urlf)\n else:\n self.get_pdf(urlf)\n \n def get_pdf(self, url):\n r = requests.get(url)\n html = r.text\n soup = BeautifulSoup(html, features='lxml')\n \n all_href = ''\n link = soup.find_all('span', attrs={'id':'downloadFile'})\n print(link)\n for i in link:\n getid = i.get('param-click')\n\n url = r'http://www.szlcsc.com/order/OrderCommonAction!selectProductPDFAndPCBListJsonp.action?callback=%27loadFilePDFData%27&annexNumber='+getid+'&callback=jQuery183014143773355556677_1522834983842&_=1522835034347'\n r = requests.get(url)\n html = r.content.decode()[61:-1]\n html = 
json.loads(html)\n html = html['fileList'][0]\n print(html['annexNumber'])\n webbrowser.open(r'http://www.szlcsc.com/product/pdf/A_' + html['annexNumber'] + r'.PDF') \n\n def search(self):\n address = str(self.addressBar.text())\n if address:\n #if address.find('://') == -1:\n # address = 'http://' + address\n address = address.split()\n print(address)\n self.say_codehtml(address[0], 0)\n self.say_codehtml(address[1], 1)\n \n def searchpdf(self):\n address = str(self.addressBar.text())\n if address:\n #if address.find('://') == -1:\n # address = 'http://' + address\n address = address.split()\n print(address)\n self.say_codehtml(address[0], 0, 1)\n self.say_codehtml(address[1], 1, 1)\n \n\n def createLayout(self):\n self.setWindowTitle(\"keakon's browser\")\n\n self.addressBar = QLineEdit()\n self.goButton1 = QPushButton(\"&打开网页\")\n self.goButton2 = QPushButton(\"&打开pdf\")\n self.webView1 = QWebView()\n self.webView2 = QWebView()\n\n #创建plot布局\n layout = QGridLayout()\n\n #将控件添加到布局中\n layout.addWidget(self.addressBar, 0, 0, 1, 18)\n layout.addWidget(self.goButton1, 0, 19, 1, 1)\n layout.addWidget(self.goButton2, 0, 20, 1, 1)\n layout.addWidget(self.webView1, 1, 0, 5, 10)\n layout.addWidget(self.webView2, 1, 10, 5, 20)\n\n self.setLayout(layout)\n\n def createConnection(self):\n self.connect(self.addressBar, SIGNAL('returnPressed()'), self.search)\n self.connect(self.addressBar, SIGNAL('returnPressed()'), self.addressBar, SLOT('selectAll()'))\n self.connect(self.goButton1, SIGNAL('clicked()'), self.search)\n self.connect(self.goButton1, SIGNAL('clicked()'), self.addressBar, SLOT('selectAll()'))\n self.connect(self.goButton2, SIGNAL('clicked()'), self.searchpdf)\n self.connect(self.goButton2, SIGNAL('clicked()'), self.addressBar, SLOT('selectAll()'))\n\n \n\nif __name__=='__main__':\n app = QApplication(sys.argv)\n\n browser = MyBrowser()\n browser.show()\n\n sys.exit(app.exec_())\n","sub_path":"text/PYQT4te.py","file_name":"PYQT4te.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"447814421","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 11 01:21:23 2017\n\n@author: NV\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nsys.path.append(r'D:\\Share\\masuyama\\00_Sync\\Tool') # os.pardir)\nimport modu_htn as htn\nhtn.setFigParam()\nplt.close('all')\n\n\nT2 = 7.12*10**-6# measured by echo\ndirPath =''\n\n\"\"\"\nfilenames = []\nfor no in range(1,13):\n filenames.append('Nconst_'+str(no)+'.tpmr.npz')\n\"\"\"\nfilenames =['0013_TPMR_spinecho.tpmr.npz', '0014_TPMR_xy16__Nconst_1.tpmr.npz', '0015_TPMR_xy16__Nconst_2.tpmr.npz', \n'0016_TPMR_xy16__Nconst_3.tpmr.npz', '0017_TPMR_xy16__Nconst_4.tpmr.npz', '0018_TPMR_xy16__Nconst_5.tpmr.npz', \n'0019_TPMR_xy16__Nconst_6.tpmr.npz', '0020_TPMR_xy16__Nconst_7.tpmr.npz', '0021_TPMR_xy16__Nconst_8.tpmr.npz', \n'0022_TPMR_xy16__Nconst_9.tpmr.npz', '0023_TPMR_xy16__Nconst_10.tpmr.npz', '0024_TPMR_xy16__Nconst_11.tpmr.npz', \n'0025_TPMR_xy16__Nconst_12.tpmr.npz', '0026_TPMR_xy16__Nconst_13.tpmr.npz', '0027_TPMR_xy16__Nconst_14.tpmr.npz', \n'0028_TPMR_xy16__Nconst_15.tpmr.npz', '0029_TPMR_xy16__Nconst_16.tpmr.npz']\nsw_1points = False # run_1points を使っているときはON。\n\naddname=filenames[0][0:4]\n\n####################################################\npulseNumDict ={'xy16N':16, 'xy16':16, 'xy8':8, 'spinecho': 1}# number of pi pulse\nNList =[]\nT2List =[]\nT2SDList = []\nmaxSigs = []\nminSigs = []\nfor filename in 
filenames:\n npzfile = np.load(os.path.join(dirPath ,filename) )\n config = npzfile['config'].item() \n if sw_1points: \n data0, data1, data0ref, data1ref = npzfile['data0'],npzfile['data1'],npzfile['data0ref'],npzfile['data1ref']\n else:\n data0, data1, data0ref, data1ref = htn.get_data01_01ref(npzfile)\n \n data = data0 - data0ref - ( data1 - data1ref)\n \n if config['method'] == 'spinecho':\n N= pulseNumDict[config['method']]# each pi pulse had tau-pi-tau\n figtitle = 'spinecho'\n else:\n N = config['Nconst']*pulseNumDict[config['method']]#total pulse \n figtitle = config['method']+' N'+str(config['Nconst'])\n time = 2*npzfile['xdata']*N# each pi pulse had tau-pi-tau\n \n\n \n def T2func(xdataLine, a1, a2, a3):\n #return a1 * np.exp(-xdataLine/ a2 ) #+ a3\n if 1< a3 or a3 <3:\n return a1 * np.exp(- (xdataLine/ a2)**a3 ) #+ a3\n else:\n 10**8\n \n \n popt0 = (max(data), 20*10**-6, 1)\n popt, pcov, SD = htn.data_fit(T2func, time, data, initial_param1=popt0)\n xdataLine = np.linspace(min(time), max(time), 200)\n\n plt.figure(1, figsize = htn.calcFigSize(column =1, h_ratio= 0.8))\n plt.plot(time*10**6, data, 'o-')\n #plt.plot(xdataLine*10**6, T2func(xdataLine, *popt), 'r-')\n\n plt.figure(2, figsize = htn.calcFigSize(column =1, h_ratio= 0.8))\n plt.plot(time*10**6, data/max(data), 'o-')\n\n plt.figure(3, figsize = htn.calcFigSize(column =1, h_ratio= 1.2))\n plt.plot(npzfile['xdata']*10**6, data, 'o-')\n \n \n plt.figure(figsize = htn.calcFigSize(column =1, h_ratio= 0.8))\n plt.title(figtitle+r': T$_2$ = '+'{:.3f}'.format(popt[1]*10**6)+r' $\\mu$s, '+'n = {:.2f}'.format(popt[2]))\n plt.axhline(y=0, xmin=0, xmax=1, linewidth=0.5, color = 'k', alpha=0.5 )\n plt.plot(time*10**6, data, 'bo', alpha = 0.7)\n plt.plot(xdataLine*10**6, T2func(xdataLine, *popt), 'r-')\n plt.xlabel(r'Time ($\\mu$s)')\n plt.tight_layout()\n plt.savefig(filename + '_fit.png', dpi = 300)#transparent=True)\n if len(filenames)!=1:\n plt.close()\n\n \n NList.append(N)\n T2List.append(popt[1])\n T2SDList.append(SD[1])\n maxSigs.append(max(data))\n minSigs.append(min(data))\n\nplt.figure(1)\nplt.axhline(y=0, xmin=0, xmax=1, linewidth=0.5, color = 'k', alpha=0.5 )\nplt.xlabel(r'Time ($\\mu$s)')\nplt.tight_layout()\nplt.xscale('log')\n\n\nplt.figure(2)\nplt.axhline(y=0, xmin=0, xmax=1, linewidth=0.5, color = 'k', alpha=0.5 )\nplt.xlabel(r'Time ($\\mu$s)')\nplt.tight_layout()\nplt.xscale('log')\n\nplt.figure(3)\nplt.axhline(y=0, xmin=0, xmax=1, linewidth=0.5, color = 'k', alpha=0.5 )\nplt.xlabel(r'Pulse duration ($\\mu$s)')\nplt.tight_layout()\nplt.xscale('log')\n\n\ndef T_coh(T2, N):\n # de2010universal - Universal dynamical decoupling of a single solid-state\n # spin from a spin bath\n return T2 * N**(2 / 3)\nNList =np.array(NList)\n \nif len(filenames)>1 and T2!=None:\n plt.figure(figsize = htn.calcFigSize(column =1, h_ratio= 0.8))\n Nline = np.linspace(min(NList), max(NList))\n plt.plot(Nline, T_coh(T2, Nline) / T2, 'r-', alpha=0.5)\n \n #plt.plot(NList, np.array(T2List) / T2, 'bo')\n plt.errorbar(NList, np.array(T2List) / T2, yerr = np.array(T2SDList)/T2, fmt='bo',ecolor='b')# 本当はT2のエラーも必要かも。誤差伝搬\n \n # plt.yscale('log')\n plt.ylabel(r'T $_2$ enhancement')\n plt.xlabel('Total pulse number')\n #plt.xscale('log')\n #plt.yscale('log')\n plt.xlim(min(NList)*0.9, max(NList)*1.1)\n plt.tight_layout()\n plt.savefig(addname+'_T2_enhancement.png',dpi=300)\n print('T2List',T2List)\n \n \n plt.figure(figsize = htn.calcFigSize(column =1, h_ratio= 0.8))\n plt.plot(NList, maxSigs, 'bo')\n plt.xlabel('Total pulse number')\n 
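# y-axis: the largest raw signal value recorded for each measurement (maxSigs collected in the loop above), plotted against total pulse number N.\n    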
plt.ylabel('Maximum signal')\n plt.tight_layout()\n \n plt.figure(figsize = htn.calcFigSize(column =1, h_ratio= 0.8))\n plt.plot(NList, minSigs, 'bo')\n plt.xlabel('Total pulse number')\n plt.ylabel('Minimum signal')\n plt.tight_layout()","sub_path":"AnaTools/ana_T2_TPMR.py","file_name":"ana_T2_TPMR.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"602573405","text":"def fn():\r\n n,m = map(int,input().strip().split())\r\n a = []\r\n count = 0\r\n for i in range(n):\r\n a.append(list(map(int,input().strip().split())))\r\n for i in range(n):\r\n for j in range(m):\r\n if a[i][j] == 1:\r\n #print(i,j)\r\n #if j >= len(a[i])-1 or a[i][j+1] == 0:\r\n #print(\"hi\")\r\n if (i <= 0 or a[i-1][j] == 0) and (j >= len(a[i])-1 or a[i][j+1] == 0):\r\n count += 1\r\n print(count)\r\nfn()\r\n\r\n","sub_path":"python/island.py","file_name":"island.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"279412089","text":"import socket\n\ns = socket.socket()\n\nip = \"127.0.0.1\"\nport = int(input(\"Friend's port: \"))\n\ns.connect((ip, port))\n\nmsg = \" \"\n\nwhile msg != \"q\":\n m = input(\"You: \")\n s.send(m.encode())\n m2 = s.recv(1024)\n msg = m2.decode()\n print(\"%s : %s\"%(port, msg))\n\nprint(\"Exiting...\")\ns.close()\n","sub_path":"client2.py","file_name":"client2.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"441906945","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 1 16:07:50 2020\n\n@author: Reaz \n\nascending or from lowest to highest \n\n\nDocumentation: We are comparing each element to its left \nand swapping it. Therefore we are not targeting each element \nindividually from the array in the outer for loop, but rather\ntargeting the indexes sequentially. Therefore, we may/will \ntarget the same element more than once, but that's okay because\nthe element to its left has already been taken care of when we \nswapped. \n\n \n \n\"\"\" \n\n\narray=[5,1,4,2,13,2,21,3]\na=array\n\nfor i in range (len(a)): \n for i in range (len(a)-1): \n if a[i]> a[i+1]: \n temp=a[i] \n a[i]=a[i+1] \n a[i+1]=temp \n \n \nsorted_array=a \nprint (f'the sorted array is {sorted_array}') ","sub_path":"search and sort algorithms/bubblesort.py","file_name":"bubblesort.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"472075306","text":"from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.view import view_config, view_defaults\nfrom pyramid.response import Response\nfrom github import Github\n\nENDPOINT = \"webhook\"\n\n@view_defaults(\n route_name=ENDPOINT, renderer=\"json\", request_method=\"POST\"\n)\nclass PayloadView(object):\n \"\"\"\n View receiving of Github payload. By default, this view it's fired only if\n the request is json and method POST.\n \"\"\"\n\n def __init__(self, request):\n self.request = request\n # Payload from Github, it's a dict\n self.payload = self.request.json\n\n @view_config(header=\"X-Github-Event:push\")\n def payload_push(self):\n \"\"\"This method is a continuation of PayloadView process, triggered if\n header HTTP-X-Github-Event type is Push\"\"\"\n print(\"No. 
commits in push:\", len(self.payload['commits']))\n return Response(\"success\")\n\n @view_config(header=\"X-Github-Event:pull_request\")\n def payload_pull_request(self):\n \"\"\"This method is a continuation of PayloadView process, triggered if\n header HTTP-X-Github-Event type is Pull Request\"\"\"\n print(\"PR\", self.payload['action'])\n print(\"No. Commits in PR:\", self.payload['pull_request']['commits'])\n\n return Response(\"success\")\n\n @view_config(header=\"X-Github-Event:ping\")\n def payload_else(self):\n print(\"Pinged! Webhook created with id {}!\".format(self.payload[\"hook\"][\"id\"]))\n return {\"status\": 200}\n\n\ndef create_webhook():\n \"\"\" Creates a webhook for the specified repository.\n\n This is a programmatic approach to creating webhooks with PyGithub's API. If you wish, this can be done\n manually at your repository's page on Github in the \"Settings\" section. There is a option there to work with\n and configure Webhooks.\n \"\"\"\n\n USERNAME = \"\"\n PASSWORD = \"\"\n OWNER = \"\"\n REPO_NAME = \"\"\n EVENTS = [\"push\", \"pull_request\"]\n HOST = \"\"\n\n config = {\n \"url\": \"http://{host}/{endpoint}\".format(host=HOST, endpoint=ENDPOINT),\n \"content_type\": \"json\"\n }\n\n g = Github(USERNAME, PASSWORD)\n repo = g.get_repo(\"{owner}/{repo_name}\".format(owner=OWNER, repo_name=REPO_NAME))\n repo.create_hook(\"web\", config, EVENTS, active=True)\n\n\nif __name__ == \"__main__\":\n config = Configurator()\n\n create_webhook()\n\n config.add_route(ENDPOINT, \"/{}\".format(ENDPOINT))\n config.scan()\n\n app = config.make_wsgi_app()\n server = make_server(\"0.0.0.0\", 80, app)\n server.serve_forever()","sub_path":"tests/webhooks.py","file_name":"webhooks.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"649742290","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\turl(\n\t\tr'^api/v1/diagnosis_codes/(?P[0-9]+)$',\n\t\tviews.get_delete_update_diagnosis_code,\n\t\tname='get_delete_update_diagnosis_code'\n\t\t),\n\turl(\n\t\tr'^api/v1/diagnosis_codes/$',\n\t\tviews.get_post_diagnosis,\n\t\tname='get_post_diagnosis'\n\t\t)\n]","sub_path":"diagnosis_codes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"84160630","text":"\"\"\"Support remote entity for Xiaomi Miot.\"\"\"\nimport logging\nimport time\nfrom functools import partial\n\nfrom homeassistant.const import * # noqa: F401\nfrom homeassistant.components import remote\nfrom homeassistant.components.remote import (\n DOMAIN as ENTITY_DOMAIN,\n RemoteEntity,\n)\n\nfrom miio.chuangmi_ir import (\n ChuangmiIr,\n DeviceException,\n)\n\nfrom . 
import (\n DOMAIN,\n CONF_MODEL,\n XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA, # noqa: F401\n MiotEntity,\n async_setup_config_entry,\n bind_services_to_entries,\n)\nfrom .core.miot_spec import (\n MiotSpec,\n)\nfrom .core.xiaomi_cloud import (\n MiotCloud,\n MiCloudException,\n)\n\n_LOGGER = logging.getLogger(__name__)\nDATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}'\n\nSERVICE_TO_METHOD = {}\n\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN)\n\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n hass.data.setdefault(DATA_KEY, {})\n hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities\n model = str(config.get(CONF_MODEL) or '')\n entities = []\n miot = config.get('miot_type')\n if miot:\n spec = await MiotSpec.async_from_type(hass, miot)\n if spec.name in ['remote_control', 'ir_remote_control']:\n if 'chuangmi.remote.' in model or 'chuangmi.ir.' in model:\n entities.append(MiotRemoteEntity(config, spec))\n elif model in [\n 'xiaomi.wifispeaker.l05c',\n 'xiaomi.wifispeaker.lx5a',\n 'xiaomi.wifispeaker.lx06',\n ]:\n entities.append(MiotRemoteEntity(config, spec))\n for entity in entities:\n hass.data[DOMAIN]['entities'][entity.unique_id] = entity\n async_add_entities(entities, update_before_add=True)\n bind_services_to_entries(hass, SERVICE_TO_METHOD)\n\n\nclass MiotRemoteEntity(MiotEntity, RemoteEntity):\n def __init__(self, config, miot_spec: MiotSpec):\n self._miot_spec = miot_spec\n super().__init__(miot_service=None, config=config, logger=_LOGGER)\n host = config.get(CONF_HOST)\n token = config.get(CONF_TOKEN)\n self._device = ChuangmiIr(host, token)\n self._attr_should_poll = False\n\n async def async_added_to_hass(self):\n await super().async_added_to_hass()\n did = self.miot_did\n mic = self.miot_cloud\n irs = []\n if did and isinstance(mic, MiotCloud):\n dls = await mic.async_get_devices() or []\n for d in dls:\n if did != d.get('parent_id'):\n continue\n ird = d.get('did')\n rdt = await self.hass.async_add_executor_job(\n partial(mic.request_miot_api, 'v2/irdevice/controller/keys', {'did': ird})\n ) or {}\n kys = (rdt.get('result') or {}).get('keys', {})\n if not kys:\n self.logger.info('%s: IR device %s(%s) have no keys: %s', self.name, ird, d.get('name'), rdt)\n irs.append({\n 'did': ird,\n 'name': d.get('name'),\n 'keys': kys,\n })\n if irs:\n self._state_attrs['ir_devices'] = irs\n\n def is_on(self):\n return True\n\n def send_remote_command(self, command, **kwargs):\n \"\"\"Send commands to a device.\"\"\"\n repeat = kwargs.get(remote.ATTR_NUM_REPEATS, remote.DEFAULT_NUM_REPEATS)\n delays = kwargs.get(remote.ATTR_DELAY_SECS, remote.DEFAULT_DELAY_SECS)\n did = kwargs.get(remote.ATTR_DEVICE)\n for _ in range(repeat):\n for cmd in command:\n try:\n if f'{cmd}'[:4] == 'key:':\n ret = self.send_cloud_command(did, cmd)\n else:\n ret = self._device.play(cmd)\n self.logger.info('%s: Send IR command %s(%s) result: %s', self.name, cmd, kwargs, ret)\n except (DeviceException, MiCloudException) as exc:\n self.logger.error('%s: Send IR command %s(%s) failed: %s', self.name, cmd, kwargs, exc)\n time.sleep(delays)\n\n def send_cloud_command(self, did, command):\n key = f'{command}'\n if key[:4] == 'key:':\n key = key[4:]\n try:\n key = int(key)\n except (TypeError, ValueError):\n key = None\n if not did or not key:\n self.logger.warning('%s: IR command %s to %s invalid for cloud.', self.name, command, did)\n return 
False\n mic = self.miot_cloud\n if not mic:\n return False\n res = mic.request_miot_api('v2/irdevice/controller/key/click', {\n 'did': did,\n 'key_id': key,\n }) or {}\n return res\n\n async def async_send_command(self, command, **kwargs):\n \"\"\"Send commands to a device.\"\"\"\n await self.hass.async_add_executor_job(\n partial(self.send_remote_command, command, **kwargs)\n )\n\n def learn_command(self, **kwargs):\n \"\"\"Learn a command from a device.\"\"\"\n raise NotImplementedError()\n\n def delete_command(self, **kwargs):\n \"\"\"Delete commands from the database.\"\"\"\n raise NotImplementedError()\n","sub_path":"custom_components/xiaomi_miot/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"348469483","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse, FileResponse\nfrom django.shortcuts import render, redirect\nfrom django.template import loader\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\nfrom webprojectapp.models import Restaurant # 상원\nfrom datetime import datetime, timedelta # 수정\nfrom webprojectapp.models import Hospital # 강용\nfrom webprojectapp.models import Clinic # 강용\n# from webprojectapp.models import Board # 하영\n\n\n# 수정님\ndef main(request):\n template = loader.get_template('main.html')\n date = datetime.now() - timedelta(1)\n context = {'current_date': date}\n return HttpResponse(template.render(context, request))\n\n\ndef register(request):\n res_data = None\n if request.method == 'POST':\n useremail = request.POST.get('useremail')\n firstname = request.POST.get('firstname', None)\n lastname = request.POST.get('lastname', None)\n password = request.POST.get('password', None)\n re_password = request.POST.get('re-password', None)\n res_data = {}\n if User.objects.filter(username=useremail):\n res_data['error'] = '이미 가입된 아이디(이메일주소)입니다.'\n elif password != re_password:\n res_data['error'] = '비밀번호가 다릅니다.'\n else:\n user = User.objects.create_user(username=useremail,\n first_name=firstname,\n last_name=lastname,\n password=password)\n auth.login(request, user)\n redirect(\"index.html\")\n return render(request, 'register.html', res_data)\n\n\ndef login(request):\n if request.method == \"POST\":\n useremail = request.POST.get('useremail', None)\n password = request.POST.get('password', None)\n user = auth.authenticate(username=useremail, password=password)\n if user is not None:\n auth.login(request, user)\n return redirect(\"main\")\n else:\n return render(request, 'login.html', {'error': '사용자 아이디 또는 패스워드가 틀립니다.'})\n else:\n return render(request, 'login.html')\n\n\ndef logout(request):\n if request.user.is_authenticated:\n auth.logout(request)\n return redirect(\"main\")\n\n\ndef only_member(request):\n context = None\n if request.user.is_authenticated:\n context = {'logineduser': request.user.last_name + request.user.first_name}\n return render(request, 'member.html', context)\n\n\n# 상원\ndef map1(request):\n search = request.GET.get('search')\n Restaurants = Restaurant.objects.all()\n Rname = []\n Raddress = []\n for rest in Restaurants:\n Rname.append(rest.r_name)\n Raddress.append(rest.r_address)\n\n if request.method == 'POST':\n txt = (request.POST['text'])\n context = {\n \"txt\": txt,\n \"Rname\": Rname,\n \"Raddress\": Raddress,\n \"search\": search,\n }\n else:\n context = {\n \"Rname\": Rname,\n \"Raddress\": Raddress,\n \"search\": search,\n }\n return 
render(request, 'map1.html', context)\n\n\n# 강용님\ndef map2(request):\n hospitals = Hospital.objects.all()\n hname = []\n haddress = []\n htype = []\n hnumber = []\n for hospital in hospitals:\n hname.append(hospital.h_name)\n haddress.append(hospital.h_address)\n htype.append(hospital.h_type)\n hnumber.append(hospital.h_number)\n\n search = request.GET.get('search')\n if request.method == 'POST':\n txt = (request.POST['text'])\n context = {\n \"txt\": txt,\n \"search\": search,\n \"hospitals\": hospitals,\n \"hname\": hname,\n \"haddress\": haddress,\n \"htype\": htype,\n \"hnumber\": hnumber\n }\n else:\n context = {\n \"search\": search,\n \"hospitals\": hospitals,\n \"hname\": hname,\n \"haddress\": haddress,\n \"htype\": htype,\n \"hnumber\": hnumber\n }\n return render(request, 'map2.html', context)\n\n\ndef map2_1(request):\n clinics = Clinic.objects.all()\n cname = []\n caddress = []\n cnumber = []\n for clinic in clinics:\n cname.append(clinic.name)\n caddress.append(clinic.address)\n cnumber.append(clinic.number)\n\n search = request.GET.get('search')\n if request.method == 'POST':\n txt = (request.POST['text'])\n context = {\n \"txt\": txt,\n \"search\": search,\n \"clinics\": clinics,\n \"cname\": cname,\n \"caddress\": caddress,\n \"cnumber\": cnumber\n }\n else:\n context = {\n \"search\": search,\n \"clinics\": clinics,\n \"cname\": cname,\n \"caddress\": caddress,\n \"cnumber\": cnumber\n }\n return render(request, 'map2_1.html', context)\n\n\n# 하영님\n# def board(request):\n# # 모든 Board를 가져와 boardlist에 저장\n# boardlist = Board.objects.all()\n# context = {'boardlist': boardlist}\n# return render(request, 'board.html', context)\n#\n#\n# def board_view(request, pk):\n# # 게시글(Board) 중 pk를 이용해 하나의 게시글(post)를 검색\n# board = Board.objects.get(pk=pk)\n# return render(request, 'boards/board_view.html', {'board': board}) # 추가\n#\n#\n# def board_write(request):\n# return render(request, 'boards/board_write.html')\n\n\n# def new_post(request):\n# if request.method == 'POST':\n# if request.POST['mainphoto'] :\n# new_article = Board.objects.create(\n# no=request.POST['no'],\n# writer=request.POST['writer'],\n# postname=request.POST['postname'],\n# contents=request.POST['contents'],\n# satisfaction=request.POST['satisfaction'],\n# mainphoto=request.POST['mainphoto'],\n# )\n# else:\n# new_article = Board.objects.create(\n# no=request.POST['no'],\n# writer=request.POST['writer'],\n# postname=request.POST['postname'],\n# contents=request.POST['contents'],\n# satisfaction=request.POST['satisfaction'],\n# mainphoto=request.POST['mainphoto'],\n# )\n# return redirect('board')\n# return render(request, 'boards/new_write.html')\n\n# def write(request):\n# if request.method == 'POST' and request.user.is_authenticated:\n# writer = request.user\n# postname = request.POST['postname']\n# contents = request.POST['contents']\n# mainphoto = request.POST['mainphoto']\n#\n# vdate = Board(\n# writer=request.user,\n# postname=postname,\n# contents=contents,\n# mainphoto=mainphoto)\n# vdate.save()\n# return redirect('board')\n#\n#\n# def remove_board(request, pk):\n# board = Board.objects.get(pk=pk)\n# if request.method == 'POST':\n# board.delete()\n# return redirect('../../')\n# return render(request, 'boards/remove_post.html', {'board': board})","sub_path":"progress/0217/1차작업/webprojectapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"34632730","text":"from flask import 
request,jsonify\nfrom . import api\nfrom ai import mongoClient\n# from ai.controller.train import scheduler\nfrom ai.controller.train import trainModel\n\n@api.route('/messenger/add', methods=['POST'])\ndef messageAdd():\n response = request.get_json()\n message = response['message']\n db = mongoClient.ai_db\n db.ai_data.insert_many(message)\n res = trainModel()\n if res['code'] == 200:\n return jsonify({\"code\":200, \"msg\":\"success\"})\n else:\n return jsonify({\"code\":400,'msg':'fail'})","sub_path":"ai/controller/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"117058161","text":"\"\"\"\nVishnu... thank you for electronics\nAuthor :- Manas Kumar Mishra\nTask :- Payment process (client for card company and server for payment gateway)\nBegin date :- 21-March-2021 \n\"\"\"\n\nfrom socket import*\nfrom random import randint\nfrom AES_encrypt import* #Python file name for encryption\nfrom AES_Decrypt import* #Python file name for decryption\n\nglobal dataofUsers\ndataofUsers ={\n\t'MANAS':['MANAS', '1001 0110 2002 0011', '2023-07-31', '000', 'MANAS KUMAR MISHRA'],\n\t'MISS KR':['MISS KR','1001 0110 2002 0026','2023-07-31','001','KARTHIKA RAJESH'],\n\t\"GANESH\":['GANESH','1001 0110 2002 0006','2023-07-31','002','GANESH T S']\n}\n\n# function for converting the binary message into list\n# Input is receved message from payment gateway\n# output is full message in list\ndef give_list(recvMessage):\n\trecvMessage = recvMessage.decode()\n\n\trecvMessage2 = eval(recvMessage)\n\n\treturn recvMessage2\n\n\n\n\npayProPortNumber = 9999\n\nTPSportnumber = 9988\nTPSipaddress = '169.254.142.108'\n\npayProInstance = socket(AF_INET, SOCK_STREAM)\nTPSsocket = socket(AF_INET,SOCK_STREAM)\ntry:\n\tTPSsocket.connect((TPSipaddress, TPSportnumber))\n\tprint(\"Connection accepted with TPS...:)\")\nexcept:\n\tprint(\"Connection not accepted with TPS!!!\")\n\n\n\n# TPSsocket.connect((TPSipaddress, TPSportnumber))\n\n\npayProInstance.bind(('',payProPortNumber))\npayProInstance.listen(1)\n\n\nprint(\"Payment processor is listening...\")\nwhile 1:\n\t\n\tpaygateInstance, paygetAddress = payProInstance.accept()\n\t\n\tprint(\"Connection accepted...:)\")\n\t\n\t# recieving user data \n\n\tShared=share_key() #sharing public key for encryption\n\tpaygateInstance.send(Shared.encode())\n\t\n\t#Decryption\n\trecvMessage = paygateInstance.recv(4096)\n\tprint(\"Something RECEIVED...:)\",recvMessage)\n\t#recvMsg = give_list(recvMessage)\n\trecvmsg=recvMessage.decode()\n\trecvMsg=eval(recvmsg)\t\n\trecvMsg = AES_Decrypt(recvMsg[0],recvMsg[1])\t\n\trecvMsg=recvMsg.split(\",\")\n\tprint('decrypted message',recvMsg)\n\t\n\tif(recvMsg==list(dataofUsers['MANAS']) or recvMsg==list(dataofUsers['MISS KR']) or recvMsg == list(dataofUsers['GANESH'])):\n\t\t#send feedback after data verification\n\t\tsharekey=paygateInstance.recv(2048)\n\t\tPlaintext='True'\n\t\tencrypteddata=str(AES_encrypt(sharekey,Plaintext))\n\t\tpaygateInstance.send(encrypteddata.encode())\n\n\t\t# Receiving the amount details \n\t\tprint(\"Receiveing the amount info....\")\n\t\tshky=share_key()\n\t\tpaygateInstance.send(shky.encode())\n\t\trecvAmount = paygateInstance.recv(2048)\n\t\trecvAmt = recvAmount.decode()\n\t\trecvAmt = eval(recvAmt)\n\t\trecvAmt = AES_Decrypt(recvAmt[0],recvAmt[1])\n\t\trecvAmt=recvAmt.split(\",\")\n\n\t\tprint(\"Amount received...\")\n\t\t\n\t\tprint(\"Amount requested :\", 
recvAmt)\n\n\t\tpaygateInstance.close()\n\n\t\t# TPS PART. \n\t\t# Here , we are making a packet for communicating with TPS layer\n\t\tpacket = []\n\t\t# Card number \n\t\tpacket.append(recvMsg[1]) \n\n\t\t# Card holder name \n\t\tpacket.append(recvMsg[4])\n\n\t\t# Amount and merchant \n\t\tpacket.append(recvAmt[0])\n\t\tpacket.append(recvAmt[1])\n\t\tpacket.append(recvAmt[2])\n\n\t\t\t\n\n\t\t# TPSsocket.connect((TPSipaddress, TPSportnumber))\n\t\ttry:\n\t\t\tPlaintext=str(packet[0])\n\t\t\tfor i in range(1,len(packet)):\n\t\t\t\tPlaintext=Plaintext +','+ packet[i]\n\t\t\n\t\t\tshkey=TPSsocket.recv(2048)\n\t\t\tencrypteddata=str(AES_encrypt(shkey,Plaintext))\n\t\t\tTPSsocket.send(encrypteddata.encode())\n\t\t\tprint('packet',packet)\n\t\t\tprint('packet',Plaintext)\n\t\t\tprint(\"Message (packet) sent to TPS\")\n\t\texcept:\n\t\t\tprint(\"Not sending data to TPS\")\n\n\t\t\n\t\tprint(\"Amount requested :\", recvAmt[0])\n\n\telse:\n\t\tshkey=TPSsocket.recv(2048)\n\t\tPlaintext='False'\n\t\tencrypteddata=str(AES_encrypt(shkey,Plaintext))\n\t\tTPSsocket.send(encrypteddata.encode())\n\t\tpaygateInstance.close()\n\n\t\tprint(\"Wrong detalis\")\n\n\n\t\n\t#PP CONNECTED TO APP\n\totpinstance, otpaddress = payProInstance.accept()\n\t\n\tprint(\"Ready to listen OTP...\")\n\t\n\tshky=share_key()\n\totpinstance.send(shky.encode())\n\trecvotp = otpinstance.recv(4096)\n\trecvotp = recvotp.decode()\n\trecvotp = eval(recvotp)\n\trecvOTP = AES_Decrypt(recvotp[0],recvotp[1])\n\tprint('otp recieved')\n\t#recvOTP = otpinstance.recv(2048)\n\t\n\t# Todo :- Encryption and decryption\n\tshkey=TPSsocket.recv(2048)\t\n\tencrypteddata=str(AES_encrypt(shkey,recvOTP))\n\tTPSsocket.send(encrypteddata.encode())\n\n\tprint(\"OTP send to the TPS\")\n\n\tshky=share_key()\t\n\tTPSsocket.send(shky.encode())\n\trecve = TPSsocket.recv(2048)\n\trecve = recve.decode()\n\trecve = eval(recve)\t\n\trecve = AES_Decrypt(recve[0],recve[1])\n\n\tprint(recve)\n\t\n\tprint(\"Received feedback about OTP\")\n\tshkey=otpinstance.recv(2048)\t\n\tencrypteddata=str(AES_encrypt(shkey,recve))\n\totpinstance.send(encrypteddata.encode())\n\n\t# TPSsocket.close()\n\totpinstance.close()\n\n \n \n\n\n","sub_path":"paymentProcessor.py","file_name":"paymentProcessor.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"319924392","text":"#!/usr/bin/env python\n\n# Adapted from https://github.com/spiralout/external-sort\n\n# external sort:\n# 1. split file into separate pieces, each of size block_size, sorting the words in the piece and emitting a partially sorted shard\n# 2. 
merge all the shards together into one file\n\nimport os\nimport sys\nimport argparse\n\nclass FileSplitter(object):\n BLOCK_FILENAME_FORMAT = 'block_{0}.dat'\n\n def __init__(self, filename):\n self.filename = filename\n self.block_filenames = []\n\n def write_block(self, data, block_number):\n filename = self.BLOCK_FILENAME_FORMAT.format(block_number)\n file = open(filename, 'w')\n file.write(data)\n file.close()\n self.block_filenames.append(filename)\n\n def get_block_filenames(self):\n return self.block_filenames\n\n def split(self, block_size, sort_key=None):\n file = open(self.filename, 'r')\n i = 0\n\n while True:\n # NOTE: the argument passed to readlines is sizehint, which could potentially be exceeded\n # ---\n # Per the Python manual: i\n # ``f.readlines()`` returns a list containing all the lines of data in the file.\n # If given an optional parameter *sizehint*, it reads that many bytes from the\n # file and enough more to complete a line, and returns the lines from that. \n # ---\n # If the problem has very strict memory requirements, a workaround should be implemented \n\n lines = file.readlines(block_size) \n\n if lines == []:\n break\n\n if sort_key is None:\n lines.sort()\n else:\n lines.sort(key=sort_key)\n\n self.write_block(''.join(lines), i)\n i += 1\n\n def cleanup(self):\n for filename in self.block_filenames:\n try:\n os.remove(filename)\n except OSError as e: \n print (\"Failed with:\", e.strerror)\n print (\"Error code:\", e.code)\n\nclass NWayMerge(object):\n def select(self, choices):\n min_index = -1\n\n for an_index in choices: # initialize min_index\n min_index = an_index\n break\n\n for i in choices:\n if choices[i] < choices[min_index]:\n min_index = i\n return min_index\n\nclass FilesArray(object):\n def __init__(self, files):\n self.files = files\n self.empty = set()\n self.num_buffers = len(files)\n self.buffers = {i: None for i in range(self.num_buffers)}\n\n def get_dict(self):\n return {i: self.buffers[i] for i in range(self.num_buffers) if i not in self.empty}\n\n def refresh(self):\n for i in range(self.num_buffers):\n if self.buffers[i] is None and i not in self.empty:\n self.buffers[i] = self.files[i].readline()\n\n if self.buffers[i] == '':\n self.empty.add(i)\n\n if len(self.empty) == self.num_buffers:\n return False\n\n return True\n\n def unshift(self, index):\n value = self.buffers[index]\n self.buffers[index] = None\n return value\n\nclass FileMerger(object):\n def __init__(self, merge_strategy):\n self.merge_strategy = merge_strategy\n\n def merge(self, filenames, outfilename, buffer_size):\n outfile = open(outfilename, 'w', buffer_size)\n buffers = FilesArray(self.get_file_handles(filenames, buffer_size))\n\n while buffers.refresh():\n min_index = self.merge_strategy.select(buffers.get_dict())\n outfile.write(buffers.unshift(min_index))\n\n def get_file_handles(self, filenames, buffer_size):\n files = {}\n\n for i in range(len(filenames)):\n files[i] = open(filenames[i], 'r', buffer_size)\n\n return files\n\nclass ExternalSort(object):\n def __init__(self, block_size):\n self.block_size = block_size\n\n def sort(self, filename, sort_key=None):\n num_blocks = self.get_number_blocks(filename, self.block_size)\n splitter = FileSplitter(filename)\n splitter.split(self.block_size, sort_key)\n\n merger = FileMerger(NWayMerge())\n buffer_size = int(self.block_size / (num_blocks + 1)) # at any point in time, num_blocks + 1 files will be open (including output file) \n merger.merge(splitter.get_block_filenames(), filename + '_sorted.out', 
buffer_size)\n\n splitter.cleanup()\n\n def get_number_blocks(self, filename, block_size):\n return (os.stat(filename).st_size / block_size) + 1\n\ndef parse_memory(string):\n if string[-1].lower() == 'k':\n return int(string[:-1]) * 1024\n elif string[-1].lower() == 'm':\n return int(string[:-1]) * 1024 * 1024\n elif string[-1].lower() == 'g':\n return int(string[:-1]) * 1024 * 1024 * 1024\n else:\n return int(string)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--mem',\n help='amount of memory to use for sorting',\n default='10M')\n parser.add_argument('--filename',\n help='name of file to sort',\n default=\"out.txt\")\n args = parser.parse_args()\n\n sorter = ExternalSort(parse_memory(args.mem))\n sorter.sort(args.filename)\n\n# Verify against slow implementation\n\"\"\"\n sortedFile = args.filename + '_sorted.out'\n sortedF = open(sortedFile, 'r')\n sortedFlines = sortedF.readlines()\n\n ref = open(args.filename, 'r')\n sortedRefLines = ref.readlines()\n sortedRefLines.sort()\n\n n1 = len(sortedRefLines)\n if n1 != len(sortedFlines):\n print(\"Sorted file is not equivalent.\")\n for i in range(n1):\n if sortedRefLines[i] != sortedFlines[i]:\n print(\"Sorted file is not equivalent.\")\n break\n print(\"Sorted file is correct!\")\n\"\"\"\n\nif __name__ == '__main__':\n main()\n","sub_path":"external-sort/extsort.py","file_name":"extsort.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"211423816","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport re\n#To print Chinese\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus'] =False\n\n#Definition of tree nodes\nclass node:\n def __init__(self):\n self.children = []\n self.value = -1\n self.critical = None\n self.div = 0\n self.accuracy = -1\npass\n\ndef Entropy(dataset, y, k):\n if len(dataset) == 0:\n return 0\n p = [0 for i in range(k)]\n for i in range(len(dataset)):\n p[y[i]] += 1\n p = [p[i]/len(dataset) for i in range(k)]\n s = 0\n #print(p)\n for i in range(k):\n if p[i]!=0:\n s += p[i]*math.log2(p[i])\n return -s\npass\n\n#this fuuntion return the gain list of all attributes\ndef Gain(dataset, label, y, k):\n gain = []\n entro = []\n for i in range(len(label)):\n entro.append([])\n for j in range(len(label[i])):\n entro[i].append([[],[]])\n\n #Construct continuous variable binary classification\n T = []\n for i in range(len(label)):\n if isinstance(label[i][0], float):\n if i>=1 and not isinstance(label[i-1][0], float):\n th = i\n t = []\n entro[i][0] = []\n entro[i][1] = []\n for j in range(len(label[i])-1):\n t.append((label[i][j]+label[i][j+1])/2)\n T.append(t)\n\n #Calculate the information entropy matrix\n for i in range(len(dataset)):\n for j in range(len(label)):\n if isinstance(label[j][0], float):\n for l in range(len(T[j-th])):\n t = T[j-th][l]\n entro[j][0].append([[],[]])\n entro[j][1].append([[],[]])\n #print(isinstance(data[i][j], float), isinstance(t,float))\n #print(dataset[i][j])\n if dataset[i][j] < t:\n entro[j][0][l][0].append(dataset[i])\n entro[j][0][l][1].append(y[i])\n else:\n entro[j][1][l][0].append(dataset[i])\n entro[j][1][l][1].append(y[i])\n else:\n for d in range(len(label[j])):\n #print(dataset[i], label[j])\n if dataset[i][j] == label[j][d]:\n entro[j][d][0].append(dataset[i])\n entro[j][d][1].append(y[i])\n #print(entro)\n\n #Construct gain list, here we can use heap storing the gains to save time\n div = [None for _ 
in range(len(label))]\n for i in range(len(label)):\n e = Entropy(dataset, y, k)\n s = 0\n if isinstance(label[i][0], float):\n #Here can also be optimized by heap structure\n s = 0 \n for l in range(len(T[i-th])):\n #print(T[i-th][l], entro[i][0][l][0], entro[i][1][l][0])\n x1 = len(entro[i][0][l][1])/len(dataset)*Entropy(entro[i][0][l][0], entro[i][0][l][1], k)\n x2 = len(entro[i][1][l][1])/len(dataset)*Entropy(entro[i][1][l][0], entro[i][1][l][1], k)\n if s < e-x1-x2:\n s = e-x1-x2\n div[i] = T[i-th][l]\n gain.append(s)\n else:\n for j in range(len(label[i])):\n s += len(entro[i][j][1])/len(dataset)*Entropy(entro[i][j][0], entro[i][j][1], k)\n gain.append(e-s)\n ret = 0\n for i in range(len(gain)):\n if gain[i] > gain[ret]:\n ret = i\n #print (gain)\n return (ret,gain[ret], div)\npass\n\ndef Gini(dataset, y, k):\n if len(dataset) == 0:\n return 0\n gini = 0\n p = [0]*k\n for dy in y:\n p[dy] += 1\n for i in range(k):\n p[i]/=len(dataset)\n gini += p[i]*p[i]\n gini = 1-gini\n return gini\npass\ndef Gini_index(dataset, label, y, k):\n gini = []\n entro = []\n for i in range(len(label)):\n entro.append([])\n for j in range(len(label[i])):\n entro[i].append([[],[]])\n\n #Construct continuous variable binary classification\n T = []\n for i in range(len(label)):\n if isinstance(label[i][0], float):\n if i>=1 and not isinstance(label[i-1][0], float):\n th = i\n t = []\n entro[i][0] = []\n entro[i][1] = []\n for j in range(len(label[i])-1):\n t.append((label[i][j]+label[i][j+1])/2)\n T.append(t)\n\n #Calculate the information entropy matrix\n for i in range(len(dataset)):\n for j in range(len(label)):\n if isinstance(label[j][0], float):\n for l in range(len(T[j-th])):\n t = T[j-th][l]\n entro[j][0].append([[],[]])\n entro[j][1].append([[],[]])\n #print(isinstance(data[i][j], float), isinstance(t,float))\n #print(dataset[i][j])\n if dataset[i][j] < t:\n entro[j][0][l][0].append(dataset[i])\n entro[j][0][l][1].append(y[i])\n else:\n entro[j][1][l][0].append(dataset[i])\n entro[j][1][l][1].append(y[i])\n else:\n for d in range(len(label[j])):\n #print(dataset[i], label[j])\n if dataset[i][j] == label[j][d]:\n entro[j][d][0].append(dataset[i])\n entro[j][d][1].append(y[i])\n #print(entro)\n\n #Construct gini index list, here we can use heap storing the gini indeces to save time\n div = [None for _ in range(len(label))]\n for i in range(len(label)):\n s = 0\n if isinstance(label[i][0], float):\n #Here can also be optimized by heap structure\n s = 0 \n for l in range(len(T[i-th])):\n #print(T[i-th][l], entro[i][0][l][0], entro[i][1][l][0])\n x1 = len(entro[i][0][l][1])/len(dataset)*Gini(entro[i][0][l][0], entro[i][0][l][1], k)\n x2 = len(entro[i][1][l][1])/len(dataset)*Gini(entro[i][1][l][0], entro[i][1][l][1], k)\n if s < x1+x2:\n s = x1+x2\n div[i] = T[i-th][l]\n gini.append(s)\n else:\n for j in range(len(label[i])):\n s += len(entro[i][j][1])/len(dataset)*Gini(entro[i][j][0], entro[i][j][1], k)\n gini.append(s)\n ret = 0\n for i in range(len(gini)):\n if gini[i] > gini[ret]:\n ret = i\n return (ret,gini[ret], div)\n\n#Thit function return if the pre-cut should be done in such a devision, children is devision lists and valid is validation data.\ndef pre_cut(accuracy, children_x, children_y, valid, valid_y):\n #construct decisions in different sub-nodes\n #print(\"children\", children_x, children_y)\n #print(\"valid\",valid, valid_y)\n critical = []\n for child_y in children_y:\n num = {}\n for y in child_y:\n if y in num:\n num[y] += 1\n else:\n num[y] = 1\n mi = 0\n m = 0 \n for i,count in 
num.items():\n if count > m:\n mi = i\n m = count\n #print(mi, num, child_y)\n critical.append(mi)\n\n #Calculate the accuracy after partition\n acc_after = 0\n count = 0\n #print(critical)\n #print (valid, valid_y)\n for v in range(len(children_x)):\n for ty in valid_y[v]:\n if ty == critical[v]:\n acc_after += 1\n count += 1\n #print(count, acc_after)\n if count == 0:\n acc_after = 1\n else:\n acc_after /= count\n \n #print(\"before = \",accuracy,\"after\",acc_after)\n #print(\"pre-cut?\", acc_after < accuracy)\n return acc_after <= accuracy\n\ndef post_cut(tree, data, y, label):\n acc_before = test(data, y, label, tree)\n for i,child in enumerate(tree.children):\n if child.value == -1:\n flag = True\n for c in child.children:\n if not flag:\n break\n if c.value == -1:\n flag = False\n if flag:\n acc_after = test(data, y, label, child)\n if acc_after>=acc_before:\n new_node = node()\n new_node.value = max(y)\n tree.children[i] = new_node\n else:\n child = post_cut(child, data, y, label)\n tree.children[i] = child\n return tree\n\n\n#This function return the root node of our decision tree\ndef DecisionTree(dataset, y, label, valid, validy, k, ifprecut):\n n = node()\n #One of recursion ending condition\n if len(set(y)) == 1:\n n.value = y[0]\n #print(n.value)\n return n\n \n #The codes below calculate the most frequency classification value and store it into 'mi'.\n num = {}\n for x in y:\n if x in num:\n num[x] += 1\n else:\n num[x] = 1\n m = 0\n mi = 0\n for i in range(len(num)):\n if num[i] >= m:\n m = num[i]\n mi = i\n\n #Two kinds of ending condition of recursion\n if len(label) == 0:\n n.value = mi\n #print(n.value)\n return n\n flag = True\n for i in range(len(dataset)):\n for j in range(i+1,len(dataset)):\n if dataset[i]!=dataset[j]:\n flag = False\n if flag:\n n.value = mi\n #print(n.value)\n return n\n \n #Calculate the gain/gini_index and decide the attribute to branch next step\n index,g,div = Gini_index(dataset, label, y, k)\n n.critical = index\n n.div = div[index]\n #print(index, div)\n #Continuous variables and Discrete variables have different judgement structure\n if isinstance(label[index][0], float):\n t = div[index]\n D = [[] for _ in range(2)]\n Dy = [[] for _ in range(2)]\n V = [[] for _ in range(2)]\n Vy = [[] for _ in range(2)]\n for i in range(len(dataset)):\n if dataset[i][index] < t:\n D[0].append(dataset[i][:index]+dataset[i][index+1:])\n Dy[0].append(y[i])\n else:\n D[1].append(dataset[i][:index]+dataset[i][index+1:])\n Dy[1].append(y[i])\n for i in range(len(valid)):\n if dataset[i][index] < t:\n V[0].append(valid[i][:index]+valid[i][index+1:])\n Vy[0].append(validy[i])\n else:\n V[1].append(valid[i][:index]+valid[i][index+1:])\n Vy[1].append(validy[i])\n else:\n D = [[] for _ in range(len(label[index]))]\n Dy = [[] for _ in range(len(label[index]))]\n V = [[] for _ in range(len(label[index]))]\n Vy = [[] for _ in range(len(label[index]))]\n for j in range(len(label[index])):\n for i in range(len(dataset)):\n if dataset[i][index] == label[index][j]:\n D[j].append(dataset[i][:index]+dataset[i][index+1:])\n Dy[j].append(y[i])\n for i in range(len(valid)):\n if valid[i][index] == label[index][j]:\n V[j].append(valid[i][:index]+valid[i][index+1:])\n Vy[j].append(validy[i])\n new_label = label[:index]+label[index+1:]\n\n #Calculate the accuracy of this node which will be used in deciding pre-cut.\n s = 0\n #print(validy, mi)\n for y in validy:\n if y == mi:\n s += 1\n if len(validy)!=0:\n s /= len(validy)\n else:\n s = 1\n n.accuracy = s\n #Do precut judgement\n 
#print (\"D=\",D, \"V=\",V)\n if ifprecut:\n if pre_cut(n.accuracy, D, Dy, V, Vy):\n #print(\"here\", mi)\n n.value = mi\n return n\n\n #Recursion to visit childrens of the root\n #print(D)\n #print(Dy)\n #print(new_label)\n new_valid = []\n new_validy = []\n for v,vy in zip(V,Vy):\n new_valid += v\n new_validy += vy\n if isinstance(label[index][0], float):\n it = 2\n else:\n it = len(label[index])\n for v in range(it):\n if D[v]!=[]:\n #print(D[v], Dy[v])\n c = DecisionTree(D[v], Dy[v], new_label, V[v], Vy[v], k, ifprecut)\n n.children.append(c)\n else:\n c = node()\n c.value = mi\n n.children.append(c)\n #print(n.value)\n return n\npass\n\n#This two functions are to test data and return accuracy rate\ndef test_one(data, y, label, root):\n if root.value != -1:\n #print (root.value)\n return y==root.value\n if isinstance(data[root.critical], float):\n new_label = label[:root.critical]+label[root.critical+1:]\n new_data = data[:root.critical]+data[root.critical+1:]\n if data[root.critical] < root.div:\n #print(label[root.critical])\n return test_one(new_data, y, new_label, root.children[0])\n else:\n #print(label[root.critical])\n return test_one(new_data, y, new_label, root.children[1])\n else:\n for i in range(len(root.children)):\n if label[root.critical][i] == data[root.critical]:\n new_label = label[:root.critical]+label[root.critical+1:]\n new_data = data[:root.critical]+data[root.critical+1:]\n #print(i, data[root.critical], root.critical, label[root.critical][i])\n return test_one(new_data, y, new_label, root.children[i])\n return False\npass\ndef test(dataset, y, label, root):\n s = 0\n for i in range(len(dataset)):\n judge = test_one(dataset[i], y[i], label, root)\n #print(judge)\n s += int(judge)\n if len(dataset) == 0:\n return 1\n return s/len(dataset)\npass\n\n#Virtualization of tree, copied from https://blog.csdn.net/wancongconghao/article/details/71171981\ndecision_node = dict(boxstyle=\"sawtooth\",fc=\"0.8\")\nleaf_node = dict(boxstyle=\"round4\",fc=\"0.8\")\narrow_args = dict(arrowstyle=\"<-\")\n#获取树的叶子结点个数(确定图的宽度)\ndef get_leaf_num(tree):\n leaf_num = 0\n if len(tree.keys())==0:\n return 0\n first_key = list(tree.keys())[0]\n next_dict = tree[first_key]\n for key in next_dict.keys():\n if type(next_dict[key]).__name__==\"dict\":\n leaf_num +=get_leaf_num(next_dict[key])\n else:\n leaf_num +=1\n return leaf_num\n#获取数的深度(确定图的高度)\ndef get_tree_depth(tree):\n depth = 0\n if len(tree.keys())==0:\n return 0 \n first_key = list(tree.keys())[0]\n next_dict = tree[first_key]\n for key in next_dict.keys():\n if type(next_dict[key]).__name__ == \"dict\":\n thisdepth = 1+ get_tree_depth(next_dict[key])\n else:\n thisdepth = 1\n if thisdepth>depth: depth = thisdepth\n return depth\ndef plotNode(nodeTxt, centerPt, parentPt, nodeType):\n createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',\n xytext=centerPt, textcoords='axes fraction',\n va=\"center\", ha=\"center\", bbox=nodeType, arrowprops=arrow_args)\n#在父子节点间填充文本信息\ndef plotMidText(cntrPt, parentPt, txtString):\n xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]\n yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]\n createPlot.ax1.text(xMid, yMid, txtString, va=\"center\", ha=\"center\", rotation=30)\ndef plotTree(myTree, parentPt, nodeTxt):\n numLeafs = get_leaf_num(myTree)\n depth = get_tree_depth(myTree)\n firstStr = list(myTree.keys())[0]\n cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)\n plotMidText(cntrPt, parentPt, nodeTxt)\n plotNode(firstStr, cntrPt, parentPt, 
decision_node)\n secondDict = myTree[firstStr]\n plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD\n for key in secondDict.keys():\n if type(secondDict[\n key]).__name__ == 'dict':\n plotTree(secondDict[key], cntrPt, str(key))\n else:\n plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW\n plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leaf_node)\n plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))\n plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD\ndef createPlot(inTree):\n fig = plt.figure(1, facecolor='white')\n fig.clf()\n axprops = dict(xticks=[], yticks=[])\n createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)\n plotTree.totalW = float(get_leaf_num(inTree))\n plotTree.totalD = float(get_tree_depth(inTree))\n if plotTree.totalW == 0:\n print(\"No nodes in the tree\")\n return\n plotTree.xOff = -0.5 / plotTree.totalW\n plotTree.yOff = 1.0\n plotTree(inTree, (0.5, 1.0), '')\n plt.show()\n\n#Transfer tree into dictionary form\ndef tree_to_dic(tree, labels, l):\n dic = {}\n if tree.value==-1: #not leaf node\n label = labels[tree.critical]\n new_labels = labels[:tree.critical]+labels[tree.critical+1:]\n new_l = l[:tree.critical] + l[tree.critical+1:]\n subdic = {}\n if isinstance(l[tree.critical][0], float):\n label = label + '<' + str(tree.div)\n for i in range(len(tree.children)):\n child = tree.children[i]\n if isinstance(l[tree.critical][i], float):\n la = 'Yes' if i==0 else 'No'\n else:\n la = l[tree.critical][i]\n if child.value == -1:\n #print(new_l[child.critical])\n subdic[la] = tree_to_dic(child, new_labels, new_l)\n else:\n subdic[la] = '好瓜' if child.value==1 else '坏瓜'\n dic[label] = subdic\n #print (dic)\n return dic\npass\n\n#start of main program\nvalue = re.compile(r'^[-+]?[0-9]+\\.[0-9]+$')\nfile = open(\"train.txt\")\ntrain = []\ntrain_y = []\nn = 0\nwhile 1:#read data\n line = file.readline().strip()\n if not line:\n break\n pass\n line = line.split(',')\n k = len(line)\n lines = []\n for i in range(1,k-1):\n lines.append(line[i])\n train.append(lines)\n if line[-1] == '否':\n train_y.append(0)\n else:\n train_y.append(1)\n n += 1\nfor i in range(len(train)):\n for j in range(len(train[i])):\n if value.match(train[i][j]):\n train[i][j] = float(train[i][j])\n#print(train, train_y)\nfile = open(\"validation.txt\")\nvalidation = []\nvalidation_y = []\nn = 0\nwhile 1:#read data\n line = file.readline().strip()\n if not line:\n break\n pass\n line = line.split(',')\n k = len(line)\n lines = []\n for i in range(1,k-1):\n lines.append(line[i])\n validation.append(lines)\n if line[-1] == '否':\n validation_y.append(0)\n else:\n validation_y.append(1)\n n += 1\nfor i in range(len(validation)):\n for j in range(len(validation[i])):\n if value.match(validation[i][j]):\n validation[i][j] = float(validation[i][j])\n#print(validation, validation_y)\n\n'''\n#swap attributes to test different answers\nfor i,x in enumerate(train):\n x = [x[4]]+x[:4]+x[5:]\n train[i] = x\nfor i,x in enumerate(validation):\n x = [x[4]]+x[:4]+x[5:]\n validation[i] = x\ntrain_y = [train_y[4]]+train_y[:4] + train_y[5:]\nvalidation_y = [validation_y[4]] + validation_y[:4] + validation_y[5:]\nprint(train, train_y, validation, validation_y)\n'''\n\n#Create attribute set of every label\nlabel = []\nfor i in range(0,len(train[0])):\n attribute = []\n for a in train:\n if not a[i] in attribute:\n attribute.append(a[i])\n label.append(sorted(attribute))\n\n#Create the decision tree without pre-cut nor post-cut\ntree = DecisionTree(train, train_y, label, validation, 
validation_y, 2, False)\n\n#test the decision tree (should be always 100% because I just use the generating data to test)\nprint(test(validation, validation_y, label, tree))\n\n#Transfer tree into a dictionary form and draw the tree graph\nlabels = ['色泽','根蒂','敲声','纹理','脐部','触感','密度','含糖率']\nt = tree_to_dic(tree, labels, label)\nprint (t)\ncreatePlot(t)\n\n#Do the post-cut and draw the graph\ntree = post_cut(tree, validation, validation_y, label)\nprint(test(validation, validation_y, label, tree))\nt = tree_to_dic(tree, labels, label)\nprint(t)\ncreatePlot(t)\n\n#Do the pre-cut and draw the graph\ntree = DecisionTree(train, train_y, label, validation, validation_y, 2, True)\nprint(test(validation, validation_y, label, tree))\nt = tree_to_dic(tree, labels, label)\nprint (t)\ncreatePlot(t)\n\n\n","sub_path":"Chapter4/4.4/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":20478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"311972047","text":"import pathlib\nfrom os.path import join, basename, abspath, isfile, dirname\n\npackage_link = \".tmp/symlink\"\n_path = str(pathlib.Path(__file__).parent.absolute())\n\ndef _cwd():\n return join(dirname(__file__))\n\ndef _join(a, b):\n return abspath(join(a, b))\n\ndef _split(a):\n return a.split(\"/\")\n\ndef _backout(path):\n return _join(path, \"..\")\n\ndef _import_fun(mod, func):\n return getattr(__import__(mod, fromlist=[func]), func)\n\ndef _get_pgk_dir():\n currentpath = _cwd()\n i = len(currentpath.split(\"/\"))\n while i > 0:\n currentpath = _join(currentpath, \"..\")\n if isfile(currentpath + \"/setup.py\"):\n return currentpath\n i = -1\n i = i - 1\n\ndef _get_pgk_name():\n currentpath = _cwd()\n i = len(currentpath.split(\"/\"))\n while i > 0:\n currentpath = _join(currentpath, \"..\")\n if isfile(currentpath + \"/setup.py\"):\n return basename(currentpath).replace(\"-\", \"\")\n i = -1\n i = i - 1\n\ndef setup_links(package_name):\n _link = package_link + \"/\"\n Path(_path + \"/\" + _link).mkdir(parents=True, exist_ok=True)\n if not os.path.islink(_path + \"/\" + _link + package_name):\n os.symlink(os.path.join(_path, _src), _path + \"/\" + _link + \"/\" + package_name)\n\ndef smart_reqs(repos, package_name):\n # styles = standalone, repo\n currentpath = _path\n def _get_deploy_style():\n currentpath = _path\n for _ in range(len(_split(currentpath))):\n currentpath = _backout(currentpath)\n if isdir(currentpath + \"/.tmp/repos\"):\n return \"repo\"\n\n if _get_deploy_style() == \"repo\":\n local_repos = os.listdir(_join(_path, \"..\"))\n if \".DS_Store\" in local_repos:\n local_repos.remove(\".DS_Store\")\n if package_name in local_repos:\n local_repos.remove(package_name)\n\n for repo in local_repos:\n repos = [_ for _ in repos if not _.endswith(repo + \".git\")]\n return repos\n\n return repos\n","sub_path":"src/framework/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"60409218","text":"#\n# Copyright (c) 2015, Prometheus Research, LLC\n#\n\n\nimport logging\nimport types\n\nfrom rex.core import get_settings\n\nfrom .util import merge_dicts\n\n\n__all__ = (\n 'get_logger',\n 'get_logging_config',\n 'disable_logging',\n 'enable_logging',\n)\n\n\ndef get_logger(name=None):\n \"\"\"\n A convenience wrapper around the ``logging.getLogger()`` function. 
If this\n function receives a class or a function, then this will return a logger for\n the name of that class/function. If this function receives an instance of a\n class, then this will return a logger for the name of the class the\n instance is of.\n\n :param name: the name of the logger\n :type name: str, class, function, instance\n :rtype: logging.Logger\n \"\"\"\n\n if isinstance(name, (type, types.FunctionType, types.MethodType)):\n # Classes or Functions\n name = name.__qualname__\n elif hasattr(name, '__class__') \\\n and ('__dict__' in dir(name) or hasattr(name, '__slots__')):\n # Instances of new-style classes\n name = name.__class__.__qualname__\n elif name is not None:\n name = str(name)\n\n return logging.getLogger(name)\n\n\nBASE_LOGGERS = {\n 'raven': {\n 'level': 'ERROR',\n },\n}\n\n\ndef get_logging_config():\n \"\"\"\n Assembles the configuration for the Python Logging framework using the\n RexDB application Settings defined by this package.\n\n :returns:\n A configuration that can be used with ``logging.config.dictConfig()``\n :rtype: dict\n \"\"\"\n\n loggers = merge_dicts(BASE_LOGGERS, get_settings().logging_loggers)\n\n return {\n 'version': 1,\n 'incremental': False,\n 'disable_existing_loggers': True,\n 'formatters': get_settings().logging_formatters,\n 'filters': get_settings().logging_filters,\n 'handlers': get_settings().logging_handlers,\n 'loggers': loggers,\n 'root': get_settings().logging_root,\n }\n\n\ndef disable_logging(level=logging.WARNING):\n \"\"\"\n Shuts down the logging in the application of all messages equal to or lower\n than the specified level.\n\n :param level:\n the level at which logging should be disabled. if not specified,\n defaults to ``logging.WARNING``.\n :type level: int\n \"\"\"\n\n logging.disable(level)\n\n\ndef enable_logging():\n \"\"\"\n (Re-)Enables logging in the application.\n \"\"\"\n\n logging.disable(logging.NOTSET)\n\n","sub_path":"src/rex.logging/src/rex/logging/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"83105714","text":"from flask import Flask, jsonify, request, session\nfrom flask_bcrypt import Bcrypt \nfrom flask_cors import CORS\nimport json\nfrom datetime import datetime\nimport random\nfrom collections import Counter\n\nfrom mongoengine import connect\nfrom mongoengine.queryset.visitor import Q\nfrom mongoengine import DoesNotExist, NotUniqueError\n\nfrom models import Shelves, Books, Users, Reviews\n\napp = Flask(__name__)\n\nbcrypt = Bcrypt(app)\napp.secret_key = 'secret'\n\nCORS(app)\n\nusername = 'admin'\npassword = 'admin'\ndb = 'illumina'\nhost = f'mongodb+srv://{username}:{password}@illumina-lmf8b.gcp.mongodb.net/{db}?retryWrites=true&w=majority'\n\nconnect(host=host)\n\n@app.route('/register', methods=[\"POST\"])\n@app.route('/book/register', methods=[\"POST\"])\n@app.route('/book-shelves/register', methods=['POST'])\ndef register():\n username = request.get_json()['username']\n email = request.get_json()['email']\n password = request.get_json()['password']\n date_of_birth = request.get_json()['dob']\n\n password = bcrypt.generate_password_hash(request.get_json()['password']).decode('utf-8')\n\n try:\n Users(\n username=username,\n email=email,\n date_of_birth=date_of_birth,\n password=password\n ).save()\n except NotUniqueError:\n return jsonify({\"error\":\"Username or Email is not unique\"})\n except:\n return jsonify({\"error\":\"Registration Error, please try 
again\"})\n\n return jsonify({'result' : email + ' registered'})\n\n@app.route('/login', methods=['POST'])\n@app.route('/book/login', methods=['POST'])\n@app.route('/book-shelves/login', methods=['POSt'])\ndef login():\n login_id = request.get_json()['login_id']\n password = request.get_json()['password']\n\n try:\n response = Users.objects(Q(username=login_id) or Q(email=login_id)).get()\n except DoesNotExist as err:\n return jsonify({\"user\": \"Invalid login id\"})\n\n if bcrypt.check_password_hash(response['password'], password):\n session['user'] = response['username']\n\n return jsonify({'username': response['username'] , 'profile_pic': response['profile_pic']})\n else:\n return jsonify({\"user\":\"Invalid Password\"})\n\n@app.route('/logout', methods=['POST'])\n@app.route('/book/logout', methods=['POST'])\n@app.route('/book-shelves/logout', methods=['POST'])\ndef logout():\n session.clear()\n return jsonify({\"logout\": True})\n\n@app.route('/search', methods=['GET'])\ndef search_book():\n title = request.args.get('title', default = None, type = str)\n \n books = Books.objects(book_title__icontains=title).only(\n 'book_title',\n 'id',\n 'cover_image',\n 'avg_rating',\n 'genres',\n 'author'\n )\n\n lim = 10\n total = books.count()\n if total%lim != 0:\n total = int(total/lim) + 1\n else:\n total = int(total/lim)\n\n if 'user' in session:\n user = Users.objects(username=session['user']).get()\n shelves = []\n for shelf in user['shelves']:\n shelves.append(shelf['shelf_title'])\n\n return jsonify({\"books\":books.to_json(), \"total\": total, \"shelves\": shelves})\n\n return jsonify({\"books\":books.to_json(), \"total\": total})\n\n@app.route('/book/', methods=['GET'])\ndef get_book(id):\n try:\n book = Books.objects(id=id).get()\n except:\n return jsonify({\"err\": \"Book not found\"})\n \n if 'user' in session:\n user = Users.objects(username=session['user']).get()\n shelves = []\n for shelf in user['shelves']:\n shelves.append(shelf['shelf_title'])\n\n return jsonify({\"book\":book.to_json(), \"shelves\": shelves})\n\n return jsonify({\"book\":book.to_json()})\n \n@app.route('/get-user', methods=['GET'])\n@app.route('/book/get-user', methods=['GET'])\n@app.route('/book-shelves/get-user', methods=['GET'])\ndef get_user():\n user = Users.objects(username=session['user']).get()\n return jsonify({\"user\":user.to_json()})\n\n@app.route('/book/add-review', methods=['POST'])\ndef add_review():\n review = request.get_json()['review']\n book = request.get_json()['book']\n\n user = Users.objects(username=session['user']).only('username', 'profile_pic').get()\n book = Books.objects(id=book).get()\n\n try:\n book.reviews.get(username=session['user'])['review_text'] = review\n except DoesNotExist:\n book.reviews.append(Reviews(\n username=user['username'],\n profile_pic=user['profile_pic'],\n review_text=review\n ))\n\n book.save()\n \n return jsonify({\"result\": True})\n\n@app.route('/book/rate-book', methods=['POST'])\ndef rate_book():\n book_id = request.get_json()['book']\n rating = request.get_json()['rating']\n \n book = Books.objects(id=book_id).get()\n\n try:\n book.reviews.get(username=session['user'])['rating'] = rating\n book.reviews.get(username=session['user'])['created'] = datetime.utcnow()\n except DoesNotExist:\n user = Users.objects(username=session['user']).only('username', 'profile_pic').get()\n book.reviews.append(Reviews(\n username=user['username'],\n profile_pic=user['profile_pic']\n ))\n\n book.save()\n\n return jsonify({\"result\": 
True})\n\n@app.route('/book-shelves/add-shelf', methods=['POST'])\n@app.route('/add-shelf', methods=['POST'])\ndef add_shelf():\n shelf = request.get_json()['shelf']\n user = Users.objects(username=session['user']).get()\n user.shelves.append(Shelves(\n shelf_title=shelf\n ))\n user.save()\n return jsonify({\"result\": True})\n\n@app.route('/add-book-to-shelf', methods=['POST'])\n@app.route('/book/add-book-to-shelf', methods=['POST'])\ndef add_book_to_shelf():\n shelf = request.get_json()['shelf']\n book = request.get_json()['book']\n\n user = Users.objects(username=session['user']).get()\n\n user.shelves.get(shelf_title=shelf).shelved_books.append(\n Books.objects(id=book).get()\n )\n user.save()\n return jsonify({\"result\": True})\n\n@app.route('/get-user-shelf', methods=['GET'])\n@app.route('/book-shelves/get-user-shelf', methods=['GET'])\ndef get_user_shelf():\n user = Users.objects(username=session['user']).get()\n\n shelves = []\n for shelf in user.shelves:\n books = []\n for book in shelf.shelved_books:\n books.append(\n {\n 'id': str(book.id),\n 'title': book.book_title,\n 'cover_image': book.cover_image,\n 'author': book.author\n }\n )\n shelves.append(\n {\n \"shelf_title\": shelf.shelf_title,\n \"shelf_pic\": shelf.shelf_pic,\n \"books\": books\n }\n )\n\n return jsonify({\"shelves\": json.dumps(shelves)})\n\n@app.route('/get-book-recommendation', methods=['GET'])\ndef get_book_recommendation():\n user = Users.objects(username=session['user']).get()\n\n genres = []\n ignore_books = []\n for shelf in user.shelves:\n for book in shelf.shelved_books:\n ignore_books.append(book['id'])\n for genre in book.genres:\n genres.append(genre)\n\n genres = list(dict(Counter(genres).most_common()).keys())\n\n if len(genres) > 5:\n genres = genres[:5]\n\n books = Books.objects(\n Q(avg_rating__gte=4.5) & Q(genres__in=list(genres)) & Q(id__nin=list(set(ignore_books)))\n ).only(\n 'book_title',\n 'id',\n 'cover_image',\n 'author',\n 'genres',\n 'avg_rating'\n ).to_json()\n \n books = json.loads(books)\n books = random.sample(books,6)\n\n return jsonify({\"rec\": json.dumps(books)})\n\n@app.route('/book/remove-review', methods=['POST'])\ndef remove_review():\n book = request.get_json()['book']\n\n book = Books.objects(id=book).get()\n book.reviews.remove(book.reviews.get(username=session['user']))\n book.save()\n\n return jsonify({'result': True})\n\n@app.route('/remove-shelf-book', methods=['POST'])\ndef remove_shelf_book():\n book = request.get_json()['book']\n shelf = request.get_json()['shelf']\n\n user = Users.objects(username=session['user']).get()\n user.shelves.get(shelf_title=shelf).shelved_books.remove(Books.objects(id=book).get())\n \n user.save()\n\n return jsonify({'result': True})\n\n@app.route('/remove-shelf', methods=['POST'])\ndef remove_shelf():\n shelf = request.get_json()['shelf']\n\n user = Users.objects(username=session['user']).get()\n user.shelves.remove(user.shelves.get(shelf_title=shelf))\n \n user.save()\n\n return jsonify({'result': True})\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n# add user, genre, author to search | not doing\n# add remove button to book shelves for shelf and books | done\n# create profile form\n# create personality graph","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"514591367","text":"import cv2\r\nimport matplotlib.pyplot as plt\r\n\r\nimg = cv2.imread(\"D:\\\\Coding\\\\pictures\\\\Fig0940.jpg\", 
cv2.IMREAD_GRAYSCALE)\r\n\r\n\r\nret1, th1 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (80, 80))\r\nopen1 = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\r\ntop_hat = img - open1\r\nret2, th2 = cv2.threshold(top_hat, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n\r\nprint(ret1, ret2)\r\n\r\ntitles = [\"original\", \"thresh1\", \"opening\", \"top_hat\", \"thresh2\"]\r\nimages = [img, th1, open1, top_hat, th2]\r\n\r\nfor i in range(5):\r\n plt.subplot(2, 3, i+1)\r\n plt.imshow(images[i], cmap=\"gray\", )\r\n plt.title(titles[i])\r\n plt.xticks([]), plt.yticks([])\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"cv_07.py","file_name":"cv_07.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"203862127","text":"import itertools\nimport os\nfrom pathlib import Path\n\nimport dask.array as da\nimport fsspec\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\n\nclass DevNullStore:\n def __init__(self):\n pass\n\n def __setitem__(*args, **kwargs):\n pass\n\n\ndef temporal_mean(ds):\n return ds.mean(dim='time')\n\n\ndef spatial_mean(ds):\n weights = np.cos(np.radians(ds.lat)).where(ds['sst'][0].notnull())\n weights /= weights.sum()\n return (ds * weights).mean(dim=['lat', 'lon'])\n\n\ndef climatology(ds):\n seasonal_clim = ds.groupby('time.season').mean(dim='time')\n return seasonal_clim\n\n\ndef anomaly(ds):\n seasonal_clim = climatology(ds)\n seasonal_anom = ds.groupby('time.season') - seasonal_clim\n return seasonal_anom\n\n\ndef readfile(ds):\n print('readfile')\n null_store = DevNullStore()\n null_store['foo'] = 'bar'\n future = da.store(ds, null_store, lock=False, compute=False)\n future.compute()\n\n\ndef delete_dir(dir):\n files = os.listdir(dir)\n for file in files:\n os.remove(os.path.join(dir, file))\n\n\ndef get_filelist(fs, io_format, root, chunk_size):\n if isinstance(fs, fsspec.AbstractFileSystem):\n if io_format == 'zarr':\n fileObjs = fs.glob(f'{root}/sst.{chunk_size}*.zarr')\n elif io_format == 'netcdf':\n subs = 'zarr'\n zarr_flist = list(filter(lambda x: subs in x, fs.glob(f'{root}/*')))\n fileObjs = [fs.open(p) for p in fs.ls(f'{root}/') if p not in zarr_flist]\n else:\n if io_format == 'zarr':\n fileObjs = Path(f'{root}').glob(f'test1/sst.{chunk_size}*.zarr')\n return fileObjs\n\n\ndef openfile(fs, io_format, root, chunks, chunk_size):\n # print('openfile')\n # print(chunks)\n # if isinstance(fs, fsspec.AbstractFileSystem):\n if io_format == 'zarr':\n fileObjs = fs.glob(f'{root}/sst.{chunk_size}*.zarr')\n print(f'{root}/sst.{chunk_size}*.zarr')\n # ds = da.from_zarr(fs.get_mapper(f'{f[0]}/sst'))\n f = xr.open_zarr(fs.get_mapper(f'{fileObjs[0]}'))\n ds = f.sst.data\n print(ds)\n elif io_format == 'netcdf':\n subs = 'zarr'\n zarr_flist = list(filter(lambda x: subs in x, fs.glob(f'{root}/*')))\n fileObjs = [fs.open(p) for p in fs.ls(f'{root}/') if p not in zarr_flist]\n print(list(filter(lambda x: 'nc' in x, fs.glob(f'{root}/*'))))\n datasets = [xr.open_dataset(p, chunks={'time': chunks[0]}) for p in fileObjs]\n f = xr.concat(datasets, dim='time')\n ds = f.sst.data\n print(ds)\n '''\n else:\n if io_format == 'zarr':\n f = Path(f'{root}').glob(f'test1/sst.{chunk_size}*.zarr')\n ds = da.from_zarr(f'{next(f).as_posix()}/sst')\n print(ds)\n elif io_format == 'netcdf':\n f = xr.open_mfdataset(\n f'{root}/test1/sst.*.nc', combine='by_coords', engine='h5netcdf', chunks={'time': chunks[0]}\n )\n 
ds = f.sst.data\n print(ds)\n '''\n return ds\n\n\ndef writefile(ds, fs, io_format, root, fname):\n filename = f'sst.{fname}'\n # if isinstance(fs, fsspec.AbstractFileSystem):\n if io_format == 'zarr':\n if isinstance(fs, fsspec.AbstractFileSystem):\n store = fs.get_mapper(root=f'{root}/{filename}.zarr', check=False, create=True)\n else:\n store = f'{root}/test1/{filename}.zarr'\n ds = ds.to_zarr(\n store,\n encoding={'sst': {'compressor': None}},\n consolidated=True,\n compute=False,\n mode='w',\n )\n ds.compute()\n elif io_format == 'netcdf':\n ds_list = list(split_by_chunks(ds))\n dss = [item[1] for item in ds_list]\n paths = [create_filepath(ds, prefix=filename, root_path=f'{root}/test1') for ds in dss]\n xr.save_mfdataset(datasets=dss, paths=paths, engine='h5netcdf', parallel=True)\n if isinstance(fs, fsspec.AbstractFileSystem):\n fs.upload(lpath=f'{root}/test1', rpath=f'{root}/', recursive=True)\n\n return filename\n\n\ndef deletefile(fs, io_format, root, filename):\n if isinstance(fs, fsspec.AbstractFileSystem):\n if io_format == 'zarr':\n ret = fs.rm(path=f'{root}/{filename}.zarr', recursive=True)\n # ret = fs.rm(path=f'{root}', recursive=True)\n elif io_format == 'netcdf':\n ret = delete_dir('test1')\n ret = fs.rm(path=f'{root}/test1', recursive=True)\n else:\n if io_format == 'zarr':\n ret = os.system(f'rm -rf {root}/test1')\n # ret = delete_dir('test1')\n elif io_format == 'netcdf':\n ret = delete_dir(f'{root}/test1')\n return ret\n\n\ndef split_by_chunks(dataset):\n \"\"\"\n COPIED from https://github.com/pydata/xarray/issues/1093#issuecomment-259213382\n \"\"\"\n chunk_slices = {}\n for dim, chunks in dataset.chunks.items():\n print(dim)\n print(dataset.sizes[dim])\n slices = []\n start = 0\n if len(chunks) > 10:\n chunk_range = int(len(chunks) / 10)\n else:\n chunk_range = 1\n for i in range(len(chunks) - chunk_range + 1):\n if start >= dataset.sizes[dim]:\n break\n stop = start + chunks[i] * chunk_range\n slices.append(slice(start, stop))\n print(start, stop)\n start = stop\n chunk_slices[dim] = slices\n for slices in itertools.product(*chunk_slices.values()):\n selection = dict(zip(chunk_slices.keys(), slices))\n yield (selection, dataset[selection])\n\n\ndef create_filepath(ds, prefix='filename', root_path='.'):\n \"\"\"\n Generate a filepath when given an xarray dataset\n \"\"\"\n start = pd.to_datetime(str(ds.time.data[0])).strftime('%Y-%m-%d')\n end = pd.to_datetime(str(ds.time.data[-1])).strftime('%Y-%m-%d')\n filepath = f'{root_path}/{prefix}_{start}_{end}.nc'\n return filepath\n","sub_path":"benchmarks/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"457745919","text":"import os\nimport sys\n\n\nROOT = os.path.dirname(__file__)\npath = lambda *p: os.path.realpath(os.path.join(ROOT, *p))\n\nsys.path.insert(0, path(os.pardir))\n\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'exampleapp.db'\n }\n}\n\nTEMPLATE_DIRS = (\n path('templates'),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.auth',\n 'django.contrib.admin',\n\n 'mediaembed_utils',\n \n 'audio',\n)\n\nMEDIA_ROOT = path('media')\nMEDIA_URL = '/media/'\n\nROOT_URLCONF = 
'exampleapp.urls'\n","sub_path":"exampleapp/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"277766723","text":"import tensorflow.keras.backend as K\nimport tensorflow as tf\n\nfrom tensorflow_addons.layers import InstanceNormalization\nfrom tensorflow.keras.layers import Layer, Input, Conv2D, Activation, add, BatchNormalization, UpSampling2D, ZeroPadding2D, Conv2DTranspose, Flatten, MaxPooling2D, AveragePooling2D, InputSpec, LeakyReLU, Dense\n\nfrom tensorflow.keras.initializers import RandomNormal\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.backend import mean\nfrom tensorflow.keras.models import Model, model_from_json\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.python.keras.engine.network import Network\n\nimport random\nimport datetime\nimport time\nimport json\nimport math\nimport csv\nimport sys\nfrom os.path import join, exists\nfrom os import mkdir, makedirs\nimport numpy as np\n\n# sys.path.append('../')\n#import load_data\n\nfrom factories.optimizer_factory import Optimizer_Factory\nfrom layers.reflection_padding_2d import ReflectionPadding2D\nfrom metrics.loss_functions import lse, cycle_loss\n\ndef save_img(img, fname):\n if len(tf.shape(img)) > 3:\n img = img[tf.constant(0),:,:,:]\n img = (img + 1) / 2.0\n result_img = tf.image.encode_png(tf.image.convert_image_dtype(img, dtype=tf.uint8))\n tf.io.write_file(tf.constant(fname), result_img)\n\nclass ImagePool():\n def __init__(self, pool_size):\n self.pool_size = pool_size\n if self.pool_size > 0:\n self.num_imgs = 0\n self.images = []\n\n def query(self, images):\n if self.pool_size == 0:\n return images\n return_images = []\n for image in images:\n if len(image.shape) == 3:\n image = image[np.newaxis, :, :, :]\n\n if self.num_imgs < self.pool_size: # fill up the image pool\n self.num_imgs = self.num_imgs + 1\n if len(self.images) == 0:\n self.images = image\n else:\n self.images = np.vstack((self.images, image))\n\n if len(return_images) == 0:\n return_images = image\n else:\n return_images = np.vstack((return_images, image))\n\n else: # 50% chance that we replace an old synthetic image\n p = random.uniform(0, 1)\n if p > 0.5:\n random_id = random.randint(0, self.pool_size - 1)\n tmp = self.images[random_id, :, :, :]\n tmp = tmp[np.newaxis, :, :, :]\n self.images[random_id, :, :, :] = image[0, :, :, :]\n if len(return_images) == 0:\n return_images = tmp\n else:\n return_images = np.vstack((return_images, tmp))\n else:\n if len(return_images) == 0:\n return_images = image\n else:\n return_images = np.vstack((return_images, image))\n\n return return_images\n\n\nclass CycleGAN():\n \"\"\"docstring for CycleGAN.\"\"\"\n\n def __init__(self, cf):\n self.model = None\n self.cf = cf\n\n # from Keras documentation: Total number of steps (batches of samples) to yield from generator before declaring one epoch finished and starting the next epoch. 
It should typically be equal to the number of unique samples of your dataset divided by the batch size.\n if(cf.n_images_train is not None):\n self.steps_per_epoch = int(np.ceil(cf.n_images_train / float(cf.batch_size_train)))\n else:\n samplesTrainA = self.get_file_len(cf.trainA_file_path_full)\n samplesTrainB = self.get_file_len(cf.trainB_file_path_full)\n samplesTrain = max(samplesTrainA, samplesTrainB)\n self.steps_per_epoch = int(np.ceil(samplesTrain / float(cf.batch_size_train)))\n # zzh\n # self.steps_per_epoch = int(np.ceil(get_file_len(cf.train_file_path_full) / float(cf.batch_size_train)))\n # self.validation_steps = int(np.ceil(get_file_len(cf.valid_file_path_full) / float(cf.batch_size_valid)))\n # samplesTestA = self.get_file_len(cf.testA_file_path_full)\n # samplesTestB = self.get_file_len(cf.testB_file_path_full)\n # samplesTest = max(samplesTestA, samplesTestB)\n # self.test_steps = int(np.ceil(samplesTest / float(cf.batch_size_valid)))\n\n\n def get_file_len(self, file_path_full):\n return len([\"\" for line in open(file_path_full, \"r\")])\n\n def make_discriminators(self):\n # ======= Discriminator model ==========\n if self.cf.use_multiscale_discriminator:\n D_A = self.modelMultiScaleDiscriminator()\n D_B = self.modelMultiScaleDiscriminator()\n loss_weights_D = [0.5, 0.5] # 0.5 since we train on real and synthetic images\n else:\n D_A = self.modelDiscriminator()\n D_B = self.modelDiscriminator()\n loss_weights_D = [0.5] # 0.5 since we train on real and synthetic images\n\n # Discriminator builds\n image_A = Input(shape=self.cf.input_shape)\n image_B = Input(shape=self.cf.input_shape)\n guess_A = D_A(image_A)\n guess_B = D_B(image_B)\n self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model')\n self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model')\n\n # self.D_A.summary()\n # self.D_B.summary()\n optimizer_D = Optimizer_Factory().make(self.cf.optimizer, self.cf.learning_rate_D)\n self.D_A.compile(optimizer=optimizer_D,\n loss=lse,\n loss_weights=loss_weights_D)\n self.D_B.compile(optimizer=optimizer_D,\n loss=lse,\n loss_weights=loss_weights_D)\n\n # Use Networks to avoid falsy keras error about weight descripancies\n self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model')\n self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model')\n\n # Do note update discriminator weights during generator training\n self.D_A_static.trainable = False\n self.D_B_static.trainable = False\n\n def make_generators(self):\n # Generators\n self.G_A2B = self.modelGenerator(name='G_A2B_model')\n self.G_B2A = self.modelGenerator(name='G_B2A_model')\n # self.G_A2B.summary()\n\n optimizer_G = Optimizer_Factory().make(self.cf.optimizer, self.cf.learning_rate_G)\n\n if self.cf.use_identity_learning:\n self.G_A2B.compile(optimizer=optimizer_G, loss='MAE')\n self.G_B2A.compile(optimizer=optimizer_G, loss='MAE')\n\n # Generator builds\n real_A = Input(shape=self.cf.input_shape, name='real_A')\n real_B = Input(shape=self.cf.input_shape, name='real_B')\n synthetic_B = self.G_A2B(real_A)\n synthetic_A = self.G_B2A(real_B)\n dA_guess_synthetic = self.D_A_static(synthetic_A)\n dB_guess_synthetic = self.D_B_static(synthetic_B)\n reconstructed_A = self.G_B2A(synthetic_B)\n reconstructed_B = self.G_A2B(synthetic_A)\n\n model_outputs = [reconstructed_A, reconstructed_B]\n compile_losses = [cycle_loss, cycle_loss,\n lse, lse]\n compile_weights = [self.cf.lambda_1, self.cf.lambda_2,\n self.cf.lambda_D, self.cf.lambda_D]\n\n if 
self.cf.use_multiscale_discriminator:\n for _ in range(2):\n compile_losses.append(lse)\n compile_weights.append(self.cf.lambda_D) # * 1e-3) # Lower weight to regularize the model\n for i in range(2):\n model_outputs.append(dA_guess_synthetic[i])\n model_outputs.append(dB_guess_synthetic[i])\n else:\n model_outputs.append(dA_guess_synthetic)\n model_outputs.append(dB_guess_synthetic)\n\n if self.cf.use_supervised_learning:\n model_outputs.append(synthetic_A)\n model_outputs.append(synthetic_B)\n compile_losses.append('MAE')\n compile_losses.append('MAE')\n compile_weights.append(self.cf.supervised_weight)\n compile_weights.append(self.cf.supervised_weight)\n\n self.model = Model(inputs=[real_A, real_B],\n outputs=model_outputs,\n name='G_model')\n\n self.model.compile(optimizer=optimizer_G,\n loss=compile_losses,\n loss_weights=compile_weights)\n # self.G_A2B.summary()\n\n def make(self):\n # Build model\n self.make_discriminators()\n print(' Build discriminators successfully...')\n # make generators\n self.make_generators()\n print(' Build generators successfully...')\n\n # Load pretrained weights\n # zzh\n if not self.cf.load_pretrained:\n print(' No loading pretrained weights...')\n elif not exists(join(self.cf.savepath, 'pretrained_weights')):\n print(' No pretrained weights exist...')\n return\n else:\n self.load_pretrained_weights(self.G_A2B)\n self.load_pretrained_weights(self.G_B2A)\n self.load_pretrained_weights(self.D_A)\n self.load_pretrained_weights(self.D_B)\n print(' Load pretrained weights successfully...')\n\n # if self.cf.resume_training:\n # model.load_weights(cf.checkpoint_path)\n\n # Show model structure\n if self.cf.show_model:\n model.summary()\n plot_model(model, to_file=join(self.cf.savepath, 'model.png'))\n\n # Output the model\n print(' Model: ' + self.cf.model_name)\n\n\n # Learning rate #\n def get_lr_linear_decay_rate(self, max_nr_images):\n # Calculate decay rates\n updates_per_epoch_D = 2 * max_nr_images + self.cf.discriminator_iterations - 1\n updates_per_epoch_G = max_nr_images + self.cf.generator_iterations - 1\n if self.cf.use_identity_learning:\n updates_per_epoch_G *= (1 + 1 / self.cf.identity_mapping_modulus)\n denominator_D = (self.cf.n_epochs - self.cf.decay_epoch) * updates_per_epoch_D\n denominator_G = (self.cf.n_epochs - self.cf.decay_epoch) * updates_per_epoch_G\n decay_D = self.cf.learning_rate_D / denominator_D\n decay_G = self.cf.learning_rate_G / denominator_G\n\n return decay_D, decay_G\n\n def update_lr(self, model, decay):\n new_lr = K.get_value(model.optimizer.lr) - decay\n if new_lr < 0:\n new_lr = 0\n # print(K.get_value(model.optimizer.lr))\n K.set_value(model.optimizer.lr, new_lr)\n\n\n # Images #\n def save_tmp_images(self, real_image_A, real_image_B, synthetic_image_A, synthetic_image_B):\n try:\n reconstructed_image_A = self.G_B2A.predict(synthetic_image_B)\n reconstructed_image_B = self.G_A2B.predict(synthetic_image_A)\n\n real_images = np.vstack((real_image_A[0], real_image_B[0]))\n synthetic_images = np.vstack((synthetic_image_B[0], synthetic_image_A[0]))\n reconstructed_images = np.vstack((reconstructed_image_A[0], reconstructed_image_B[0]))\n\n # save path\n result_path = join(self.cf.savepath, 'tmp_images')\n if exists(result_path) == False:\n mkdir(result_path)\n\n # write images\n save_img(real_image_A[0,:,:,:], join(result_path, 'realA.png'))\n save_img(real_image_B[0,:,:,:], join(result_path, 'realB.png'))\n save_img(synthetic_image_A[0,:,:,:], join(result_path, 'syntheticA.png'))\n save_img(synthetic_image_B[0,:,:,:], 
join(result_path, 'syntheticB.png'))\n save_img(reconstructed_image_A[0,:,:,:], join(result_path, 'reconstructedA.png'))\n save_img(reconstructed_image_B[0,:,:,:], join(result_path, 'reconstructedB.png'))\n\n except: # Ignore if file is open\n pass\n\n def saveImages(self, epoch, real_image_A, real_image_B, num_saved_images=1):\n result_path = join(self.cf.savepath, 'images')\n # if not exists(join(result_path, 'A')):\n # makedirs(join(result_path, 'A'))\n # makedirs(join(result_path, 'B'))\n # makedirs(join(result_path, 'Atest'))\n # makedirs(join(result_path, 'Btest'))\n\n for i in range(num_saved_images):\n # if i == num_saved_images:\n # real_image_A = self.A_test[0]\n # real_image_B = self.B_test[0]\n # real_image_A = np.expand_dims(real_image_A, axis=0)\n # real_image_B = np.expand_dims(real_image_B, axis=0)\n # testString = 'test'\n #\n # else:\n # #real_image_A = self.A_train[rand_A_idx[i]]\n # #real_image_B = self.B_train[rand_B_idx[i]]\n # if len(real_image_A.shape) < 4:\n # real_image_A = np.expand_dims(real_image_A, axis=0)\n # real_image_B = np.expand_dims(real_image_B, axis=0)\n\n synthetic_image_B = self.G_A2B.predict(real_image_A)\n synthetic_image_A = self.G_B2A.predict(real_image_B)\n reconstructed_image_A = self.G_B2A.predict(synthetic_image_B)\n reconstructed_image_B = self.G_A2B.predict(synthetic_image_A)\n\n # write images\n print(\"SHAPE\")\n print(tf.shape(real_image_A))\n save_img(real_image_A[i,:,:,:], join(result_path, str(epoch) + '_' + str(i) + '_realA.png'))\n save_img(real_image_B[i,:,:,:], join(result_path, str(epoch) + '_' + str(i) + '_realB.png'))\n save_img(synthetic_image_A[i,:,:,:], join(result_path, str(epoch) + '_' + str(i) + '_syntheticA.png'))\n save_img(synthetic_image_B[i,:,:,:], join(result_path, str(epoch) + '_' + str(i) + '_syntheticB.png'))\n save_img(reconstructed_image_A[i,:,:,:], join(result_path, str(epoch) + '_' + str(i) + '_reconstructedA.png'))\n save_img(reconstructed_image_B[i,:,:,:], join(result_path, str(epoch) + '_' + str(i) + '_reconstructedB.png'))\n\n # Training #\n def print_ETA(self, start_time, epoch, loop_index):\n passed_time = time.time() - start_time\n\n iterations_so_far = ((epoch - 1) * self.steps_per_epoch + loop_index) / self.cf.batch_size_train\n iterations_total = self.cf.n_epochs * self.steps_per_epoch / self.cf.batch_size_train\n iterations_left = iterations_total - iterations_so_far\n eta = round(passed_time / (iterations_so_far + 1e-5) * iterations_left)\n\n passed_time_string = str(datetime.timedelta(seconds=round(passed_time)))\n eta_string = str(datetime.timedelta(seconds=eta))\n print('Time passed', passed_time_string, ': ETA in', eta_string)\n\n def _run_training_iteration(self, loop_index, epoch, real_images_A, real_images_B, ones, zeros, synthetic_pool_A, synthetic_pool_B):\n # ======= Discriminator training ==========\n # Generate batch of synthetic images\n synthetic_images_B = self.G_A2B.predict(real_images_A)\n synthetic_images_A = self.G_B2A.predict(real_images_B)\n synthetic_images_A = synthetic_pool_A.query(synthetic_images_A)\n synthetic_images_B = synthetic_pool_B.query(synthetic_images_B)\n\n for _ in range(self.cf.discriminator_iterations):\n DA_loss_real = self.D_A.train_on_batch(x=real_images_A, y=ones)\n DB_loss_real = self.D_B.train_on_batch(x=real_images_B, y=ones)\n DA_loss_synthetic = self.D_A.train_on_batch(x=synthetic_images_A, y=zeros)\n DB_loss_synthetic = self.D_B.train_on_batch(x=synthetic_images_B, y=zeros)\n if self.cf.use_multiscale_discriminator:\n DA_loss = sum(DA_loss_real) + 
sum(DA_loss_synthetic)\n DB_loss = sum(DB_loss_real) + sum(DB_loss_synthetic)\n print('DA_losses: ', np.add(DA_loss_real, DA_loss_synthetic))\n print('DB_losses: ', np.add(DB_loss_real, DB_loss_synthetic))\n else:\n DA_loss = DA_loss_real + DA_loss_synthetic\n DB_loss = DB_loss_real + DB_loss_synthetic\n D_loss = DA_loss + DB_loss\n\n if self.cf.discriminator_iterations > 1:\n print('D_loss:', D_loss)\n sys.stdout.flush()\n\n '''\n # zzh\n directory = join(self.cf.savepath, 'saved_models')\n if not exists(directory):\n makedirs(directory)\n temp_D_A_PATH = join(directory,'temp_D_A')\n temp_D_B_PATH = join(directory, 'temp_D_B')\n self.D_A.save_weights(temp_D_A_PATH)\n self.D_A_static.load_weights(temp_D_A_PATH)\n self.D_B.save_weights(temp_D_B_PATH)\n self.D_B_static.load_weights(temp_D_B_PATH)\n '''\n # ======= Generator training ==========\n target_data = [real_images_A, real_images_B] # Compare reconstructed images to real images\n if self.cf.use_multiscale_discriminator:\n for i in range(2):\n target_data.append(ones[i])\n target_data.append(ones[i])\n else:\n target_data.append(ones)\n target_data.append(ones)\n\n if self.cf.use_supervised_learning:\n target_data.append(real_images_A)\n target_data.append(real_images_B)\n\n for _ in range(self.cf.generator_iterations):\n G_loss = self.model.train_on_batch(x=[real_images_A, real_images_B], y=target_data)\n if self.cf.generator_iterations > 1:\n print('G_loss:', G_loss)\n sys.stdout.flush()\n\n gA_d_loss_synthetic = G_loss[1]\n gB_d_loss_synthetic = G_loss[2]\n reconstruction_loss_A = G_loss[3]\n reconstruction_loss_B = G_loss[4]\n\n # Identity training\n if self.cf.use_identity_learning and loop_index % self.cf.identity_mapping_modulus == 0:\n G_A2B_identity_loss = self.G_A2B.train_on_batch(\n x=real_images_B, y=real_images_B)\n G_B2A_identity_loss = self.G_B2A.train_on_batch(\n x=real_images_A, y=real_images_A)\n print('G_A2B_identity_loss:', G_A2B_identity_loss)\n print('G_B2A_identity_loss:', G_B2A_identity_loss)\n\n # Update learning rates\n if self.cf.use_linear_decay and epoch > self.cf.decay_epoch:\n self.update_lr(self.D_A, self.decay_D)\n self.update_lr(self.D_B, self.decay_D)\n self.update_lr(self.model, self.decay_G)\n\n # Store training data\n self.training_history['DA_losses'].append(DA_loss)\n self.training_history['DB_losses'].append(DB_loss)\n self.training_history['gA_d_losses_synthetic'].append(gA_d_loss_synthetic)\n self.training_history['gB_d_losses_synthetic'].append(gB_d_loss_synthetic)\n self.training_history['gA_losses_reconstructed'].append(reconstruction_loss_A)\n self.training_history['gB_losses_reconstructed'].append(reconstruction_loss_B)\n\n GA_loss = gA_d_loss_synthetic + reconstruction_loss_A\n GB_loss = gB_d_loss_synthetic + reconstruction_loss_B\n reconstruction_loss = reconstruction_loss_A + reconstruction_loss_B\n\n self.training_history['D_losses'].append(D_loss)\n self.training_history['G_losses'].append(G_loss)\n self.training_history['reconstruction_losses'].append(reconstruction_loss)\n\n # GA_losses.append(GA_loss)\n # GB_losses.append(GB_loss)\n\n print('\\n')\n print('Epoch----------------', epoch, '/', self.cf.n_epochs)\n print('Loop index----------------', loop_index + 1, '/', self.steps_per_epoch)\n print('D_loss: ', D_loss)\n print('G_loss: ', G_loss[0])\n print('reconstruction_loss: ', reconstruction_loss)\n print('DA_loss:', DA_loss)\n print('DB_loss:', DB_loss)\n\n\n if loop_index % 20 == 0:\n # Save temporary images continously\n #self.save_tmp_images(real_images_A, real_images_B, 
synthetic_images_A, synthetic_images_B)\n self.print_ETA(self.start_time, epoch, loop_index)\n\n def train(self, train_gen, cb):\n if (not self.cf.train_model):\n return None\n\n print('\\n > Training the model...')\n # ======================================================================\n # Begin training\n # ======================================================================\n training_history = {}\n\n DA_losses = []\n DB_losses = []\n gA_d_losses_synthetic = []\n gB_d_losses_synthetic = []\n gA_losses_reconstructed = []\n gB_losses_reconstructed = []\n\n GA_losses = []\n GB_losses = []\n reconstruction_losses = []\n D_losses = []\n G_losses = []\n\n self.training_history = {\n 'DA_losses': DA_losses,\n 'DB_losses': DB_losses,\n 'gA_d_losses_synthetic': gA_d_losses_synthetic,\n 'gB_d_losses_synthetic': gB_d_losses_synthetic,\n 'gA_losses_reconstructed': gA_losses_reconstructed,\n 'gB_losses_reconstructed': gB_losses_reconstructed,\n 'D_losses': D_losses,\n 'G_losses': G_losses,\n 'reconstruction_losses': reconstruction_losses}\n\n # Image pools used to update the discriminators\n synthetic_pool_A = ImagePool(self.cf.synthetic_pool_size)\n synthetic_pool_B = ImagePool(self.cf.synthetic_pool_size)\n\n # self.saveImages('(init)')\n\n # Tweaks\n self.REAL_LABEL = 1.0 # Use e.g. 0.9 to avoid training the discriminators to zero loss\n\n # labels\n if self.cf.use_multiscale_discriminator:\n label_shape1 = (self.cf.batch_size_train,) + self.D_A.output_shape[0][1:]\n label_shape2 = (self.cf.batch_size_train,) + self.D_A.output_shape[1][1:]\n #label_shape4 = (self.cf.batch_size_train,) + self.D_A.output_shape[2][1:]\n ones1 = np.ones(shape=label_shape1) * self.REAL_LABEL\n ones2 = np.ones(shape=label_shape2) * self.REAL_LABEL\n #ones4 = np.ones(shape=label_shape4) * self.REAL_LABEL\n ones = [ones1, ones2] # , ones4]\n zeros1 = ones1 * 0\n zeros2 = ones2 * 0\n #zeros4 = ones4 * 0\n zeros = [zeros1, zeros2] # , zeros4]\n else:\n label_shape = (self.cf.batch_size_train,) + self.D_A.output_shape[1:]\n ones = np.ones(shape=label_shape) * self.REAL_LABEL\n zeros = ones * 0\n\n # Linear decay\n if self.cf.use_linear_decay:\n self.decay_D, self.decay_G = self.get_lr_linear_decay_rate(self.steps_per_epoch * self.cf.n_epochs)\n\n # Start stopwatch for ETAs\n self.start_time = time.time()\n\n for epoch in range(1, self.cf.n_epochs + 1):\n loop_index = 1\n for images in train_gen:\n real_images_A = images[0]\n real_images_B = images[1]\n\n # Run all training steps\n self._run_training_iteration(loop_index, epoch, real_images_A, real_images_B, ones, zeros, synthetic_pool_A, synthetic_pool_B)\n\n # Store models\n if loop_index % 20000 == 0:\n self.save_model(self.D_A, loop_index)\n self.save_model(self.D_B, loop_index)\n self.save_model(self.G_A2B, loop_index)\n self.save_model(self.G_B2A, loop_index)\n\n # Break if loop has ended\n if loop_index >= self.steps_per_epoch:\n break\n\n loop_index += 1\n\n #================== within epoch loop end ==========================\n\n if epoch % self.cf.save_interval == 0:\n print('\\n', '\\n', '-------------------------Saving images for epoch', epoch, '-------------------------', '\\n', '\\n')\n self.saveImages(epoch, real_images_A, real_images_B, tf.shape(real_images_A)[0])\n\n if epoch % 5 == 0:\n # self.save_model(self.model)\n self.save_model(self.D_A, epoch)\n self.save_model(self.D_B, epoch)\n self.save_model(self.G_A2B, epoch)\n self.save_model(self.G_B2A, epoch)\n\n self.writeLossDataToFile(self.training_history)\n\n # Flush out prints each loop iteration\n 
sys.stdout.flush()\n\n self.save_model(self.D_A, epoch)\n self.save_model(self.D_B, epoch)\n self.save_model(self.G_A2B, epoch)\n self.save_model(self.G_B2A, epoch)\n print('Training finished.')\n\n\n def predict(self, test_gen, tag='pred'):\n pass\n\n\n def test(self, test_gen):\n if not self.cf.test_model:\n return\n else:\n test_save_path = join(self.cf.savepath, 'test_results')\n if not exists(test_save_path):\n makedirs(test_save_path)\n print(test_save_path)\n\n print('\\n > Testing the model...')\n\n # Load best trained model\n\n # zzh\n start_time = time.time()\n load_epoch = self.cf.load_epoch_for_test\n self.load_model_weights(self.G_A2B, load_epoch)\n self.load_model_weights(self.G_B2A, load_epoch)\n # self.load_model_weights(self.D_A, load_epoch)\n # self.load_model_weights(self.D_B, load_epoch)\n\n # get correct number of test samples depending on debugging or not\n # if debug, will use n_images_test\n if self.cf.n_images_test is not None:\n nb_test_samples = self.cf.n_images_test\n else:\n samplesTestA = self.get_file_len(self.cf.testA_file_path_full)\n samplesTestB = self.get_file_len(self.cf.testB_file_path_full)\n nb_test_samples = max(samplesTestA, samplesTestB)\n\n test_steps = int(np.ceil(nb_test_samples / float(self.cf.batch_size_test)))\n # Evaluate model\n index = 0\n for images in test_gen:\n\n real_images_A_test = images[0]\n real_images_B_test = images[1]\n\n synthetic_images_B = self.G_A2B.predict_on_batch(real_images_A_test)\n synthetic_images_A = self.G_B2A.predict_on_batch(real_images_B_test)\n\n recon_images_A = self.G_B2A.predict_on_batch(synthetic_images_B)\n recon_images_B = self.G_A2B.predict_on_batch(synthetic_images_A)\n\n save_img(real_images_A_test[0,:,:,:], join(test_save_path, 'test_' + str(index) + '_realA.png'))\n save_img(real_images_B_test[0,:,:,:], join(test_save_path, 'test_' + str(index) + '_realB.png'))\n save_img(synthetic_images_A[0,:,:,:], join(test_save_path, 'test_' + str(index) + '_syntheticA.png'))\n save_img(synthetic_images_B[0,:,:,:], join(test_save_path, 'test_' + str(index) + '_syntheticB.png'))\n save_img(recon_images_A[0,:,:,:], join(test_save_path, 'test_' + str(index) + '_reconstructedA.png'))\n save_img(recon_images_B[0,:,:,:], join(test_save_path, 'test_' + str(index) + '_reconstructedB.png'))\n\n print('\\n')\n print('Generate synthetic images for testing---------', index, '/', nb_test_samples)\n\n index += 1\n\n if index > test_steps:\n break\n\n # y_predictions = self.model.predict(test_gen.make_one_shot_iterator(), steps=nb_test_samples)\n # total_time = time.time() - start_time\n # fps = float(nb_test_samples) / total_time\n # s_p_f = total_time / float(nb_test_samples)\n # print (' Testing time: {}. FPS: {}. 
Seconds per Frame: {}'.format(total_time, fps, s_p_f))\n #\n # # store predicted labels\n # result_path = join(self.cf.savepath, 'predicted_labels')\n # if exists(result_path) == False:\n # mkdir(result_path)\n #\n # results = []\n # fp = open(self.cf.test_file_path_full)\n # image_names = fp.readlines()\n # fp.close()\n # for (idx, img_num) in enumerate(image_names):\n # if idx > nb_test_samples-1:\n # continue\n # y_sample_prediction = y_predictions[idx,:,:,:,:]\n #\n # # print('sample: ' + img_num + ', idx: ' + str(idx))\n # # print('min: ' + str(np.min(y_sample_prediction)))\n # # print('max: ' + str(np.max(y_sample_prediction)))\n #\n # # compress to top probabilty\n #\n # result = np.argmax(y_sample_prediction, axis=-1)\n # z=np.shape(result)[-1]\n # for i in range(z):\n # img_num = img_num.strip('\\n')\n # save_img(result[:,:,i:i+1], join(result_path, img_num + '_slice'+str(i)+'.png'))\n\n print('{} pairs of synthetic images have been generated and placed in {}'\n .format(nb_test_samples, test_save_path))\n total_time = time.time() - start_time\n print('Testing time: {}'.format(total_time))\n#===============================================================================\n# Architecture functions\n\n def ck(self, x, k, use_normalization):\n # zzh: add init\n init = RandomNormal(stddev=0.02)\n x = Conv2D(filters=k, kernel_size=4, strides=2, padding='same', kernel_initializer=init)(x)\n # Normalization is not done on the first discriminator layer\n if use_normalization:\n x = InstanceNormalization(axis=3, center=True, epsilon=1e-5)(x, training=True)\n x = LeakyReLU(alpha=0.2)(x)\n return x\n\n def c7Ak(self, x, k):\n init = RandomNormal(stddev=0.02)\n x = Conv2D(filters=k, kernel_size=7, strides=1, padding='valid', kernel_initializer=init)(x)\n x = InstanceNormalization(axis=3, center=True, epsilon=1e-5)(x, training=True)\n x = Activation('relu')(x)\n return x\n\n def dk(self, x, k):\n init = RandomNormal(stddev=0.02)\n x = Conv2D(filters=k, kernel_size=3, strides=2, padding='same', kernel_initializer=init)(x)\n x = InstanceNormalization(axis=3, center=True, epsilon=1e-5)(x, training=True)\n x = Activation('relu')(x)\n return x\n\n def Rk(self, x0):\n init = RandomNormal(stddev=0.02)\n\n k = int(x0.shape[-1])\n # first layer\n x = ReflectionPadding2D((1,1))(x0)\n x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid', kernel_initializer=init)(x)\n x = InstanceNormalization(axis=3, center=True, epsilon=1e-5)(x, training=True)\n x = Activation('relu')(x)\n # second layer\n x = ReflectionPadding2D((1, 1))(x)\n x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid', kernel_initializer=init)(x)\n x = InstanceNormalization(axis=3, center=True, epsilon=1e-5)(x, training=True)\n # merge\n x = add([x, x0])\n return x\n\n def uk(self, x, k):\n init = RandomNormal(stddev=0.02)\n # (up sampling followed by 1x1 convolution <=> fractional-strided 1/2)\n if self.cf.use_resize_convolution:\n x = UpSampling2D(size=(2, 2))(x) # Nearest neighbor upsampling\n x = ReflectionPadding2D((1, 1))(x)\n x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid', kernel_initializer=init)(x)\n else:\n x = Conv2DTranspose(filters=k, kernel_size=3, strides=2, padding='same', kernel_initializer=init)(x) # this matches fractinoally stided with stride 1/2\n x = InstanceNormalization(axis=3, center=True, epsilon=1e-5)(x, training=True)\n x = Activation('relu')(x)\n return x\n\n#===============================================================================\n# Models\n\n def 
modelMultiScaleDiscriminator(self, name=None):\n x1 = Input(shape=self.cf.input_shape)\n x2 = AveragePooling2D(pool_size=(2, 2))(x1)\n #x4 = AveragePooling2D(pool_size=(2, 2))(x2)\n\n out_x1 = self.modelDiscriminator('D1')(x1)\n out_x2 = self.modelDiscriminator('D2')(x2)\n #out_x4 = self.modelDiscriminator('D4')(x4)\n\n return Model(inputs=x1, outputs=[out_x1, out_x2], name=name)\n\n def modelDiscriminator(self, name=None):\n # Specify input\n input_img = Input(shape=self.cf.input_shape)\n # Layer 1 (#Instance normalization is not used for this layer)\n # C64\n x = self.ck(input_img, 64, False)\n # Layer 2\n # C 128\n x = self.ck(x, 128, True)\n # Layer 3\n # C 256\n x = self.ck(x, 256, True)\n # Layer 4\n # C 512\n x = self.ck(x, 512, True)\n # Output layer\n if self.cf.use_patchgan:\n init = RandomNormal(stddev=0.02)\n x = Conv2D(filters=1, kernel_size=4, strides=1, padding='same', kernel_initializer=init)(x)\n else:\n x = Flatten()(x)\n x = Dense(1)(x)\n #x = Activation('sigmoid')(x) - No sigmoid to avoid near-fp32 machine epsilon discriminator cost\n return Model(inputs=input_img, outputs=x, name=name)\n\n def modelGenerator(self, name=None):\n init = RandomNormal(stddev=0.02)\n # Specify input\n input_img = Input(shape=self.cf.input_shape)\n # Layer 1\n x = ReflectionPadding2D((3, 3))(input_img)\n ''' \n # zzh\n x = self.c7Ak(x, 32)\n # Layer 2\n x = self.dk(x, 64)\n # Layer 3\n x = self.dk(x, 128)\n\n if self.cf.use_multiscale_discriminator:\n # Layer 3.5\n x = self.dk(x, 256)\n\n # Layer 4-12: Residual layer\n for _ in range(4, 13):\n x = self.Rk(x)\n\n if self.cf.use_multiscale_discriminator:\n # Layer 12.5\n x = self.uk(x, 128)\n\n # Layer 13\n x = self.uk(x, 64)\n # Layer 14\n x = self.uk(x, 32)\n x = ReflectionPadding2D((3, 3))(x)\n x = Conv2D(self.cf.channel_size, kernel_size=7, strides=1)(x)\n x = Activation('tanh')(x) # They say they use Relu but really they do not\n return Model(inputs=input_img, outputs=x, name=name)\n # zzh\n '''\n # c7s1-64\n x = self.c7Ak(x, 64)\n # Layer 2, d128\n x = self.dk(x, 128)\n # Layer 3, d256\n x = self.dk(x, 256)\n\n if self.cf.use_multiscale_discriminator:\n # Layer 3.5\n x = self.dk(x, 512)\n\n # Layer 4-12: Residual layer\n for _ in range(4, 13):\n x = self.Rk(x)\n\n if self.cf.use_multiscale_discriminator:\n # Layer 12.5\n x = self.uk(x, 256)\n\n # Layer 13, u128\n x = self.uk(x, 128)\n # Layer 14, u64\n x = self.uk(x, 64)\n x = ReflectionPadding2D((3, 3))(x)\n x = Conv2D(self.cf.channel_size, kernel_size=7, strides=1, padding='valid', kernel_initializer=init)(x)\n x = Activation('tanh')(x) # They say they use Relu but really they do not\n return Model(inputs=input_img, outputs=x, name=name)\n#===============================================================================\n# Loading / Saving\n\n def save_model(self, model, epoch):\n # Create folder to save model architecture and weights\n directory = join(self.cf.savepath, 'saved_models')\n if not exists(directory):\n makedirs(directory)\n\n # model_path_w = 'saved_models/{}_weights_epoch_{}.hdf5'.format(model.name, epoch)\n model_path_w = join(directory, '{}_weights_epoch_{}.hdf5'.format(model.name, epoch))\n model.save_weights(model_path_w)\n #model_path_m = 'saved_models/{}_model_epoch_{}.json'.format(model.name, epoch)\n model_path_m = join(directory, '{}_model_epoch_{}.json'.format(model.name, epoch))\n model.save_weights(model_path_m)\n json_string = model.to_json()\n with open(model_path_m, 'w') as outfile:\n json.dump(json_string, outfile)\n print('{} has been saved in 
saved_models'.format(model.name))\n\n def writeLossDataToFile(self, history):\n keys = sorted(history.keys())\n out_file = join(self.cf.savepath, 'loss_output.csv')\n with open(out_file, 'w') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow(keys)\n writer.writerows(zip(*[history[key] for key in keys]))\n\n #zzh\n def load_model_weights(self, model, epoch):\n # The saved model folder\n directory = join(self.cf.savepath, 'saved_models')\n model_path_w = join(directory, '{}_weights_epoch_{}.hdf5'.format(model.name, epoch))\n model.load_weights(model_path_w)\n\n def load_pretrained_weights(self, model):\n # The pretrained model folder\n directory = join(self.cf.savepath, 'pretrained_weights')\n # all pretrained models should be save by 'save_weights' with name of 'pretrained_xxx'.\n model_path_w = join(directory, 'pretrained_{}.hdf5'.format(model.name))\n model.load_weights(model_path_w)\n","sub_path":"models/cyclegan.py","file_name":"cyclegan.py","file_ext":"py","file_size_in_byte":36879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"13947452","text":"\"\"\"Implementation of Rule L044.\"\"\"\nfrom typing import Dict, List\n\nfrom sqlfluff.core.rules.analysis.select_crawler import SelectCrawler\nfrom sqlfluff.core.dialects.base import Dialect\nfrom sqlfluff.core.rules.base import BaseRule, LintResult\n\n\nclass RuleFailure(Exception):\n \"\"\"Exception class for reporting lint failure inside deeply nested code.\"\"\"\n\n pass\n\n\nclass Rule_L044(BaseRule):\n \"\"\"Query produces an unknown number of result columns.\n\n | **Anti-pattern**\n | Querying all columns using `*` produces a query result where the number\n | or ordering of columns changes if the upstream table's schema changes.\n | This should generally be avoided because it can cause slow performance,\n | cause important schema changes to go undetected, or break production code.\n | For example:\n | * If a query does `SELECT t.*` and is expected to return columns `a`, `b`,\n | and `c`, the actual columns returned will be wrong/different if columns\n | are added to or deleted from the input table.\n | * `UNION` and `DIFFERENCE` clauses require the inputs have the same number\n | of columns (and compatible types).\n | * `JOIN` queries may break due to new column name conflicts, e.g. the\n | query references a column \"c\" which initially existed in only one input\n | table but a column of the same name is added to another table.\n | * `CREATE TABLE (<>) AS SELECT *`\n\n\n .. code-block::\n\n WITH cte AS (\n SELECT * FROM foo\n )\n\n SELECT * FROM cte\n UNION\n SELECT a, b FROM t\n\n | **Best practice**\n | Somewhere along the \"path\" to the source data, specify columns explicitly.\n\n .. code-block::\n\n WITH cte AS (\n SELECT * FROM foo\n )\n\n SELECT a, b FROM cte\n UNION\n SELECT a, b FROM t\n\n \"\"\"\n\n _works_on_unparsable = False\n\n def _handle_alias(self, alias_info, dialect, queries):\n select_info_target = SelectCrawler.get(\n alias_info.from_expression_element, queries, dialect\n )\n if isinstance(select_info_target, str):\n # It's an alias to an external table whose\n # number of columns could vary without our\n # knowledge. Thus, warn.\n self.logger.debug(\n f\"Query target {select_info_target} is external. 
Generating warning.\"\n )\n raise RuleFailure()\n else:\n # Handle nested SELECT.\n self._analyze_result_columns(select_info_target, dialect, queries)\n\n def _analyze_result_columns(\n self,\n select_info_list: List[SelectCrawler],\n dialect: Dialect,\n queries: Dict[str, List[SelectCrawler]],\n ):\n \"\"\"Given info on a list of SELECTs, determine whether to warn.\"\"\"\n # Recursively walk from the given query (select_info_list) to any\n # wildcard columns in the select targets. If every wildcard evdentually\n # resolves to a query without wildcards, all is well. Otherwise, warn.\n for select_info in select_info_list:\n self.logger.debug(f\"Analyzing query: {select_info.select_statement.raw}\")\n for wildcard in select_info.get_wildcard_info():\n if wildcard.tables:\n for wildcard_table in wildcard.tables:\n self.logger.debug(\n f\"Wildcard: {wildcard.segment.raw} has target {wildcard_table}\"\n )\n # Is it an alias?\n alias_info = select_info.find_alias(wildcard_table)\n if alias_info:\n # Found the alias matching the wildcard. Recurse,\n # analyzing the query associated with that alias.\n self._handle_alias(alias_info, dialect, queries)\n else:\n # Not an alias. Is it a CTE?\n if wildcard_table in queries:\n # Wildcard refers to a CTE. Analyze it.\n self._analyze_result_columns(\n queries.pop(wildcard_table), dialect, queries\n )\n else:\n # Not CTE, not table alias. Presumably an\n # external table. Warn.\n self.logger.debug(\n f\"Query target {wildcard_table} is external. Generating warning.\"\n )\n raise RuleFailure()\n else:\n # No table was specified with the wildcard. Assume we're\n # querying from a nested select in FROM.\n select_info_target = SelectCrawler.get(\n select_info.select_statement, queries, dialect\n )\n assert isinstance(select_info_target, list)\n self._analyze_result_columns(\n select_info_target,\n dialect,\n queries,\n )\n\n def _eval(self, segment, dialect, **kwargs):\n \"\"\"Outermost query should produce known number of columns.\"\"\"\n if segment.is_type(\"statement\"):\n queries = SelectCrawler.gather(segment, dialect)\n\n # Begin analysis at the final, outer query (key=None).\n if None in queries:\n select_info = queries[None]\n try:\n return self._analyze_result_columns(select_info, dialect, queries)\n except RuleFailure:\n return LintResult(\n anchor=queries[None][0].select_info.select_statement\n )\n return None\n","sub_path":"src/sqlfluff/rules/L044.py","file_name":"L044.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"597727139","text":"from flask import Flask\nfrom flask import request\nfrom flask import Response\nfrom flask import jsonify\nimport requests\nimport os\nimport json\nimport psycopg2\n\napp = Flask(__name__)\n\nplayers_db = os.environ['DATABASE_URL']\ncontent_db = os.environ['HEROKU_POSTGRESQL_JADE_URL']\n\n@app.route('/')\ndef sayHello():\n\n\n return \"Hello there\"\n\n\n#API 1.1\n@app.route('/game//player//character', methods=['POST'])\ndef createNewPlayerCharacter(game_id,player_id):\n #insert character into db\n\n content = request.get_json()\n title = content['title']\n\n with psycopg2.connect(players_db) as conn:\n cursor = conn.cursor()\n sqli_query = \"INSERT INTO characters (game_id, title, player_id) VALUES (?,?,?)\"\n query = cursor.execute(sqli_query,(int(game_id), title, int(player_id)))\n if query != False:#not sure if this if is needed, but i figured if the insert didn't work, I shouldn't get the lastrowid\n characters_id 
= cursor.lastrowid #this gets the characters_id\n #if the query failed\n if not query:\n abort(409, \"Could not create character\")\n\n return_json = {\"title\":title, \"id\":str(characters_id), \"game_id\":game_id, \"player_id\":player_id, \"location\":\"null\", \"attributes\":\"null\"}\n return jsonify(return_json), 201\n \n\n#1.2 GET /player//character\n#retrieve a list of player characters\n@app.route('/player//character',methods=['GET'])\ndef get_player_characters(player_id): \n with psycopg2.connect(players_db) as conn:\n cursor = conn.cursor()\n sqli_query = \"SELECT characters_id, title FROM characters WHERE player_id=?\"\n cursor.execute(sqli_query, (test,))\n result = cursor.fetchall()\n final = []\n for row in result:\n item = {'id': row[0], 'title': row[1]}\n final.append(item)\n return jsonify(final), 200\n \n\n#1.3 GET /player//character/\n#retrieve player character details\n@app.route('/player//character/',methods=['GET'])\ndef get_player_characters_details(player_id, characters_id): \n with psycopg2.connect(players_db) as conn:\n cursor = conn.cursor()\n sqli_query = \"SELECT * FROM characters WHERE characters_id=?\" \n cursor.execute(sqli_query, (characters_id,))\n result = cursor.fetchone()\n result_characters_id = result[0]\n result_game_id = result[1]\n result_player_id = result[2]\n result_title = result[3]\n\n with psycopg2.connect(players_db) as conn:\n cursor = conn.cursor()\n sqli_query = \"SELECT * FROM characters_attributes WHERE character_id=?\" \n cursor.execute(sqli_query, (characters_id,)) \n result = cursor.fetchone()\n result_attributes = {\"players_attributes_id\":result[0],\"player_id\":result[1],\"attr_title\":result[2],\"attr_value\":result[3]}\n character = {\"title\":result_title, \"id\":result_characters_id, \"game_id\":result_game_id, \"player_id\":result_player_id, \"attributes\":result_attributes}\n return jsonify(character), 200\n\n#get 1.4 from Laura\n\n\n# 4.1 - Retrieve all items\n# GET /game//item \n@app.route('/game//item',methods=['GET'])\ndef get_all_items(game_id): \n with psycopg2.connect(content_db) as conn:\n cursor = conn.cursor()\n sqli_query = \"SELECT items_id, title FROM items WHERE game_id=?\" \n cursor.execute(sqli_query, (game_id,))\n result = cursor.fetchall()\n return_json = []\n for row in result:\n item = {'id': row[0], 'title': row[1]}\n return_json.append(item)\n return jsonify(return_json), 200\n\n\n# 4.2 add a new item to a game\n# POST /game//item \n@app.route('/game//item', methods=['POST'])\ndef createNewItem(game_id):\n\n content = request.get_json()\n title = content['title']\n description = content['description']\n aliases = content['aliases']#array\n attributes = content['attributes']#object\n \n #3 inserts\n # 1. insert items -> title, desc, game_id\n # !!get the lastrowid\n with psycopg2.connect(content_db) as conn:\n cursor = conn.cursor()\n sqli_query = \"INSERT INTO items (game_id, title, description) VALUES (?,?,?)\"\n query = cursor.execute(sqli_query,(int(game_id), title, description))\n if query != False: \n item_id = cursor.lastrowid\n #if the query failed\n if not query:\n abort(409, \"Could not create item\")\n\n # 2. loop through aliases array, insert into aliases -> item_id, title\n with psycopg2.connect(content_db) as conn:\n cursor = conn.cursor()\n for v in aliases:\n sqli_query = \"INSERT INTO items_aliases (item_id, title) VALUES (?,?)\"\n query = cursor.execute(sqli_query, (item_id, v)) \n if not query:\n abort(409, \"Could not create item\")\n\n # 3. 
loop through attributes object, insert items_attr. -> item_id, attr_title, attr_value\n with psycopg2.connect(content_db) as conn:\n cursor = conn.cursor()\n for k in attributes:\n sqli_query = \"INSERT INTO items_attributes (item_id, attr_title, attr_value) VALUES (?,?,?)\"\n query = cursor.execute(sqli_query, (item_id, k, attributes[k])) \n if not query:\n abort(409, \"Could not create item\")\n\n #return_json = {\"title\":title, \"id\":items_id, \"game_id\":game_id, \"description\":description, \"aliases\":aliases, \"attributes\":attributes}\n #return jsonify(return_json), 201\n return get_item(game_id, item_id)\n\n\n# 4.3 - retrieve all item details\n@app.route('/game//item/',methods=['GET'])\ndef get_item(game_id,item_id): \n with psycopg2.connect(content_db) as conn:\n cursor = conn.cursor()\n sqli_query = \"SELECT title, description FROM items WHERE game_id=? AND items_id=?\" \n cursor.execute(sqli_query, (game_id, item_id))\n result = cursor.fetchone()\n title = result[0]\n description = result[1]\n sqli_query = \"SELECT title FROM items_aliases WHERE item_id=?\" #notice here it's item_id and above it's items_id... I was erroneously inconsistent while creating the db\n cursor.execute(sqli_query, (item_id,))\n result = cursor.fetchall()\n aliases = []\n for r in result: \n aliases.append(r[0])\n sqli_query = \"SELECT attr_title, attr_value FROM items_attributes WHERE item_id=?\" #notice here it's item_id and above it's items_id... I was erroneously inconsistent while creating the db\n cursor.execute(sqli_query, (item_id,))\n result = cursor.fetchall()\n attributes = {}\n for row in result: \n attributes[row[0]] = row[1]\n\n\n return_json = {\"title\":title, \"id\":item_id, \"game_id\":game_id, \"description\":description, \"aliases\":aliases, \"attributes\":attributes} \n return jsonify(return_json), 200\n\n# 4.4 update item details\n@app.route('/game//item/', methods=['PUT'])\ndef updateItemDetails(game_id,item_id):\n\n content = request.get_json()\n title = content['title']\n description = content['description']\n aliases = content['aliases']#array\n attributes = content['attributes']#object\n\n with psycopg2.connect(content_db) as conn:\n cursor = conn.cursor()\n sqli_query = \"UPDATE items SET title=?, description=? WHERE items_id=?\"\n query = cursor.execute(sqli_query,(title, description,item_id))\n if not query:\n abort(409, \"Could not update\")\n #delete aliases and reinsert \n sqli_query = \"DELETE FROM items_aliases WHERE item_id=?\"\n query = cursor.execute(sqli_query,(item_id,))\n if not query:\n abort(409, \"Could not update\")\n for k in aliases:\n sqli_query = \"INSERT INTO items_aliases (item_id, title) VALUES (?,?)\"\n query = cursor.execute(sqli_query, (item_id, k,)) \n if not query:\n abort(409, \"Could not create item\")\n #delete old attribtues and insert new ones\n sqli_query = \"DELETE FROM items_attributes WHERE item_id=?\"\n query = cursor.execute(sqli_query,(item_id,))\n if not query:\n abort(409, \"Could not update\")\n # 3. loop through attributes object, insert items_attr. 
-> item_id, attr_title, attr_value\n for k in attributes:\n sqli_query = \"INSERT INTO items_attributes (item_id, attr_title, attr_value) VALUES (?,?,?)\"\n query = cursor.execute(sqli_query, (item_id, k, attributes[k])) \n if not query:\n abort(409, \"Could not create item\")\n\n if not query:\n abort(409, \"Could not update\")\n\n return get_item(game_id,item_id)\n\n#this method executes after every API request\n@app.after_request\ndef after_requestuest(response):\n return response\n\napp.debug = True\nhost = os.environ.get('OP', '0.0.0.0')\nport = int(os.environ.get('PORT', 8080))\napp.run(host=host, port=port)\n","sub_path":"route_config.py","file_name":"route_config.py","file_ext":"py","file_size_in_byte":8369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"155116127","text":"from copy import deepcopy\nfrom multiprocessing import Process, Queue\nfrom itertools import product\nimport sys, os\nimport numpy as np\nimport time\nimport argparse\n\n\nsys.path.append(os.path.abspath(\".\"))\n\n\ndef kwargs_to_cmd(kwargs):\n cmd = \"python main.py \"\n for flag, val in kwargs.items():\n cmd += f\"--{flag}={val} \"\n\n return cmd\n\n\ndef run_exp(gpu_num, in_queue):\n while not in_queue.empty():\n try:\n experiment = in_queue.get(timeout=3)\n except:\n return\n\n before = time.time()\n\n experiment[\"multigpu\"] = gpu_num\n print(f\"==> Starting experiment {kwargs_to_cmd(experiment)}\")\n os.system(kwargs_to_cmd(experiment))\n\n with open(\"output.txt\", \"a+\") as f:\n f.write(\n f\"Finished experiment {experiment} in {str((time.time() - before) / 60.0)}.\"\n )\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu-sets', default=0, type=lambda x: [a for a in x.split(\"|\") if a])\n parser.add_argument('--seeds', default=1, type=int)\n parser.add_argument('--data', default='~/data', type=str)\n args = parser.parse_args()\n\n gpus = args.gpu_sets\n seeds = list(range(args.seeds))\n data = args.data\n\n config = \"experiments/GG/splitcifar100/configs/rn18-separate-heads-randw.yaml\"\n log_dir = \"supermasks/runs/rn18-separate-heads-randw\"\n experiments = []\n\n for seed in seeds:\n kwargs = {\n \"config\": config,\n \"name\": f\"id=separate-heads~seed={seed}\",\n \"seed\": seed,\n \"log-dir\": log_dir,\n \"data\": data\n }\n\n experiments.append(kwargs)\n\n print(experiments)\n input(\"Press any key to continue...\")\n queue = Queue()\n\n for e in experiments:\n queue.put(e)\n\n processes = []\n for gpu in gpus:\n p = Process(target=run_exp, args=(gpu, queue))\n p.start()\n processes.append(p)\n\n for p in processes:\n p.join()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"experiments/GG/splitcifar100/rn18-separate-heads-randw.py","file_name":"rn18-separate-heads-randw.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"579682514","text":"import cv2\nimport numpy as np \nimport matplotlib.pyplot as plt \n\nimg = cv2.imread('lena.jpg')\nimg = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n\n#Homogenous filter : each output is mean of its kernal neighbour\nkernel = np.ones((5,5),np.float32)/25\ndst =cv2.filter2D(img,-1,kernel)#Homogenous filter\n\nblur = cv2.blur(img,(5,5)) # blur image\n\n#Gaussian Filter. Good for high freq noise\ngblur = cv2.GaussianBlur(img,(5,5),0)\n\n#Medain filter. Replace with averge. 
good for salt & pepper noise\nmedian = cv2.medianBlur(img,5)#Kernel must me odd\n\n#bilateral filter .Preserve edges\nbfilter = cv2.bilateralFilter(img,9,75,75)\n\ntitles = [ 'image','2d Convolution0','blur','GBlur','Median','bilateral']\nimages = [img,dst,blur,gblur,median,bfilter]\n\nfor i in range(6):\n plt.subplot(3,2,i+1)\n plt.imshow(images[i],'gray')\n plt.title(titles[i])\n plt.xticks([]),plt.yticks([])\n\nplt.show()","sub_path":"Codes/smoothing.py","file_name":"smoothing.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"298522263","text":"from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor\nfrom apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom flask import Flask\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom pytz import utc\n\n# Flask app:\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_object('config')\napp.config.from_pyfile('config.py')\n\n# Data base:\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\n\n# Scheduler:\n@app.before_first_request\ndef add_scheduler():\n jobstores = {\n 'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')\n }\n executors = {\n 'default': ThreadPoolExecutor(20),\n 'processpool': ProcessPoolExecutor(5)\n }\n job_defaults = {\n 'coalesce': False,\n 'max_instances': 3\n }\n scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)\n\n scheduler.start()\n\n scheduler.reschedule_job(job_id='covid_job', trigger='interval', minutes=app.config['REQUEST_INTERVAL'])\n scheduler.reschedule_job(job_id='currencies_job', trigger='interval', minutes=app.config['REQUEST_INTERVAL'])\n\n scheduler.print_jobs()\n\n\nfrom app import models, routes\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"141953400","text":"#\n# @lc app=leetcode.cn id=1703 lang=python3\n#\n# [1703] 得到连续 K 个 1 的最少相邻交换次数\n#\n# https://leetcode-cn.com/problems/minimum-adjacent-swaps-for-k-consecutive-ones/description/\n#\n# algorithms\n# Hard (39.19%)\n# Likes: 19\n# Dislikes: 0\n# Total Accepted: 868\n# Total Submissions: 2.2K\n# Testcase Example: '[1,0,0,1,0,1]\\n2'\n#\n# 给你一个整数数组 nums 和一个整数 k 。 nums 仅包含 0 和 1 。每一次移动,你可以选择 相邻 两个数字并将它们交换。\n# \n# 请你返回使 nums 中包含 k 个 连续 1 的 最少 交换次数。\n# \n# \n# \n# 示例 1:\n# \n# 输入:nums = [1,0,0,1,0,1], k = 2\n# 输出:1\n# 解释:在第一次操作时,nums 可以变成 [1,0,0,0,1,1] 得到连续两个 1 。\n# \n# \n# 示例 2:\n# \n# 输入:nums = [1,0,0,0,0,0,1,1], k = 3\n# 输出:5\n# 解释:通过 5 次操作,最左边的 1 可以移到右边直到 nums 变为 [0,0,0,0,0,1,1,1] 。\n# \n# \n# 示例 3:\n# \n# 输入:nums = [1,1,0,1], k = 2\n# 输出:0\n# 解释:nums 已经有连续 2 个 1 了。\n# \n# \n# \n# \n# 提示:\n# \n# \n# 1 <= nums.length <= 10^5\n# nums[i] 要么是 0 ,要么是 1 。\n# 1 <= k <= sum(nums)\n# \n# \n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def minMoves(self, nums: List[int], k: int) -> int:\n if k == 1:\n return 0\n n = len(nums)\n g = []\n sum = [0]\n count = -1\n for i in range(n):\n if nums[i] == 1:\n count += 1\n g.append(i - count)\n sum.append(sum[-1] + g[-1])\n m = len(g)\n ans = float('inf')\n for i in range(m - k + 1):\n mid = (i + i + k - 1) // 2\n q = g[mid]\n ans = min(ans, (2 * (mid - i) - k + 1) * q + (sum[i + k] - sum[mid + 1]) - (sum[mid]\n - sum[i]))\n return 
ans\n \n# @lc code=end\n\n","sub_path":"1703.得到连续-k-个-1-的最少相邻交换次数.py","file_name":"1703.得到连续-k-个-1-的最少相邻交换次数.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"95144675","text":"\"Based on a code from Firedrake\"\n\"Plus moving frame -vv*(phi_x-Delta(phi)_x)\"\n\"Half domain\"\n\"BL: continuous Galerkin\"\n\"rectangle domain\"\n\nfrom firedrake import *\nfrom ini_eta import *\nfrom ini_phi import *\nfrom trans import *\nfrom firedrake.petsc import PETSc\nimport numpy as np\nimport time\n\n\n\nop2.init()\nparameters[\"coffee\"][\"O2\"] = False\n\n# Timing\nt00 = time.time()\n\n\n\n\"\"\" ________________ Parameters ________________ \"\"\"\n\nepsilon = 0.05 # Small amplitude parameter\nep=epsilon\nmu = 0.0025 # Small dispersion parameter\n\nt=-200\ndt = 0.005 # Time step\ndt1=dt\n\n\"\"\" ___________________ Mesh ___________________ \"\"\"\nmesh = Mesh(\"dom9_h4.msh\") # Load the mesh file\n\ncoords = mesh.coordinates\n\n\n\"\"\" ______________ Function Space ______________ \"\"\"\nV = FunctionSpace(mesh, \"CG\", 1) # Vector space\n\n\"\"\" ___________ Define the functions ___________ \"\"\"\n\neta0 = Function(V, name=\"eta\")\nphi0 = Function(V, name=\"phi\")\neta1 = Function(V, name=\"eta_next\")\nphi1 = Function(V, name=\"phi_next\")\neta2 = Function(V, name=\"eta_trans\")\nphi2 = Function(V, name=\"phi_trans\")\n\nq0 = Function(V)\nq1 = Function(V)\nphi_h = Function(V)\nR_half = Function(V, name=\"eta_xx\")\nq_h = Function(V, name=\"-phi_xx\")\n\n\nq = TrialFunction(V)\nv = TestFunction(V)\n\nu1 = Function(V) # eta(n)\nu2 = Function(V) # eta(n)\nu3=Function(V)\nu4=Function(V)\nut=Function(V)\nuxt=Function(V)\nd1 = Function(V) # eta(n)\n\n\n\n\n\n\"\"\" _____________ Initial solution _____________ \"\"\"\n\n\n\ng=.25\nqq=(2/9)**(1/6)*g/np.sqrt(ep)\ndel1=0.001\nww=107/104\nM=qq/(1+2*del1+np.sqrt(ww))\nm=M*del1\nk1 = -(1+.5*np.sqrt(ww))*M-m\nk2 = -(.5*np.sqrt(ww))*M-m\nk3 = k2+m\nk4 = -k3\nk5 = -k2\nk6 = -k1\n\n# Expression of eta and phi\nx = SpatialCoordinate(mesh)\nxx= (x[0]-5)*(4.5)**(1/6)*(ep/mu)**(1/2)\nyy= x[1]*(4.5)**(1/3)*ep*(1/mu)**(1/2)\nt0 = Constant(t)\n\n\n\neta0=initial_eta(xx,yy,u1,u2,u3,u4,ut,uxt,d1,eta0,k1,k2,k3,k4,k5,k6,t,ep,mu)\nphi0=initial_phi(xx,yy,u1,d1,phi0,k1,k2,k3,k4,k5,k6,t,ep,mu)\n\n\n\n\n\n\n\"\"\" _____________ Weak formulations _____________ \"\"\"#,time=t\n\nFphi_h = ( v*(phi_h-phi0)/(0.5*dt) + 0.5*mu*inner(grad(v),grad((phi_h-phi0)/(0.5*dt)))\n + v*eta0 + 0.5*epsilon*inner(grad(phi_h),grad(phi_h))*v)*dx\nphi_problem_h = NonlinearVariationalProblem(Fphi_h,phi_h)\nphi_solver_h = NonlinearVariationalSolver(phi_problem_h)\n\n# followed by a calculation of a half-step solution :math:`q`, performed using a linear solver::\n\naq_h = v*q*dx\nLq_h = 2.0/3.0*inner(grad(v),grad(phi_h))*dx\n\n\nq_problem_h = LinearVariationalProblem(aq_h,Lq_h,q_h)\nq_solver_h = LinearVariationalSolver(q_problem_h)\n\n\n\n\n#\nFeta = ( v*(eta1-eta0)/dt + 0.5*mu*inner(grad(v),grad((eta1-eta0)/dt))\n - 0.5*((1+epsilon*eta0)+(1+epsilon*eta1))*inner(grad(v),grad(phi_h))\n - mu*inner(grad(v),grad(q_h)))*dx\neta_problem = NonlinearVariationalProblem(Feta,eta1)\neta_solver = NonlinearVariationalSolver(eta_problem)\n\n# and finally the second half-step (explicit this time) for the equation of :math:`\\phi` is performed and :math:`q` is computed for the updated solution::\n#\nFphi = ( v*(phi1-phi_h)/(0.5*dt) + 0.5*mu*inner(grad(v),grad((phi1-phi_h)/(0.5*dt)))\n + v*eta1 + 
0.5*epsilon*inner(grad(phi_h),grad(phi_h))*v)*dx\n\nphi_problem = NonlinearVariationalProblem(Fphi,phi1)\nphi_solver = NonlinearVariationalSolver(phi_problem)\n\nLq = 2.0/3.0*inner(grad(v),grad(phi1))*dx\nq_problem = LinearVariationalProblem(aq_h,Lq,q1)\nq_solver = LinearVariationalSolver(q_problem)\n\nphi1.assign(phi0)\nq_solver.solve()\n\nE_data1 = np.zeros(1)\nE1 = assemble( (0.5*eta0**2+0.5*(1+epsilon*eta0)*abs(grad(phi0))**2\\\n +mu*(inner(grad(q1),grad(phi0)))+mu*( - 0.75*q1**2))*dx )\nE_data1[0]=E1\n \nPETSc.Sys.Print(t,E1) \n\"\"\" _____________ Time loop _____________ \"\"\"\n\noutput1 = File('data/data_tes/output.pvd')\noutput1.write(phi0, eta0,phi2, eta2, time=t)\n\n\n# We are now ready to enter the main time iteration loop::\nt1=t\nstep=int(0)\nT = int(200)\n\nddx=1/12\ntrans_gap=2\n \nwhile t < t1+T: \n t += dt \n # PETSc.Sys.Print(t)\n phi_solver_h.solve()\n q_solver_h.solve()\n eta_solver.solve()\n phi_solver.solve()\n q_solver.solve() \n \n eta0.assign(eta1)\n phi0.assign(phi1)\n step +=int(1)\n if step % 50 == 0: \n output1.write(phi0, eta0,phi2, eta2, time=t)\n if step % 400== 0: \n phi2=trans(coords,phi0,phi2,ddx,trans_gap)\n eta2=trans(coords,eta0,eta2,ddx,trans_gap)\n output1.write(phi0, eta0,phi2, eta2, time=t)\n eta0.assign(eta2)\n phi0.assign(phi2) \n PETSc.Sys.Print(t,E1) \n \n E1 = assemble( (0.5*eta0**2+0.5*(1+epsilon*eta0)*abs(grad(phi0))**2\\\n +mu*(inner(grad(q1),grad(phi0)))+mu*( - 0.75*q1**2))*dx )\n E_data1=np.r_[E_data1,[E1]] \n\n np.savetxt('data/data_tes/energy1.csv', E_data1)\n\n \n\n#Edata.close()\nprint(time.time() - t00) # Print computational time (s)\n","sub_path":"BenneyLuke3A/BL_main_code.py","file_name":"BL_main_code.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"477610996","text":"import sys\nimport asyncio\nimport json\nfrom utils import scan_json\nimport config\n\nloop = asyncio.get_event_loop()\n\nconn = None\nlog_last = 0\n\nstate = 'unknown'\nuptime = None\nmem_rss = None\nmem_vms = None\n\ndef format_uptime(t):\n t = int(t)\n seconds = t % 60\n t = t // 60\n minutes = t % 60\n t = t // 60\n hours = t % 24\n t = t // 24\n return '{} days {}:{}:{}'.format(t, hours, minutes, seconds)\n\ndef format_mem(m):\n if m < 1024: return str(m)\n m = m / 1024\n if m < 1024: return '{:.1f}K'.format(m)\n m = m / 1024\n if m < 1024: return '{:.1f}M'.format(m)\n m = m / 1024\n return '{:.1f}G'.format(m)\n\ndef request(**req):\n if not conn: return\n conn.write(json.dumps(req).encode())\n\nclass ClientProtocol(asyncio.Protocol):\n def connection_made(self, transport):\n global conn\n conn = transport\n request(method=\"set_property\", key=\"keep_connection\", value=True)\n request(method=\"set_property\", key=\"push_notifications\", value=True)\n request(method=\"refresh\", count=10)\n self.buffer = b'';\n print(\"connected\")\n\n def connection_lost(self, exc):\n loop.stop()\n print(\"disconnected\")\n\n def data_received(self, data):\n self.buffer += data\n while True:\n (res, been_read, error) = scan_json(self.buffer)\n if error:\n print(error)\n self.buffer = self.buffer[been_read:]\n if not res: break\n if res['status'] == 'ok':\n if 'log' in res:\n global log_last\n for ent in res['log']:\n sys.stdout.write(ent['message'])\n if ent['time'] > log_last: log_last = ent['time']\n if 'state' in res:\n global state, uptime, mem_rss, mem_vms\n state = res['state']\n uptime = res.get('uptime', None)\n mem_rss = res.get('mem_rss', None)\n mem_vms = 
res.get('mem_vms', None)\n else:\n print(\"error: \"+res['error'])\n\ndef keyboard_reader():\n line = sys.stdin.readline().strip()\n if conn:\n if len(line) == 0: return\n if line == ':help' or line == ':?':\n print('console commands:')\n print(':log, :refresh - send refresh request')\n print(':status, :state - show process state')\n return\n elif line == ':log' or line == ':refresh':\n request(method=\"refresh\", time=log_last)\n return\n elif line == ':status' or line == ':state':\n print('state = '+state)\n if (uptime): print('uptime = '+format_uptime(uptime))\n if (mem_rss): print('mem_rss = '+format_mem(mem_rss))\n if (mem_vms): print('mem_vms = '+format_mem(mem_vms))\n return\n if line[0] == ':':\n request(method=\"daemon_command\", command=line[1:])\n else:\n request(method=\"process_command\", command=line)\n\nloop.add_reader(sys.stdin, keyboard_reader)\ntry:\n loop.run_until_complete(\n loop.create_unix_connection(ClientProtocol, config.daemon_socket)\n );\nexcept FileNotFoundError:\n print(config.daemon_socket+\" doesn't exist\")\n exit()\n\ntry:\n loop.run_forever()\nexcept KeyboardInterrupt:\n print(\"\\nKeyboardInterrupt\")\n pass\n\nloop.close()","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"364786304","text":"import time\nimport IEM as iem\nimport pickle as pkl\n\n# Classes\nclass Paquete:\n \"\"\"Clase que define a el paquete de venta\"\"\"\n def __init__(self, nombre, destino, precio):\n self.nombre = nombre\n self.destino = destino\n self.precio = precio\n\n# Functions\n\ndef cargar_paquetes():\n try:\n paquetes = pkl.load(open(\"Paquetes.data\", \"rb\"))\n except FileNotFoundError:\n paquetes = []\n pkl.dump(paquetes,open(\"Paquetes.data\", \"wb\"))\n finally:\n return paquetes\n\n\ndef cargar_libro():\n try:\n libro = pkl.load(open(\"libro.data\", \"rb\"))\n except FileNotFoundError:\n libro = []\n pkl.dump(libro,open(\"libro.data\", \"wb\"))\n finally:\n return libro\n\n\ndef nuevo_paquete():\n nuevo_nombre = input(\"Como se llama el paquete? \")\n nuevo_destino = input(\"Cual es el destino del paquete? \")\n try:\n nuevo_precio = int(input(\"Cual es el precio del paquete? 
\"))\n except ValueError:\n print(\"El precio necesita ser un numero, intenta nuevamente...\")\n nuevo_precio = int(input(\"Precio: \"))\n pack = Paquete(nuevo_nombre, nuevo_destino, nuevo_precio)\n paquetes.append(pack)\n pkl.dump(paquetes, open(\"Paquetes.data\", \"wb\"))\n\n\ndef leer_paquetes():\n for pack in paquetes:\n print(\"Nombre: {}\\nDestino: {} \\nPrecio: {}\".format(pack.nombre, pack.destino, pack.precio))\n\n\npause = iem.pause\nclear = iem.cls\n\npaquetes = cargar_paquetes()\nlibro = cargar_libro()\n\nleer_paquetes()\n","sub_path":"EXECUTE.py","file_name":"EXECUTE.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"419539049","text":"import pandas as pd\nimport numpy as np\nimport re\n\n\"\"\"\nThis module aims at separating the different column types\n\"\"\"\n\nTEXT_THRESHOLD = 10\nCATS_THRESHOLD = 10\n\n\ndef types(df, verbose=True):\n text, tags, cats, boolean= [], [], [], []\n cols = df.select_dtypes(exclude=[np.number]).columns.values\n N = max(len(col) for col in cols)\n for col in df.select_dtypes(exclude=[np.number]).columns:\n if df[col].dtype == np.bool:\n l = 1\n else:\n l = df[col].astype(str).fillna('None').str.split().str.len().max()\n n = df[col].nunique()\n if n == 2:\n boolean.append(col)\n if verbose:print(col.ljust(N, ' '), '-> bool', f'(max words: {l}, unique: {n})')\n elif n < CATS_THRESHOLD:\n if verbose:print(col.ljust(N, ' '), '-> cats', f'(max words: {l}, unique: {n})')\n cats.append(col)\n else:\n if l < TEXT_THRESHOLD:\n if verbose:print(col.ljust(N, ' '), '-> tags', f'(max words: {l}, unique: {n})')\n tags.append(col)\n else:\n if verbose:print(col.ljust(N, ' '), '-> text', f'(max words: {l}, unique: {n})')\n text.append(col)\n\n return {\n 'num': list(df.select_dtypes(include=[np.number]).columns.values),\n 'text': text,\n 'tags': tags,\n 'cats': cats,\n 'bool': boolean\n }\n\nfirst_cap_re = re.compile('(.)([A-Z][a-z]+)')\nall_cap_re = re.compile('([a-z0-9])([A-Z])')\ndef to_snake_case(name):\n s1 = first_cap_re.sub(r'\\1_\\2', name)\n return all_cap_re.sub(r'\\1_\\2', s1).lower()\n\ndef downcast(df):\n d = df.copy()\n for col in df:\n n = d[col].nunique()\n if n < 2:\n d.drop(col,inplace=True,axis=1)\n continue\n if n == 2:\n if df[col].dtype == np.bool:\n continue\n a = df[col].iloc[0]\n d[col] = d[col] == a\n continue\n if d[col].dtype == np.int:\n d[col] = pd.to_numeric(d[col],downcast='integer')\n d[col] = pd.to_numeric(d[col],downcast='unsigned')\n continue\n if d[col].dtype == np.float:\n try:\n if (df[col]==df[col].astype(int)).all():\n d[col] = pd.to_numeric(d[col],downcast='integer')\n d[col] = pd.to_numeric(d[col],downcast='unsigned')\n except:\n pass\n continue\n if d[col].dtype == 'object':\n if 2 * n <= len(d.index) :\n d[col] = d[col].fillna('MISSING').astype('category')\n return d","sub_path":"utils/.ipynb_checkpoints/columns-checkpoint.py","file_name":"columns-checkpoint.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"509432103","text":"import heapq\nimport re\nimport sys\n\nimport math\n\n\ndef count_upper_decile(filename):\n pattern = re.compile(b\"(\\d+) usec\")\n count = 0\n\n with open(filename, 'rb') as f:\n for line in f:\n if line.startswith(b\"open\"):\n break\n\n for line in f:\n if line.startswith(b\"open\"):\n count += 1\n\n if count > 0:\n heap_size = int(math.ceil(count / 10))\n heap = [0] * heap_size\n\n f.seek(0)\n for 
line in f:\n if line.startswith(b\"open\"):\n break\n\n for line in f:\n if line.startswith(b\"open\"):\n value = int(pattern.search(line).group(1))\n if value > heap[0]:\n heapq.heappushpop(heap, value)\n return heap[0]\n else:\n return None\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Name of file was not found.\", file=sys.stderr)\n else:\n try:\n upper_decile = count_upper_decile(sys.argv[1])\n if upper_decile is None:\n print(\"Sample is empty\")\n else:\n print(\"upper decile = {}\".format(upper_decile))\n except Exception as e:\n print(e, file=sys.stderr)\n","sub_path":"problems-3/milyaev/task-2/task-2.py","file_name":"task-2.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"236964093","text":"\"\"\"\n Author: Prava Dhulipalla\n Retrive sentences from the text that have the candidate name(s) passed\n\n\"\"\"\n\nimport nltk\n\n# list of words for scoring positive/negative\n# NOTE: need to add more words\npos_words = [\n 'good',\n 'best',\n 'great',\n 'greatest'\n 'excellent',\n 'positive',\n 'tremendous',\n 'marvelous',\n 'amazing',\n 'wonderful',\n 'fabulous',\n 'truthful',\n 'decent',\n 'improve',\n 'intelligent',\n 'smart',\n 'honest'\n 'nice',\n 'fine',\n 'love',\n 'loves'\n ]\n\nneg_words = [\n 'bad',\n 'worse',\n 'mean',\n 'crooked',\n 'liar',\n 'fraud',\n 'stupid',\n 'nasty',\n 'ruin',\n 'destroy',\n 'racist',\n 'racists',\n 'bigot',\n 'bigots',\n 'extremist',\n 'extremists',\n 'terrorist',\n 'terrorists',\n 'negative',\n 'fringe',\n 'hate',\n 'hates',\n 'hatred',\n 'dishonest',\n 'bombastic',\n 'sad',\n 'sick',\n 'deplorable',\n 'deplorables'\n ]\n\n\nclass Quote:\n \"\"\"\n A quote and a positive/negative/neutral flag\n \"\"\"\n def __init__(self):\n self.text = ''\n self.tone = ''\n\n def __str__(self):\n return self.text + ' (' + self.tone + ')'\n\n\ndef get_quote(text):\n \"\"\"\n We use a list of positive and negative words to identify the tone.\n If positive word exists in the sentence, but not \"not or no\" + word, we add 1 to the score.\n If negative word exists in the sentence, but not \"not or no\" + word, we subtract 1 from the score.\n positive score indicates positive tone, negative score indicates negative tone and zero score indicates neutral\n \"\"\"\n articles = ['a', 'an', 'the']\n negatives = ['no', 'not']\n quote = Quote()\n quote.text = text\n text_words = nltk.word_tokenize(text)\n score = 0\n\n for index, word in enumerate(text_words):\n if word.lower() in pos_words:\n # logic to take care of phrases like \"not good\" or \"not a good person\"\n if ((index > 0 and text_words[index-1].lower() in negatives) or\n (index > 1 and text_words[index-1].lower() in articles and\n text_words[index-2].lower() in negatives)):\n score -= 1 # not positive = negative\n else:\n score += 1 # postive\n\n elif word.lower() in neg_words:\n # logic to take care of phrases like \"not bad\" or \"not a bad person\"\n if ((index > 0 and text_words[index-1].lower() in negatives) or\n (index > 1 and text_words[index-1].lower() in articles and\n text_words[index-2].lower() in negatives)):\n score += 1 # not negative = positive\n else:\n score -= 1 # negative\n\n if score > 0:\n quote.tone = 'positive'\n elif score < 0:\n quote.tone = 'negative'\n else:\n quote.tone = 'neutral'\n\n return quote\n\n\ndef get_sentences(text, names):\n \"\"\"\n Returns sentences from text that conains candidate names(s).\n names is a list of name variations of candindate like 
['Hillary', 'Clinton', 'Hillary Clinton']\n or ['Donald', 'Trump', 'Donald J. Trump']\n \"\"\"\n\n sentences1 = nltk.sent_tokenize(text)\n sentences2 = []\n for sentence in sentences1:\n if any(name in sentence for name in names):\n sentences2.append(sentence)\n\n return sentences2\n\n\n\n\nif __name__ == \"__main__\":\n names = ['William', 'Derksen', 'Will']\n text = \"I told Will to shut up. Will got hacked. Will doesn't know how to listen to directions. Kill Will.\"\n s = get_sentences(text, names)\n for x in s:\n print(get_quote(x))\n","sub_path":"phrase_extractor.py","file_name":"phrase_extractor.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"526798018","text":"import logging\nfrom logging.handlers import TimedRotatingFileHandler\nimport os\nimport json\nfrom collections import OrderedDict\nfrom datetime import datetime\n\nfrom .mylog import log\n\nCHECKER_LOG_DIR = \"/logs\"\nCHECKER_STATUS_DIR = \"/status\"\n\ndef get_checker_log_dir():\n return CHECKER_LOG_DIR\n\ndef get_checker_status_dir():\n return CHECKER_STATUS_DIR\n\n_checker_loggers = dict()\n\ndef get_checker_logger(checker_id):\n if checker_id in _checker_loggers:\n return _checker_loggers[checker_id]\n checker_logger = logging.getLogger(checker_id)\n checker_logger.setLevel(logging.INFO)\n checker_logger.propagate = False\n handler = TimedRotatingFileHandler(\n filename=os.path.join(get_checker_log_dir(),\n \"{0:s}.log\".format(checker_id)),\n when=\"midnight\",\n backupCount=10,\n utc=True)\n handler.setFormatter(logging.Formatter(\"%(message)s\"))\n checker_logger.addHandler(handler)\n _checker_loggers[checker_id] = checker_logger\n return checker_logger\n\ndef get_checker_writer(checker_id):\n return open(os.path.join(get_checker_status_dir(),\n \"{0:s}.json\".format(checker_id)),\n \"w\")\n\ndef _checker_result_record(checker_id, checker_result):\n return OrderedDict([\n (\"timestamp\", datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")),\n (\"checker_id\", checker_id),\n (\"result\", checker_result._asdict())\n ])\n\ndef write_checker_log(checker_id, checker_result):\n logger = get_checker_logger(checker_id)\n logger.info(json.dumps(_checker_result_record(checker_id, checker_result)))\n\ndef write_checker_status(checker_id, checker_result):\n with get_checker_writer(checker_id) as w:\n w.write(json.dumps(\n _checker_result_record(checker_id, checker_result),\n indent=2\n ))\n w.write('\\n')\n w.flush()\n os.fsync(w.fileno())\n\n_checker_memlog = dict()\n\ndef write_checker_memlog(checker_id, checker_result):\n _checker_memlog[checker_id] = checker_result\n\ndef get_checker_memlog(checker_id):\n if not checker_id in _checker_memlog:\n log.debug(\"Memlog for checker %s not found.\", checker_id)\n return _checker_memlog.get(checker_id)\n\ndef log_checker_result(checker_id, checker_result):\n write_checker_memlog(checker_id, checker_result)\n write_checker_status(checker_id, checker_result)\n write_checker_log(checker_id, checker_result)\n","sub_path":"src/checker/checkerlog.py","file_name":"checkerlog.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"147222356","text":"# -*- coding:utf-8 -*-\nimport os.path\nfrom selenium import webdriver\nfrom common import logger\nfrom config import config_info\n\nlog = logger.Logger(logger=\"BrowserEngine\").getlog()\n\nclass BrowserEngine(object):\n '''\n 3.os.path.dirname(path)\n 
返回path的目录。其实就是os.path.split(path)的第一个元素。\n dir = F:\\web_automation\n '''\n dir = os.path.dirname(os.path.abspath('.'))\n chrome_driver_path = dir + '/tools/chromedriver.exe'\n firefox_driver_path = dir + '/tools/geckodriver.exe'\n ie_driver_path = dir + '/tools/IEDriverServer.exe'\n\n def __init__(self,driver):\n self.driver = driver\n\n def open_browser(self):\n browser = config_info.GetConfigData().getconfigdata()['browsername']\n url = config_info.GetConfigData().getconfigdata()['URL']\n\n\n if browser == 'Chrome':\n # self.driver = webdriver.Chrome()\n self.driver = webdriver.Chrome(self.chrome_driver_path)\n log.info(\"Starting chrome browser.\")\n elif browser == 'Firefox':\n self.driver = webdriver.Firefox(self.firefox_driver_path)\n log.info(\"Starting firefox browser.\")\n elif browser == 'PhantomJS':\n self.driver = webdriver.PhantomJS()\n log.info(\"Starting PhantomJS browser.\")\n elif browser == 'Ie':\n self.driver = webdriver.Ie(self.ie_driver_path)\n log.info(\"Starting ie browser.\")\n\n self.driver.get(url)\n log.info(\"Open url: %s\" % url)\n self.driver.maximize_window()\n log.info(\"Maximize the current window.\")\n self.driver.implicitly_wait(10)\n log.info(\"Set implicitly wait 10 seconds.\")\n return self.driver\n\nif __name__ == '__main__':\n browser = config_info.GetConfigData().getconfigdata()['browsername']\n url = config_info.GetConfigData().getconfigdata()['URL']\n print(browser)\n print(url)","sub_path":"common/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"120242608","text":"\"\"\"empty message\n\nRevision ID: 68ac6fb1c086\nRevises: 9f2abdd7a490\nCreate Date: 2018-06-25 12:49:54.650669\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '68ac6fb1c086'\ndown_revision = '9f2abdd7a490'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('department_tree',\n sa.Column('id', mysql.BIGINT(unsigned=True), nullable=False),\n sa.Column('department_id', mysql.BIGINT(unsigned=True), nullable=False),\n sa.Column('parent_id', mysql.BIGINT(unsigned=True), nullable=False),\n sa.Column('depth', mysql.BIGINT(unsigned=True), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n mysql_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n op.create_index('idx_department_tree_d', 'department_tree', ['department_id'], unique=False)\n op.create_index('idx_department_tree_p', 'department_tree', ['parent_id'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index('idx_department_tree_p', table_name='department_tree')\n op.drop_index('idx_department_tree_d', table_name='department_tree')\n op.drop_table('department_tree')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/68ac6fb1c086_.py","file_name":"68ac6fb1c086_.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"608442794","text":"import numpy as np\r\nimport tensorflow as tf\r\nfrom dataset import read_libsvm, Dataset\r\nfrom softmax_model import placeholder, variables, inferDense, loss_train, calAcc\r\n\r\n\r\ndef train_save(ds: Dataset, t_labs: np.ndarray, t_data: np.ndarray, num_feats: int, num_class: int):\r\n x_data, labels = placeholder(num_feats, num_class)\r\n weight, bias, global_steps = variables(num_feats, num_class)\r\n logits, predict = inferDense(x_data, weight, bias)\r\n acc = calAcc(labels, predict)\r\n train_op, loss = loss_train(labels, logits, global_steps)\r\n tf.summary.scalar('acc', acc)\r\n tf.summary.scalar('loss', loss)\r\n init = tf.global_variables_initializer()\r\n\r\n merged = tf.summary.merge_all()\r\n with tf.Session() as sess:\r\n sess.run(init)\r\n writer = tf.summary.FileWriter('logs', sess.graph)\r\n\r\n for y_batch, x_batch in ds:\r\n _, gs = sess.run(fetches=[train_op, global_steps], feed_dict={\r\n x_data: x_batch, labels: y_batch\r\n })\r\n\r\n if gs % 200 == 0:\r\n acc_val, summary = sess.run(fetches=[acc, merged], feed_dict={\r\n x_data: t_data, labels: t_labs\r\n })\r\n print(type(summary))\r\n writer.add_summary(summary, gs)\r\n\r\n print(acc_val)\r\n writer.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n n_feats, batch_size, n_class = 180, 128, 3\r\n tr_data = read_libsvm(\"data/dna.scale.tr\")\r\n test_labs, test_data = read_libsvm(\"data/dna.scale.t\", one_hot=True)\r\n train_ds = Dataset(tr_data, nclass=n_class, max_epoch=1000, batch_size=batch_size)\r\n\r\n train_save(train_ds, test_labs, test_data, num_feats=n_feats, num_class=n_class)\r\n","sub_path":"pycode/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"217954981","text":"#!/usr/bin/python3\n\nimport argparse\nimport collections\nimport functools\nimport itertools\nimport re\nimport sys\nimport typing\n\n\ndef load_file(filename):\n contents = []\n with open(filename, 'r') as f:\n for line in f:\n line = line.strip() # remove trailing newline\n contents.append(line)\n return contents\n\n\ndef load_groups(filename):\n # because some files follow this format instead.\n contents = []\n acc = [] \n for line in load_file(filename):\n if line:\n acc.append(line)\n else:\n contents.append(acc)\n acc = []\n\n if acc:\n contents.append(acc)\n\n return contents\n\n\ndef any_item(iterable, default=None):\n for i in iterable:\n return i\n return default\n\n\n# re-implement itertools.product to better handle our use case\n# only handles length \ndef specialized_product(*args):\n l = len(args)\n assert l == 1 or l == 2\n if l == 1:\n for a in args[0]:\n yield a\n return\n for a in args[0]:\n for b in args[1]:\n yield a + b\n\n\ndef grouper(string, n):\n l = len(string)\n assert l % n == 0\n out = []\n for i in range(l // n):\n offset = i * n\n sub = string[offset:offset + n]\n out.append(sub)\n return out\n\n\nclass MessageParser:\n @staticmethod\n def __rule_match(line):\n s = line.split(\" \")\n return tuple([int(i) for i in line.split(\" 
\")])\n\n @staticmethod\n def from_lines(lines):\n # the dict has the following structure:\n # [int] = list(value)\n # value = str | tuple(int+)\n rules = {}\n for line in lines:\n key, match = line.split(\": \")\n key = int(key)\n if \"\\\"\" in match:\n rules[key] = [match[1:-1]]\n elif \"|\" in match:\n m1, m2 = match.split(\" | \")\n rules[key] = [MessageParser.__rule_match(m1), MessageParser.__rule_match(m2)]\n else:\n rules[key] = [MessageParser.__rule_match(match)]\n\n return MessageParser(rules)\n\n def __init__(self, rules):\n self.rules = rules\n self.matches = None\n\n def __repr__(self):\n return \"MessageParser({})\".format(repr(self.rules))\n\n def __matching(cache, rules, num):\n if num in cache:\n return cache[num]\n\n # generate all the matches for this rule.\n assert type(num) == int\n #print(\"generating\", num, flush=True)\n\n rule = rules[num]\n\n # every rule is a list of matching things.\n matches = []\n for i, subrule in enumerate(rule):\n if type(subrule) == str:\n #print(\"str match\", \"{}.{}\".format(num, i), subrule, flush=True)\n matches.append(subrule)\n else:\n submatches = [MessageParser.__matching(cache, rules, r) for r in subrule]\n #print(\"submatch\", \"{}.{}\".format(num, i), submatches, flush=True)\n expanded = specialized_product(*submatches)\n #print(\"expanded\", \"{}.{}\".format(num, i), expanded, flush=True)\n matches.extend(expanded)\n\n #print(\"generated\", num, \"value\", matches, \"from\", rules[num], flush=True)\n cache[num] = matches\n return cache[num]\n\n def __gen_matches(self):\n # generate all matches.\n cache = {}\n assert self.rules[0] == [(8, 11)]\n assert self.rules[8] == [(42,)]\n assert self.rules[11] == [(42, 31)]\n # This evaluates almost everything, and these rules are needed later.\n MessageParser.__matching(cache, self.rules, 31)\n MessageParser.__matching(cache, self.rules, 42)\n # convert rules 42 and 31 into sets, because we perform a bunch of contains\n # checks with those in particular.\n cache[42] = frozenset(cache[42])\n cache[31] = frozenset(cache[31])\n\n matches = None\n if False:\n # Generating this takes a considerable amount of time, that's not worth it.\n #\n # 1) generating all the values for rule 0 is slow: ~330ms\n # 2) converting the list to a set is slow: ~550ms\n #\n # Lookups in the set aren't slow, but amortized over construction time, it's not really worth it.\n #\n # Using the optimized 3 set lookup, we avoid ~880ms of overhead. See MessageParser.part1_match\n #\n # aside) not converting to a set and doing lookups is really slow: ~8s\n\n MessageParser.__matching(cache, self.rules, 0)\n matches = frozenset(cache[0])\n #matches = cache[0]\n # remove rules 0, 8, and 11 from the cache. 
They are changed in part 2\n # and we don't want to accidentally rely on them for part 1.\n del cache[0]\n del cache[8]\n del cache[11]\n\n # We take advantage of this property as well.\n # check it only once.\n p = len(any_item(cache[42]))\n for r in cache[42]:\n assert p == len(r)\n for r in cache[31]:\n assert p == len(r)\n\n # making the matching values into a set speeds up all contains checks.\n # 200% worth it.\n return (cache, matches)\n\n def gen_cache(self):\n assert self.rules[0] == [(8, 11)]\n assert self.rules[8] == [(42,)]\n assert self.rules[11] == [(42, 31)]\n\n if self.matches is None:\n self.matches = self.__gen_matches()\n\n def chunk_size(self):\n self.gen_cache()\n rule42 = self.matches[0][42]\n return len(any_item(rule42))\n\n def count(self):\n self.gen_cache()\n c42 = self.matches[0][42]\n c31 = self.matches[0][31]\n return len(c42) * len(c42) * len(c31)\n\n def part1_match(self, value):\n # This strategy is _much_ faster than evaluating all the strings that match rule 0\n # and checking if the string we are passed is in that set.\n self.gen_cache()\n\n # were going to abuse the rule structure, \n # make sure it matches our expectations.\n assert self.rules[0] == [(8, 11)]\n assert self.rules[8] == [(42,)]\n assert self.rules[11] == [(42, 31)]\n rule31 = self.matches[0][31]\n rule42 = self.matches[0][42]\n\n chunk_size = self.chunk_size()\n if len(value) % chunk_size != 0 or len(value) // chunk_size != 3:\n return False\n\n chunked = grouper(value, chunk_size)\n\n # if you noticed above:\n # rule 0 = 8 11\n # = 42 42 31\n return chunked[0] in rule42 and chunked[1] in rule42 and chunked[2] in rule31\n\n def part2_match(self, value):\n self.gen_cache()\n rule42 = self.matches[0][42]\n rule31 = self.matches[0][31]\n\n # They have overlap.\n #assert len(rule42.union(rule31)) == 0\n\n # The new 8 & 11 rules:\n #\n # 8: 42 | 42 8\n # 11: 42 31 | 42 11 31\n #\n # equivalent (ish):\n #\n # 8 can be seen as: 42 +\n # 11 is approximately matching brackets\n #\n # this makes rule 0:\n # 42 + 42 {n} 31 {n}\n\n # since everything in rule42 and rule31 has the same length we can chunk the input. :)\n chunk_size = self.chunk_size()\n if len(value) % chunk_size != 0:\n return False\n\n chunked = grouper(value, chunk_size)\n #print(\"chunked\", chunked)\n\n # recall we are trying to match:\n #\n # 42+ 42{n} 31{n}\n #\n # Rule 11 looks like bracket matching, but because we have rule 8\n # we can greedily match rule 42, and once we stop matching it, the rest\n # must match rule 31. However (!) 
we must make sure that we have more\n # matches against rule 42 than 31, because rule 8 requires at least 1\n # match.\n #\n # Therefore we end up with:\n #\n # 42{x} 31{y} ; x > y\n\n i = 0 # for \n for i in range(len(chunked)):\n # i isn't shadowed, it updates\n if chunked[i] not in rule42:\n break\n\n count42 = i\n\n for i in range(i, len(chunked)):\n if chunked[i] not in rule31:\n return False\n\n return count42 > len(chunked) - count42\n\n\ndef parseit(things):\n return (MessageParser.from_lines(things[0]), things[1])\n\n\ndef part1(things):\n parser, messages = things\n\n count = 0\n for m in messages:\n #print(\"checking\", m)\n if parser.part1_match(m):\n #print(\"matches!\")\n count += 1\n\n return count\n\n\ndef part2(things):\n parser, messages = things\n\n # part2 changes 2 rules at the root of matching stuff.\n print(\"42:\", len(parser.matches[0][42]))\n print(\"31:\", len(parser.matches[0][31]))\n\n count = 0\n for m in messages:\n #print(\"checking\", m)\n if parser.part2_match(m):\n #print(\"matches!\")\n count += 1\n\n return count\n\n\ndef main(filename):\n things = parseit(load_groups(filename))\n\n #print(things[0])\n print(\"matching strings (part 1):\", things[0].count())\n print()\n\n print(\"part 1:\", part1(things))\n print(\"part 2:\", part2(things))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('input', nargs='?', default='/dev/stdin')\n\n args = parser.parse_args(sys.argv[1:])\n main(args.input)\n","sub_path":"2020/day-19/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"33897516","text":"#! /usr/bin/env python\n\nimport os\nimport unittest\nfrom types import GeneratorType\n\nfrom grader import Grader, points\nfrom hw0 import case_sarcastically, detokenize, gen_sentences\n\n\nclass TestGenSentences(unittest.TestCase):\n @points(5)\n def test_type(self) -> None:\n \"\"\"Test that a generator is returned.\"\"\"\n gen = gen_sentences(os.path.join(\"test_data\", \"hw0_tokenized_text_1.txt\"))\n self.assertEqual(type(gen), GeneratorType)\n\n @points(10)\n def test_basic(self) -> None:\n \"\"\"Test reading a basic file.\"\"\"\n gen = gen_sentences(os.path.join(\"test_data\", \"hw0_tokenized_text_1.txt\"))\n self.assertEqual(\n next(gen), [\"Tokenized\", \"text\", \"is\", \"easy\", \"to\", \"work\", \"with\", \".\"]\n )\n self.assertEqual(\n next(gen), [\"Writing\", \"a\", \"tokenizer\", \"is\", \"a\", \"pain\", \".\"]\n )\n with self.assertRaises(StopIteration):\n next(gen)\n\n @points(10)\n def test_advanced(self) -> None:\n \"\"\"Test reading a more complex file.\"\"\"\n gen = gen_sentences(os.path.join(\"test_data\", \"hw0_tokenized_text_2.txt\"))\n self.assertEqual(next(gen), [\"Hello\", \",\", \"world\", \"!\"])\n # Between these sentences, there is a line in the file with a single space,\n # which should be skipped over.\n self.assertEqual(next(gen), [\"This\", \"is\", \"a\", \"normal\", \"sentence\", \".\"])\n self.assertEqual(\n next(gen),\n [\n '\"',\n \"I\",\n \"don't\",\n \"like\",\n \"it\",\n \"when\",\n \"there's\",\n \"too\",\n \"much\",\n \"punctuation\",\n \"!\",\n '\"',\n \",\",\n \"they\",\n \"exclaimed\",\n \".\",\n ],\n )\n with self.assertRaises(StopIteration):\n next(gen)\n\n\nclass TestDetokenize(unittest.TestCase):\n @points(10)\n def test_simple(self) -> None:\n \"\"\"Test a simple sentence.\"\"\"\n self.assertEqual(detokenize([\"Hello\", \",\", \"world\", \"!\"]), 
\"Hello, world!\")\n\n @points(5)\n def test_quotes1(self) -> None:\n \"\"\"Test how quotes are handled.\"\"\"\n self.assertEqual(\n detokenize(\n [\n '\"',\n \"I\",\n \"don't\",\n \"know\",\n \"what\",\n \"NLP\",\n \"is\",\n \",\",\n '\"',\n \"he\",\n \"said.\",\n ]\n ),\n '\"I don\\'t know what NLP is,\" he said.',\n )\n self.assertEqual(\n detokenize(\n ['\"', \"Too\", \"much\", \"punctuation\", \"!\", '\"', \"they\", \"exclaimed\", \".\"]\n ),\n '\"Too much punctuation!\" they exclaimed.',\n )\n\n @points(5)\n def test_quotes2(self) -> None:\n \"\"\"Test how quotes are handled.\"\"\"\n self.assertEqual(\n detokenize(\n [\n \"She\",\n \"said\",\n \",\",\n '\"',\n \"I\",\n \"don't\",\n \"like\",\n \"punctuation\",\n \",\",\n \"do\",\n \"you\",\n \"?\",\n '\"',\n ]\n ),\n 'She said, \"I don\\'t like punctuation, do you?\"',\n )\n\n @points(5)\n def test_strange_punc(self) -> None:\n \"\"\"Test unusual punctuation.\"\"\"\n self.assertEqual(\n detokenize(\n [\n \"Punctuation\",\n \"can\",\n \"surprise\",\n \"you\",\n \";\",\n \"no\",\n \"one\",\n \"expects\",\n \"the\",\n \"interrobang\",\n \"‽\",\n ]\n ),\n \"Punctuation can surprise you; no one expects the interrobang‽\",\n )\n\n @points(5)\n def test_em_dash(self) -> None:\n \"\"\"Test em dash.\"\"\"\n self.assertEqual(\n detokenize(\n [\n \"The\",\n \"em\",\n \"dash\",\n \"—\",\n \"one\",\n \"of\",\n \"my\",\n \"favorite\",\n \"characters\",\n \"—\",\n \"is\",\n \"often\",\n \"mistaken\",\n \"for\",\n \"the\",\n \"en\",\n \"dash\",\n \".\",\n ]\n ),\n \"The em dash—one of my favorite characters—is often mistaken for the en dash.\",\n )\n\n\nclass TestSarcasticCaser(unittest.TestCase):\n @points(5)\n def test_no_punc(self) -> None:\n \"\"\"Test basic text.\"\"\"\n assert case_sarcastically(\"hello\") == \"hElLo\"\n\n @points(10)\n def test_punc1(self) -> None:\n \"\"\"Test how punctuation is handled.\"\"\"\n assert case_sarcastically(\"hello, friend!\") == \"hElLo, FrIeNd!\"\n\n @points(10)\n def test_punc2(self) -> None:\n \"\"\"Test how punctuation is handled.\"\"\"\n assert case_sarcastically('Say \"hello,\" friend‽') == 'sAy \"HeLlO,\" fRiEnD‽'\n\n\ndef main() -> None:\n tests = [\n TestGenSentences,\n TestDetokenize,\n TestSarcasticCaser,\n ]\n grader = Grader(tests)\n grader.print_results()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cosi114_hw0_rachel_peng/test_hw0.py","file_name":"test_hw0.py","file_ext":"py","file_size_in_byte":5679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"318147878","text":"import aiohttp\nimport asyncio\nimport uvicorn\nfrom fastai import *\nfrom fastai.vision import *\nfrom io import BytesIO\nfrom starlette.applications import Starlette\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.responses import HTMLResponse, JSONResponse\nfrom starlette.staticfiles import StaticFiles\n\nexport_file_url = 'https://drive.google.com/uc?export=download&id=10RBKTfg2vlqJBp3Mrklqd1ZMot5al50J'\nexport_file_name = 'export.pkl'\n\nclasses = ['ALBATROSS',\n 'ALEXANDRINE PARAKEET',\n 'AMERICAN BITTERN',\n 'AMERICAN GOLDFINCH',\n 'AMERICAN KESTREL',\n 'AMERICAN REDSTART',\n 'ANHINGA',\n 'ANNAS HUMMINGBIRD',\n 'BALD EAGLE',\n 'BALTIMORE ORIOLE',\n 'BANANAQUIT',\n 'BAR-TAILED GODWIT',\n 'BARN OWL',\n 'BARN SWALLOW',\n 'BAY-BREASTED WARBLER',\n 'BELTED KINGFISHER',\n 'BIRD OF PARADISE',\n 'BLACK FRANCOLIN',\n 'BLACK SKIMMER',\n 'BLACK-CAPPED CHICKADEE',\n 'BLACK-NECKED GREBE',\n 'BLACKBURNIAM WARBLER',\n 'BLUE HERON',\n 'BOBOLINK',\n 
'BROWN THRASHER',\n 'CACTUS WREN',\n 'CALIFORNIA CONDOR',\n 'CALIFORNIA GULL',\n 'CALIFORNIA QUAIL',\n 'CAPE MAY WARBLER',\n 'CHARA DE COLLAR',\n 'CHIPPING SPARROW',\n 'CINNAMON TEAL',\n 'COCK OF THE ROCK',\n 'COCKATOO',\n 'COMMON LOON',\n 'COMMON POORWILL',\n 'COMMON STARLING',\n 'COUCHS KINGBIRD',\n 'CRESTED AUKLET',\n 'CRESTED CARACARA',\n 'CROW',\n 'CROWNED PIGEON',\n 'CURL CRESTED ARACURI',\n 'DARK EYED JUNCO',\n 'DOWNY WOODPECKER',\n 'EASTERN BLUEBIRD',\n 'EASTERN ROSELLA',\n 'EASTERN TOWEE',\n 'ELEGANT TROGON',\n 'EMPEROR PENGUIN',\n 'EVENING GROSBEAK',\n 'FLAME TANAGER',\n 'FLAMINGO',\n 'FRIGATE',\n 'GLOSSY IBIS',\n 'GOLD WING WARBLER',\n 'GOLDEN CHLOROPHONIA',\n 'GOLDEN EAGLE',\n 'GOLDEN PHEASANT',\n 'GOULDIAN FINCH',\n 'GRAY CATBIRD',\n 'GRAY PARTRIDGE',\n 'GREY PLOVER',\n 'HAWAIIAN GOOSE',\n 'HOODED MERGANSER',\n 'HOOPOES',\n 'HOUSE FINCH',\n 'HOUSE SPARROW',\n 'HYACINTH MACAW',\n 'INDIGO BUNTING',\n 'JABIRU',\n 'LARK BUNTING',\n 'LILAC ROLLER',\n 'LONG-EARED OWL',\n 'MALLARD DUCK',\n 'MANDRIN DUCK',\n 'MARABOU STORK',\n 'MOURNING DOVE',\n 'MYNA',\n 'NICOBAR PIGEON',\n 'NORTHERN CARDINAL',\n 'NORTHERN FLICKER',\n 'NORTHERN GOSHAWK',\n 'NORTHERN MOCKINGBIRD',\n 'OSTRICH',\n 'PAINTED BUNTIG',\n 'PARADISE TANAGER',\n 'PARUS MAJOR',\n 'PEACOCK',\n 'PELICAN',\n 'PEREGRINE FALCON',\n 'PINK ROBIN',\n 'PUFFIN',\n 'PURPLE FINCH',\n 'PURPLE GALLINULE',\n 'PURPLE MARTIN',\n 'QUETZAL',\n 'RAINBOW LORIKEET',\n 'RED FACED CORMORANT',\n 'RED HEADED WOODPECKER',\n 'RED THROATED BEE EATER',\n 'RED WINGED BLACKBIRD',\n 'RED WISKERED BULBUL',\n 'RING-NECKED PHEASANT',\n 'ROADRUNNER',\n 'ROBIN',\n 'ROUGH LEG BUZZARD',\n 'RUBY THROATED HUMMINGBIRD',\n 'SAND MARTIN',\n 'SCARLET IBIS',\n 'SCARLET MACAW',\n 'SNOWY EGRET',\n 'SPLENDID WREN',\n 'STORK BILLED KINGFISHER',\n 'STRAWBERRY FINCH',\n 'TEAL DUCK',\n 'TIT MOUSE',\n 'TOUCHAN',\n 'TRUMPTER SWAN',\n 'TURKEY VULTURE',\n 'TURQUOISE MOTMOT',\n 'VARIED THRUSH',\n 'VENEZUELIAN TROUPIAL',\n 'VERMILION FLYCATHER',\n 'VIOLET GREEN SWALLOW',\n 'WESTERN MEADOWLARK',\n 'WILSONS BIRD OF PARADISE',\n 'WOOD DUCK',\n 'YELLOW HEADED BLACKBIRD']\npath = Path(__file__).parent\n\napp = Starlette()\napp.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])\napp.mount('/static', StaticFiles(directory='app/static'))\n\n\nasync def download_file(url, dest):\n if dest.exists(): return\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n data = await response.read()\n with open(dest, 'wb') as f:\n f.write(data)\n\n\nasync def setup_learner():\n await download_file(export_file_url, path / export_file_name)\n try:\n learn = load_learner(path, export_file_name)\n return learn\n except RuntimeError as e:\n if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:\n print(e)\n message = \"\\n\\nThis model was trained with an old version of fastai and will not work in a CPU environment.\\n\\nPlease update the fastai library in your training environment and export your model again.\\n\\nSee instructions for 'Returning to work' at https://course.fast.ai.\"\n raise RuntimeError(message)\n else:\n raise\n\n\nloop = asyncio.get_event_loop()\ntasks = [asyncio.ensure_future(setup_learner())]\nlearn = loop.run_until_complete(asyncio.gather(*tasks))[0]\nloop.close()\n\n\n@app.route('/')\nasync def homepage(request):\n html_file = path / 'view' / 'index.html'\n return HTMLResponse(html_file.open().read())\n\n\n@app.route('/analyze', methods=['POST'])\nasync def analyze(request):\n img_data = await 
request.form()\n img_bytes = await (img_data['file'].read())\n img = open_image(BytesIO(img_bytes))\n prediction = learn.predict(img)[0]\n return JSONResponse({'result': str(prediction)})\n\n\nif __name__ == '__main__':\n if 'serve' in sys.argv:\n uvicorn.run(app=app, host='0.0.0.0', port=5000, log_level=\"info\")\n","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"403100743","text":"\n# coding: utf-8\n\n# # Assignment 4\n# \n# Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment.\n# \n# This assignment requires that you to find **at least** two datasets on the web which are related, and that you visualize these datasets to answer a question with the broad topic of **economic activity or measures** (see below) for the region of **Elst, Provincie Gelderland, Netherlands**, or **Netherlands** more broadly.\n# \n# You can merge these datasets with data from different regions if you like! For instance, you might want to compare **Elst, Provincie Gelderland, Netherlands** to Ann Arbor, USA. In that case at least one source file must be about **Elst, Provincie Gelderland, Netherlands**.\n# \n# You are welcome to choose datasets at your discretion, but keep in mind **they will be shared with your peers**, so choose appropriate datasets. Sensitive, confidential, illicit, and proprietary materials are not good choices for datasets for this assignment. You are welcome to upload datasets of your own as well, and link to them using a third party repository such as github, bitbucket, pastebin, etc. Please be aware of the Coursera terms of service with respect to intellectual property.\n# \n# Also, you are welcome to preserve data in its original language, but for the purposes of grading you should provide english translations. You are welcome to provide multiple visuals in different languages if you would like!\n# \n# As this assignment is for the whole course, you must incorporate principles discussed in the first week, such as having as high data-ink ratio (Tufte) and aligning with Cairo’s principles of truth, beauty, function, and insight.\n# \n# Here are the assignment instructions:\n# \n# * State the region and the domain category that your data sets are about (e.g., **Elst, Provincie Gelderland, Netherlands** and **economic activity or measures**).\n# * You must state a question about the domain category and region that you identified as being interesting.\n# * You must provide at least two links to available datasets. These could be links to files such as CSV or Excel files, or links to websites which might have data in tabular form, such as Wikipedia pages.\n# * You must upload an image which addresses the research question you stated. In addition to addressing the question, this visual should follow Cairo's principles of truthfulness, functionality, beauty, and insightfulness.\n# * You must contribute a short (1-2 paragraph) written justification of how your visualization addresses your stated research question.\n# \n# What do we mean by **economic activity or measures**? 
For this category you might look at the inputs or outputs to the given economy, or major changes in the economy compared to other regions.\n# \n# ## Tips\n# * Wikipedia is an excellent source of data, and I strongly encourage you to explore it for new data sources.\n# * Many governments run open data initiatives at the city, region, and country levels, and these are wonderful resources for localized data sources.\n# * Several international agencies, such as the [United Nations](http://data.un.org/), the [World Bank](http://data.worldbank.org/), the [Global Open Data Index](http://index.okfn.org/place/) are other great places to look for data.\n# * This assignment requires you to convert and clean datafiles. Check out the discussion forums for tips on how to do this from various sources, and share your successes with your fellow students!\n# \n# ## Example\n# Looking for an example? Here's what our course assistant put together for the **Ann Arbor, MI, USA** area using **sports and athletics** as the topic. [Example Solution File](./readonly/Assignment4_example.pdf)\n\n# In[8]:\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib \n\nsns.set_palette(sns.color_palette(\"husl\", 12))\n\n# Obtain production and revenue data\n\ndf = pd.read_csv('Nijverheid__producti_110717162813.csv', sep=';', encoding = \"ISO-8859-1\", skipfooter=1, skiprows=1)\nselect_columns = ['Onderwerpen_1',\n 'Onderwerpen_2',\n 'Bedrijfstakken/branches (SBI 2008)',\n '2006',\n '2007', \n '2008', \n '2009', \n '2010', \n '2011', \n '2012', \n '2013', \n '2014', \n '2015', \n '2016']\ndf = df[select_columns]\ndf = df.rename(columns={'Onderwerpen_1': 'Type', \n 'Onderwerpen_2': 'Subject', \n 'Bedrijfstakken/branches (SBI 2008)': 'Sector'})\n\ndf = df[df['Type'].str.contains('Indexcijfers')]\n\ntranslate_dict = {\n 'B Delfstoffenwinning': 'Mineral extraction',\n 'C Industrie': 'Industry',\n '10-12 Voedings-, genotmiddelenindustrie': 'Food and stimulant industry', \n '13-15 Textiel-, kleding-, lederindustrie': 'Textile, clothing, and leather industry',\n '16+23 Hout- en bouwmaterialenindustrie': 'Wood and construction industry',\n '17-18 Papier- en grafische industrie': 'Paper and graphical industry',\n '19-22 Raffinaderijen en chemie': 'Refineries and chemical industry',\n '24-25 Basismetaal, metaalprod.-industrie': 'Base metal and metalproduction industry',\n '26-28 Elektrotechn. 
en machine-industrie': 'Electrotechnical and machine industry',\n '29-30 Transportmiddelenindustrie': 'Transportation industry', \n '31 Meubelindustrie': 'Furniture industry',\n '35-36 Energie- en waterleidingbedrijven': 'Energy and water industry'}\ndf['Sector'] = df['Sector'].map(translate_dict)\n\nyears = [str(year) for year in range(2006, 2017)] \nselect_columns = ['Sector'] + years\n \ndf_production = df[df['Subject'].str.contains('Productie')][select_columns]\ndf_revenue = df[df['Subject'].str.contains('Omzet')][select_columns]\n\n# Two subplots, the axes array is 1-d\nfig, (ax1, ax2) = plt.subplots(2, 1)\n\nax1.set_title('Industry sector in the Netherlands',fontweight='bold')\n\nfor row in df_revenue.itertuples():\n sector = row[1]\n data = row[2:]\n data = [float(x) for x in data]\n if sector == 'Industry':\n ax1.plot(data, 'r', label=sector, zorder=13)\n \nax1.set_xlim([0, 10])\nax1.set_ylim([0, 140])\nax1.set_ylabel('Normalized revenue',fontweight='bold')\nax1.set_xticks(range(11))\nax1.set_xticklabels(years)\nax1.text(4.1, 90, '2010 = 100', fontsize='smaller')\n\n# Obtain vacancy data\n\ndf_raw = pd.read_csv('Vacatures__seizoenge_160717162036.csv', sep=';', encoding = \"ISO-8859-1\", skipfooter=1, skiprows=1)\n\n# Derive the header\ndf_header = df_raw.loc[0:1]\ncols = []\nfor column in df_header:\n col = ' '.join(df_header[column].values)\n col = col.replace('e kwartaal x 1 000', '')\n col = col.replace('e kwartaal* x 1 000', '')\n cols.append(col)\n\n# Derive the body and rename the columns\ndf = df_raw.loc[2:].reset_index(drop=True)\ncolumns = df.columns\nrename_dict = {}\nfor col_source, col_target in zip(columns, cols):\n if col_target == 'Perioden SBI 2008, part. bedrijven, overheid':\n col_target = 'Sector'\n rename_dict[col_source] = col_target\ndf = df.rename(columns=rename_dict)\n\nselected_columns = ['Sector', \n '2006 1', '2006 2', '2006 3', '2006 4',\n '2007 1', '2007 2', '2007 3', '2007 4',\n '2008 1', '2008 2', '2008 3', '2008 4',\n '2009 1', '2009 2', '2009 3', '2009 4',\n '2010 1', '2010 2', '2010 3', '2010 4',\n '2011 1', '2011 2', '2011 3', '2011 4',\n '2012 1', '2012 2', '2012 3', '2012 4',\n '2013 1', '2013 2', '2013 3', '2013 4',\n '2014 1', '2014 2', '2014 3', '2014 4',\n '2015 1', '2015 2', '2015 3', '2015 4',\n '2016 1', '2016 2', '2016 3', '2016 4']\n\n# Convert quarterly data to yearly data\ndf = df[selected_columns]\ndf = df.apply(pd.to_numeric, errors='ignore')\nyears = [str(year) for year in range(2006, 2017)]\nfor year in years:\n df[year] = df[year + ' 1'] + df[year + ' 2'] + df[year + ' 3'] + df[year + ' 4']\n df[year] = df[year] * 1000.0\nselected_columns = ['Sector'] + years\ndf = df[selected_columns]\n\ntranslate_dict = {\n 'A-U Alle economische activiteiten': 'Total',\n 'A Landbouw, bosbouw en visserij': 'Agriculture',\n 'B-F Nijverheid en energie': 'Energy',\n 'G-N Commerciële dienstverlening': 'Commercial industry',\n 'M-N Zakelijke dienstverlening': 'Business',\n 'O-U Niet-commerciële dienstverlening': 'Non-commercial services',\n 'C Industrie': 'Industry',\n 'F Bouwnijverheid': 'Construction',\n 'G Handel': 'Trade',\n 'H Vervoer en opslag': 'Transportation',\n 'I Horeca': 'Catering',\n 'J Informatie en communicatie': 'Information and communication',\n 'K Financiële dienstverlening': 'Financial services',\n 'L Verhuur en handel van onroerend goed': 'Property',\n 'M Specialistische zakelijke diensten': 'Speciaistic services',\n 'N Verhuur en overige zakelijke diensten': 'Rental',\n 'O Openbaar bestuur en overheidsdiensten': 'Public and 
government',\n 'P Onderwijs': 'Education',\n 'Q Gezondheids- en welzijnszorg': 'Healthcare',\n 'R Cultuur, sport en recreatie': 'Culture, sports and recreation',\n 'S Overige dienstverlening': 'Other services',\n 'Particuliere bedrijven': 'Companies',\n 'Overheid': 'Government'\n}\ndf['Sector'] = df['Sector'].map(translate_dict)\n \nfor row in df.itertuples():\n sector = row[1]\n data = row[2:]\n data = [float(x) for x in data]\n if sector == 'Industry':\n ax2.plot(data, 'r', label=sector, zorder=12)\n \nax2.set_xlim([0, 10])\nax2.set_ylim([0, 100000])\nax2.set_ylabel('Vacancies',fontweight='bold')\nax2.set_xlabel('Year',fontweight='bold')\nax2.set_xticks(range(11))\nax2.set_xticklabels(years)\n\nfig.set_size_inches(8, 8)\nplt.show()\nfig.savefig('industry.png')\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n","sub_path":"applied_plotting/industry_vacancies.py","file_name":"industry_vacancies.py","file_ext":"py","file_size_in_byte":10189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"139447456","text":"from cfg_crf import *\nimport unittest\nimport numpy as np\n\nclass TestCFGCRF(unittest.TestCase):\n \n def setUp(self):\n grammar = \"START\\n\\\n a b c\\n\\\n START -> A START2\\n\\\n START2 -> B C\\n\\\n A -> a A | a\\n\\\n B -> b B | b\\n\\\n C -> c C | c\"\n \n with open(\"test_grammar.txt\",'w') as f:\n f.write(grammar)\n \n self.mdl = CFGCRF(\"test_grammar.txt\",1,1,objective='ssvm',lambda_0=1.0)\n # print(self.mdl)\n # print self.mdl.rules_dict\n # print self.mdl.symbol_dict\n terminal_weights = np.array([0.0,0.0,1.0]).reshape((self.mdl.n_terminals,1))\n rule_weights = np.zeros((self.mdl.n_rules,1))\n self.mdl.set_weights(np.hstack((terminal_weights.flatten(),rule_weights.flatten())))\n\n \n self.y1 = [ 'START',['A',['a',-1,-1],['A',['a',-1,-1],['A',['a',-1,-1],-1]]],\\\n ['START2',['B',['b',-1,-1],['B',['b',-1,-1],-1]],\\\n ['C',['c',-1,-1],-1]]]\n \n self.y2 = [ 'START',['A',['a',-1,-1],['A',['a',-1,-1],-1]],\\\n ['START2',['B',['b',-1,-1],['B',['b',-1,-1],-1]],\\\n ['C',['c',-1,-1],['C',['c',-1,-1],-1]]]]\n \n n = 6\n x_terminal = np.ones(n,dtype=float).reshape((n,1))\n x_rule = []\n idx = 0\n for i in range(n):\n for j in range(i+1,n+1):\n for k in range(i+1,j+1):\n x_rule.append(1.0)\n x_rule = np.array(x_rule).reshape((len(x_rule),1))\n self.x1 = [x_terminal,x_rule]\n \n \n def test_load_grammar(self):\n expected_terminals = ['a','b','c']\n expected_non_terminals = ['START','START2','A','B','C']\n expected_grammar_dict = { 'START':[['A','START2']],\\\n 'START2':[['B','C']],\\\n 'A':[['a','A'],['a']],\\\n 'B':[['b','B'],['b']],\\\n 'C':[['c','C'],['c']]}\n \n self.assertEqual(self.mdl.terminals,expected_terminals)\n self.assertEqual(self.mdl.non_terminals,expected_non_terminals)\n self.assertDictEqual(self.mdl.grammar_dict,expected_grammar_dict)\n \n for s in self.mdl.terminals:\n idx = self.mdl.symbol_dict[s]\n self.assertEqual(self.mdl.symbol_indices[idx],-1)\n \n for s in self.mdl.non_terminals:\n idx = self.mdl.symbol_dict[s]\n for p,production in zip(range(self.mdl.symbol_indices[idx],self.mdl.symbol_indices[idx+1]),self.mdl.grammar_dict[s]):\n self.assertEqual(self.mdl.rules[p,0],self.mdl.symbol_dict[production[0]])\n if len(production) == 2:\n self.assertEqual(self.mdl.rules[p,1],self.mdl.symbol_dict[production[1]])\n else:\n self.assertEqual(self.mdl.rules[p,1],-1)\n \n \n # def test_set_weights(self):\n # w = np.random.randn(self.mdl.n_parameters)\n # self.mdl.set_weights(w)\n # got = self.mdl.get_weight_vector()\n #\n # 
self.assertTrue(np.all(w == got))\n \n def test_traversals(self):\n # expected_traversal = ['START','A','a','A','a','A','a','START2','B','b','B','b','C','c']\n expected_traversal = ['a','a','a','A','A','A','b','b','B','B','c','C','START2','START']\n expected_starts = [0,1,2,2,1,0,3,4,4,3,5,5,3,0]\n expected_ends = [1,2,3,3,3,3,4,5,5,5,6,6,6,6]\n expected_splits = [1,2,3,3,2,1,4,5,5,4,6,6,5,3]\n expected_leaves = ['a','a','a','b','b','c']\n \n got_traversal,got_starts,got_ends,got_splits = zip(*[[node[0],i,j,k] for node,i,j,k in self.mdl.depth_first_traversal(self.y1,return_ij=True)])\n got_leaves = self.mdl.get_leaves(self.y1)\n \n self.assertListEqual(expected_traversal,list(got_traversal))\n self.assertListEqual(expected_starts,list(got_starts))\n self.assertListEqual(expected_ends,list(got_ends))\n self.assertListEqual(expected_splits,list(got_splits))\n self.assertListEqual(expected_leaves,list(got_leaves))\n \n def test_loss(self):\n y1_leaves = ['a','a','a','b','b','c']\n y2_leaves = ['a','a','b','b','c','c']\n\n self.assertListEqual(self.mdl.get_leaves(self.y1),y1_leaves)\n self.assertListEqual(self.mdl.get_leaves(self.y2),y2_leaves)\n self.assertEqual(self.mdl.loss(self.y1,self.y2),2)\n\n def test_feature_idx(self):\n n = 10\n idx = 0\n for i in range(n):\n for j in range(i+1,n+1):\n for k in range(i+1,j+1):\n self.assertEqual(idx,self.mdl.get_feature_idx(n,i,j,k))\n idx += 1\n\n def test_sufficient_statistics(self):\n expected_terminal_statistics = np.array([3.0,2.0,1.0])\n expected_rule_statistics = np.array([1,1,2,1,1,1,0,1],dtype=float)\n expected_statistics = np.hstack((expected_terminal_statistics,expected_rule_statistics))\n ss = self.mdl.sufficient_statistics(self.x1,self.y1)\n\n self.assertListEqual(list(ss),list(expected_statistics))\n\n def test_map_inference(self):\n y_hat,score = self.mdl.map_inference(self.x1,return_score=True)\n expected_traversal = ['a','A','b','B','c','c','c','c','C','C','C','C','START2','START']\n got_traversal = [node[0] for node in self.mdl.depth_first_traversal(y_hat)]\n self.assertListEqual(expected_traversal,got_traversal)\n jf = self.mdl.sufficient_statistics(self.x1,y_hat)\n self.assertEqual(np.dot(jf,self.mdl.get_weight_vector()),score)\n \n \n def sample_tree(self,symbol):\n if self.mdl.is_terminal(symbol):\n left = right = -1\n else:\n # expansion = np.random.choice(self.mdl.grammar_dict[symbol])\n expansion_idx = np.random.randint(0,len(self.mdl.grammar_dict[symbol]))\n expansion = self.mdl.grammar_dict[symbol][expansion_idx]\n \n left = self.sample_tree(expansion[0])\n if len(expansion) == 1:\n right = -1\n else:\n right = self.sample_tree(expansion[1])\n \n return [symbol,left,right]\n \n def sample_dataset(self,n_samples):\n X = []\n Y = []\n for s in range(n_samples):\n y = self.sample_tree(\"START\")\n n = len(self.mdl.get_leaves(y))\n # print n,self.mdl.get_leaves(y)\n x0 = np.random.randn(n,1)\n x1_len = self.mdl.get_feature_idx(n,n-1,n,n) + 1\n x1 = np.random.randn(x1_len,1)\n Y.append(y)\n X.append([x0,x1])\n \n return X,Y\n \n def test_fit(self):\n # np.random.seed(1)\n n_samples = 25\n X,Y = self.sample_dataset(n_samples)\n self.mdl.fit(X,Y)\n \n def test_map_inference_2(self):\n np.random.seed(1)\n n_samples = 1\n X,Y = self.sample_dataset(n_samples)\n x,y = X[0],Y[0]\n w = np.random.randn(self.mdl.n_parameters)\n self.mdl.set_weights(w)\n # self.mdl.rule_weights[:,:] = 0.0\n \n y_hat,score = self.mdl.map_inference(x,return_score=True)\n jf_hat = self.mdl.sufficient_statistics(x,y_hat)\n jf = 
self.mdl.sufficient_statistics(x,y)\n self.assertAlmostEqual(np.dot(jf_hat,w),score)\n self.assertTrue(np.isclose(score,np.dot(jf,w)) or score >= np.dot(jf,w))\n \nif __name__==\"__main__\":\n unittest.main()\n ","sub_path":"conversation_detection/test_cfg_crf.py","file_name":"test_cfg_crf.py","file_ext":"py","file_size_in_byte":7679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"496436037","text":"# -*- coding: utf-8 -*\r\nimport json\r\nimport os\r\nimport datetime\r\nimport copy\r\nimport collections\r\n\r\ndef get_requests(tracefiles):#按时间顺序处理请求\r\n ret = []\r\n with open(tracefiles,'r') as f:\r\n requests = json.load(f)\r\n for request in requests:\r\n method =request['http.request.method']\r\n uri = request['http.request.uri']\r\n if ('GET' == method or 'PUT' == method) and ('blobs' in uri) and (len(uri.split('/')) >= 5):\r\n if 'http.response.written' in request:\r\n size = request['http.response.written']\r\n else:\r\n size = 1\r\n if size > 0:\r\n timestamp = datetime.datetime.strptime(request['timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ')\r\n duration = request['http.request.duration']\r\n client = request['http.request.remoteaddr']\r\n r = {\r\n 'timestamp':timestamp,\r\n 'uri':uri,\r\n 'size':size,\r\n 'method':method,\r\n 'delay':duration,\r\n 'client':client\r\n }\r\n ret.append(r)\r\n ret.sort(key=lambda x:x['timestamp'])\r\n if ret!= []:\r\n begin = ret[0]['timestamp']\r\n for r in ret:\r\n r['timestamp'] = (r['timestamp']-begin).total_seconds()\r\n return ret\r\n else:\r\n return ret\r\n\r\ndef exe_trace(file_name):\r\n requests = []\r\n# trace_files=[\"/trace/data_centers/dal09/prod-dal09-logstash-2017.06.20-0.json\",\r\n# \"/trace/data_centers/dal09/prod-dal09-logstash-2017.06.20-1.json\",\r\n# \"/trace/data_centers/dal09/prod-dal09-logstash-2017.06.20-2.json\",\r\n# \"/trace/data_centers/dal09/prod-dal09-logstash-2017.06.20-3.json\"]\r\n trace_files=[]\r\n size_list = [0,0,0,0,0,0,0,0,0,0]\r\n# root='/home/zc/trace/'+file_name\r\n root='/home/zc/old_trace/data_centers/'+file_name\r\n paths = os.listdir(root)\r\n for path in paths:\r\n trace_files.append(root+'/'+path)\r\n i = 0\r\n l = len(trace_files)\r\n count = 0\r\n j = 0\r\n datas = []\r\n for file in trace_files:\r\n if 1.*i / l > 0.01:\r\n count += 1\r\n i = 0\r\n print(str(count) + '% done')\r\n i += 1\r\n requests = get_requests(file)\r\n for request in requests:\r\n if request['size']<10:\r\n size_list[0] += 1\r\n elif request['size']<100:\r\n size_list[1] += 1\r\n elif request['size']<1000:\r\n size_list[2] += 1\r\n elif request['size']<10000:\r\n size_list[3] += 1\r\n elif request['size']<100000:\r\n size_list[4] += 1\r\n elif request['size']<1000000:\r\n size_list[5] += 1\r\n elif request['size']<10000000:\r\n size_list[6] += 1\r\n elif request['size']<100000000:\r\n size_list[7] += 1\r\n elif request['size']<1000000000:\r\n size_list[8] += 1\r\n elif request['size']<10000000000:\r\n size_list[9] += 1\r\n j += 1\r\n with open('size_'+file_name+'.txt', 'w') as f:\r\n f.write(str(j)+'\\n')\r\n f.write(str(size_list)+'\\n')\r\n\r\ndef main():\r\n filenames=['dev-mon01','prestage-mon01','syd01','fra02','stage-dal09','lon02','dal09']\r\n for number in range(6,7):\r\n exe_trace(filenames[number])\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"test_count/count_size.py","file_name":"count_size.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} 
{"seq_id":"123444172","text":"# -*- coding: UTF-8 -*-\n# Copyright (C) 2019, Raffaello Bonghi \n# All rights reserved\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND\n# CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,\n# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport curses\nfrom datetime import timedelta\n# Page class definition\nfrom .jtopgui import Page\n# Graphics elements\nfrom .jtopguilib import plot_name_info\n# Menu GUI pages\nfrom .jtopguimenu import strfdelta\n\n\nclass INFO(Page):\n\n def __init__(self, stdscr, jetson, refresh):\n super(INFO, self).__init__(\"INFO\", stdscr, jetson, refresh)\n\n def draw(self, key):\n \"\"\"\n Write all environment variables\n \"\"\"\n # Screen size\n height, width = self.stdscr.getmaxyx()\n # Position information\n posx = 2\n start_pos = 2\n spacing = 20\n # Up time\n uptime_string = strfdelta(timedelta(seconds=self.jetson.uptime), \"{days} days {hours}:{minutes}:{seconds}\")\n plot_name_info(self.stdscr, start_pos, posx, \"- Up Time\", uptime_string)\n start_pos += 1\n # Loop build information\n idx = 0\n # Board info\n self.stdscr.addstr(start_pos + idx, posx, \"- Board:\", curses.A_BOLD)\n for name, info in self.jetson.board[\"board\"].items():\n self.stdscr.addstr(start_pos + idx + 1, posx + 2, \"* \" + name + \":\")\n self.stdscr.addstr(start_pos + idx + 1, posx + spacing, info, curses.A_BOLD)\n idx += 1\n # Libraries info\n self.stdscr.addstr(start_pos + idx + 1, posx, \"- Libraries:\", curses.A_BOLD)\n idx += 1\n for name, info in self.jetson.board[\"libraries\"].items():\n self.stdscr.addstr(start_pos + idx + 1, posx + 2, \"* \" + name + \":\")\n self.stdscr.addstr(start_pos + idx + 1, posx + spacing, info, curses.A_BOLD)\n idx += 1\n # IP address and Hostname\n if self.jetson.local_interfaces:\n plot_name_info(self.stdscr, start_pos + idx + 1, posx, \"- Hostname\", self.jetson.local_interfaces[\"hostname\"])\n self.stdscr.addstr(start_pos + idx + 2, posx, \"- Interfaces\", curses.A_BOLD)\n idx += 3\n for name, ip in self.jetson.local_interfaces[\"interfaces\"].items():\n self.stdscr.addstr(start_pos + idx, posx + 2, \"* \" + name + \":\")\n self.stdscr.addstr(start_pos + idx, posx + spacing, ip, 
curses.A_BOLD)\n idx += 1\n # Author information\n plot_name_info(self.stdscr, start_pos, width - 30, \"Author\", \"Raffaello Bonghi\")\n plot_name_info(self.stdscr, start_pos + 1, width - 30, \"e-mail\", \"raffaello@rnext.it\")\n","sub_path":"jtop/gui/pinfo.py","file_name":"pinfo.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"252818726","text":"# Copyright: 2014 Dennis Schmalacker \n# License: BSD 2-clause, see LICENSE for details.\n\n\"\"\"\nbepasty-server commandline interface\n\"\"\"\n\nimport os, sys, base64, pprint\nfrom mimetypes import guess_type\nimport requests\n\nimport click\n\n@click.command()\n@click.option('-f', '--file', 'fileobj', help='File to be uploaded to a bepasty-server. If this is omitted stdin is read.')\n@click.option('-n', '--name', 'fname', help='Filename for piped input.')\n@click.option('-t', '--type', 'ftype', help='Filetype for piped input. Specified as file extension. E.g. png, txt, mp3...'\n + ' If omitted, filetype will be destinguised by filename')\ndef main(fileobj, fname, ftype):\n\n pretty = pprint.PrettyPrinter()\n\n if fileobj:\n fileobj = open(fileobj, 'rb')\n filesize = os.path.getsize(os.path.abspath(fileobj.name))\n if not fname:\n fname = fileobj.name\n stdin = False\n\n else:\n fileobj = click.get_binary_stream('stdin')\n if not fname:\n fname = ''\n stdin = True\n\n if not ftype:\n ftype, enc = guess_type(fname)\n if not ftype:\n ftype = 'application/octet-stream'\n\n offset = 0\n trans_id = ''\n while True:\n read_size = 1 * 1024 * 1024\n raw_data = fileobj.read(read_size)\n raw_data_size = len(raw_data)\n\n payload = base64.b64encode(raw_data)\n\n if stdin:\n if raw_data_size < read_size:\n filesize = offset + raw_data_size\n else:\n filesize = offset + raw_data_size + 1\n\n headers = {\n 'content-range': ('bytes %d-%d/%d' % (offset, offset+raw_data_size-1, filesize)),\n 'content-type': ftype,\n 'content-filename': fname,\n }\n headers['Content-Length'] = filesize\n if not trans_id == '':\n headers['Transaction-ID'] = trans_id\n\n response = requests.post('http://localhost:5000/api/v1/items', data=payload, headers=headers, auth=('user','foo'))\n offset = offset + raw_data_size\n if response.headers['Transaction-ID']:\n trans_id = response.headers['Transaction-ID']\n\n if raw_data_size < read_size:\n break\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bepasty_cli.py","file_name":"bepasty_cli.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"217251786","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# 该脚本用来查找Objective-C *.m文件里面的中文字符串(NSLog,EKOLogDebug等Log的除外)\n# 找到后,需要输入一个key,会自动放入Localizable.strings中(如果没有的话)。再在StringDef.h中生成kStr+key,最后在该文件中用StringDef.h里面的define\nimport os\nimport re\n\ncount = 0 # 统计的处理文件个数\nstringcount = 0\nstringcount2 = 0\nkeykey = 0\n\n# StringDef.h 的路径\nstringdefPath = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())),'CallWatch/Pods/EKOStringDefine/EKOStringDefine/StringDef.h')\n\n\n#Localizable.strings 的路径\n\nlocalizablePath = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())),'CallWatch/CallWatch/SupportingFiles/Resource/Language/zh-Hans.lproj/Localizable.strings')\n\n\n\nbaimingdan = 
['Pods','Third','UMEvents.h','EKP','CallWatchTests','Testo','Logger','SettingViewController.m','LoginViewController.m','DomainViewController.m','SWViewControllerIntercepter.m','PushMessageMgr.m','EKAExtendLocation.h'] #路径白名单,路径包含这个的,不会处理\nlog = ['NSAssert','Log','@brief','EKAnalysis','DebugToast','deprecated','EKAClickEvent']\n\ndef addkeyInlocalizable(newkey,value,path,lineNum):\n\n\tdefinekey = 'kStr%s' % (newkey.capitalize())\n\n\twith open(localizablePath,'a') as f:\n\t\tf.write('\"%s\" = \"%s\";\\n' %(newkey,value))\n\n\twith open(stringdefPath,'a') as f:\n\t\tf.write('#define %s local(@\"%s\")\\n' %(definekey,newkey))\n\n\tmodityFile(path,'@\"%s\"'%(value),definekey,lineNum)\n\n\n\ndef modityFile(path,oldString,newString,lineNum):\n\t\t#将文件读取到内存中\n\n\twith open(path,\"r\",encoding=\"utf-8\") as f:\n\t\tlines = f.readlines() \n\t#写的方式打开文件\n\tlinecount = 0\n\twith open(path,\"w\",encoding=\"utf-8\") as f_w:\n\t\tfor line in lines:\n\n\t\t\tif linecount == lineNum-1 and oldString in line:\n\t\t\t\tline = line.replace(oldString,newString)\n\n\t\t\tlinecount = linecount + 1\n\t\t\n\t\t\tf_w.write(line)\n\n\ndef getDefineKey(key):\n\tprint('findKey key',key)\n\tkey = key.replace('\"','')\n\n\twith open(stringdefPath,'r') as f:\n\t\tfor line in f.readlines():\n\n\t\t\tif line.startswith('//') or r'*' in line:\n\t\t\t\tcontinue\n\n\t\t\tline1 = re.findall(r'kStr[^ ]*',line)\n\t\t\tif len(line1) != 1:\n\t\t\t\tcontinue\n\n\t\t\tval1 = re.findall(r'[^\\\\]?\\\"(.*?[^\\\\])[\"]',line)\n\n\t\t\tif len(val1) != 1:\n\t\t\t\tcontinue\n\n\t\t\tif key == val1[0]:\n\t\t\t\tprint('line10 = '+line1[0])\n\t\t\t\treturn line1[0]\n\n\treturn None\n\n\ndef findKey(key):\n\n\twith open(localizablePath,'r') as f:\n\t\tfor line in f.readlines():\n\t\t\tres = line.split('=')[1:]\n\n\t\t\tif len(res) > 0:\n\t\t\t\tstr1 = res[0].replace(' ','').replace('\\n','').replace(';','').replace('\"','')\n\t\t\t\tif str1 == key:\n\t\t\t\t\treturn line.split('=')[0:][0]\n#\t\t\t\telse:\n#\t\t\t\t \tprint(\"str1:%s key:%s\" % (str1,key))\n\n\treturn None\n\n\n\ndef openFile(path):\n\tglobal stringcount\n\tglobal stringcount2\n\tglobal keykey\n\twith open(path, 'r') as f:\n\t\tlinenum = 0\n\t\tfor line in f.readlines():\n\t\t\tlinenum = linenum + 1\n\t\t\t\n\t\t\tline = line.split('//')[0]\n\n\t\t\tshouldcontinue = False\n\n\t\t\tfor key in log:\n\t\t\t\tif key in line:\n\t\t\t\t\tshouldcontinue = True\n\t\t\t\t\tbreak\n\n\n\t\t\tif shouldcontinue:\n\t\t\t\tcontinue\n\n\t\t\tfor str1 in re.findall(r'[^\\\\]?\\\"(.*?[^\\\\])[\"]',line):\n\n\t\t\t\tif re.search(r'[\\u4e00-\\u9fa5]',str1) is not None:\n\t\t\t\t\n\t\t\t\t\t# print(str1)\n\n\t\t\t\t\tif len(str1) == 0:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tkey1 = findKey(str1)\n\t\t\t\t\tif len(str1) == 0:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif key1 is not None:\n\t\t\t\t\t\tprint('该字符串已经有了:' + str1)\n\t\t\t\t\t\tstringcount = stringcount + 1\n\n\t\t\t\t\t\ts = definekey = 'kStr%s' % (key1.replace('\"','').capitalize())\n\t\t\t\t\t\tstr1 = '@\"'+str1 + '\"'\n# modityFile(path,str1,s,linenum)\n\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewKey = 'newkey_String' + str(keykey)\n\t\t\t\t\t\tkeykey = keykey + 1\n\t\t\t\t\t\tif newKey == '':\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tstringcount2 = stringcount2 + 1\n\t\t\t\t\t\t\tprint('该字符串没有做本地化:' + str1)\n# addkeyInlocalizable(newKey,str1,path,linenum)\n\n\n\n\ndef getAllFile(path):\n\n\tglobal count\n\t\n\tfor x in os.listdir(path):\n\t\tif xtc_isBaiMingDan(x):\n\t\t\tcontinue\n\t\tpath1 = os.path.join(path,x)\n\n\t\tif 
os.path.isdir(path1):\n\t\t\tgetAllFile(path1)\n\t\telse:\n\t\t\tif os.path.splitext(path1)[1] == '.m' or os.path.splitext(path1)[1] == '.h' :\n\t\t\t\tcount = count + 1\n\t\t\t\topenFile(path1)\n\t\t\t\t# print(path1)\n\t\ndef xtc_isBaiMingDan(path):\n\tfor bmd in baimingdan:\n\t\tif bmd == path:\n\t\t\treturn path\n\treturn None \n\t\n\nif __name__ == '__main__':\n\n\tprint('stringdefPath = ' + stringdefPath)\n\tprint('localizablePath = ' + localizablePath)\n\n\tstartpath = os.path.abspath(os.path.dirname(os.getcwd()))\n\n\tgetAllFile(startpath)\n\tprint(\"一共有\",stringcount,\"个字符串已经有key,全局搜一下,替换一下\")\n\tprint(\"一共有\",stringcount2,\"个字符串没有key\")\n\tprint(\"一共有\",count,\"个*.m文件\")\n\tcount = stringcount + stringcount2\n\tif count > 0:\t\t\n\t\texit(-1)\n","sub_path":"jenkins_strings.py","file_name":"jenkins_strings.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"98845412","text":"#!/bin/env python3\n\n\"\"\"\nPrototype variant annotation tool created for the Tempus\nBioinformatics Technical Challenge.\nAuthor: Jacob Feldman\n\nInput: VCF file\nOutput: TSV file containing annotations on each variant. If no output is \nspecified program will print table to stdout.\n\n\"\"\"\n\nimport sys\nimport requests\nimport argparse\nimport os.path\n\n#Add arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", type=str, help=\"File path to VCF.\")\nparser.add_argument(\"-o\", type=str, help=\"Output file name.\")\nargs = parser.parse_args()\n\n#Check if VCF file exists\nif not args.i:\n\tprint(\"No input file specified.\")\n\texit()\n\nelif not os.path.isfile(args.i):\n\tprint(\"File specified does not exist.\")\n\texit()\n\ndef get_annotations(hit):\n\t\"\"\"\n\tFunction to retrieve relevant annotations from each variant hit\n\tInput: Line in VCF file\n\tOutput: Tab separated annotations\n\t\"\"\"\n\t#Retrieve information about the variant\n\tvariant = hit.split(\"\\t\")\n\tchrom = variant[0]\n\tpos = variant[1]\n\tvarID = variant[2]\n\tref = variant[3]\n\talt = variant[4]\n\n\t#Create dictionary of info field, value pairs from the INFO section\n\tinfo = variant[7].split(\";\")\n\tinfoDict = dict([x.split('=') for x in info])\n\n\t\"\"\"Type of variation. Need to list most deleterious if there are more \n\tthan one type. Here I am unsure the rank order of snp, ins, or del \n\tin terms of deleterious possibility. I think it depeds on the specific\n\tnature of the mutation.\"\"\"\n\ttypeOfVar = infoDict['TYPE'].split(',')\n\tif 'complex' in typeOfVar:\n\t\ttypeOfVar = 'complex'\n\telse:\n\t\ttypeOfVar = typeOfVar[0]\n\n\t#Depth of sequence coverage at the site of variation\n\tdepth = infoDict[\"DP\"]\n\n\t#Number of reads supporting the variant\n\tnumReads = infoDict[\"AO\"]\n\n\t#Percentage of reads supporting the variant versus those supporting reference reads\n\ttry: \n\t\tvarPercent = round(float(infoDict[\"AO\"]) / float(infoDict[\"RO\"]) * 100, 4)\n\n\texcept ZeroDivisionError: #Instance when there are zero reads supporting the reference\n\t\tvarPercent = \"n/a\"\n\n\texcept ValueError: #Instance when there are >1 values for AO or RO. 
I am unsure why there are more than one count values\n\t\tvarPercent = \"n/a\"\n\n\t#Allele frequency of variant from Broad Institute ExAC Project API\n\t#Also inluded is the gene(s) with which the variant is associated with\n\tres = requests.get(f\"http://exac.hms.harvard.edu/rest/variant/variant/{chrom}-{pos}-{ref}-{alt}\")\n\tvariantData = res.json()\n\n\ttry:\n\t\talleleFreq = round(variantData[\"allele_freq\"], 4)\n\t\tgene = ','.join(variantData[\"genes\"])\n\n\texcept KeyError: #Option if there is no data from the api\n\t\talleleFreq = \"No data\"\n\t\tgene = \"No data\"\n\n\n\treturn f\"{chrom}\\t{pos}\\t{varID}\\t{typeOfVar}\\t{depth}\\t{numReads}\\t{varPercent}\\t{alleleFreq}\\t{gene}\\n\"\n#--------------------------------------------------------------------------------------------------------------\n\n#Create basic output table structure\noutput_table = \"Chrom\\tPos\\tID\\tType\\tDepth\\tNum Reads Supporting Variant\\tPercent Variant/Reference Reads\\tAllele Freq of Variant\\tGenes\\n\"\n\n#Process VCF file\nwith open(args.i, \"r\") as vcf_file:\n\n\tline = vcf_file.readline()\n\n\twhile line.startswith(\"##\"): #Bypass header lines\n\t\tline = vcf_file.readline()\n\n\t#Write to file if output file is specified, else print to stdout\t\n\tif args.o:\n\t\toutput = open(args.o, \"w\")\n\t\toutput.write(output_table)\n\n\t\tfor line in vcf_file: \n\t\t\toutput.write(get_annotations(line))\n\n\t\toutput.close()\n\n\telse:\n\t\tprint(output_table)\n\t\tfor line in vcf_file: \n\t\t\tprint(get_annotations(line))\n\nvcf_file.close()\n\n\"\"\"\nThank you Tempus bioinformatics team for taking the time to review my code!\n-Jacob\n\"\"\"","sub_path":"tempus_bioinformatics_challenge.py","file_name":"tempus_bioinformatics_challenge.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"377953633","text":"import numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow.keras import backend as K\nimport tensorflow.keras as keras\nfrom tensorflow.keras.layers import Dense, Conv1D, UpSampling1D, MaxPooling1D, AveragePooling1D\nfrom tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.models import Sequential, Model\n\nfrom ML_Train.Custom_Layers import Dropout_Live, Sampling\n\ndef create_encoder(\n hidden_nodes, \n codings_size,\n input_shape\n ):\n inputs = Input(shape=input_shape)\n x = inputs\n \n for i in range(len(hidden_nodes)):\n x = Dense(hidden_nodes[i], \n activation='selu')(x)\n x = BatchNormalization()(x)\n \n codings_mean = Dense(codings_size)(x)\n codings_log_var = Dense(codings_size)(x)\n codings = Sampling()([codings_mean, codings_log_var])\n \n variational_encoder = Model(\n inputs=[inputs],\n outputs=[codings_mean,codings_log_var,codings])\n \n latent_loss = -0.5 * K.sum(1 + codings_log_var - K.exp(codings_log_var) - K.square(codings_mean),axis=-1)\n \n return variational_encoder,latent_loss,inputs\n\ndef create_decoder(\n hidden_nodes,\n codings_size,\n input_shape\n ):\n \n hidden_nodes.reverse()\n decoder_inputs = Input(shape=[codings_size])\n x = decoder_inputs\n \n for j in range(len(hidden_nodes)):\n x = Dense(hidden_nodes[j],activation='selu')(x)\n \n\n x = Dense(input_shape[0],activation='sigmoid')(x)\n x = Reshape(input_shape)(x)\n\n decoder = Model(inputs=[decoder_inputs],outputs=[x])\n \n return decoder\n \ndef create_autoencoder(param_dict):\n \n input_shape = 
tuple(param_dict[\"input_shape\"])\n hidden_nodes = param_dict[\"hidden_nodes\"]\n codings_size = param_dict[\"codings_size\"]\n \n encoder,latent_loss,inputs = create_encoder(hidden_nodes,codings_size,input_shape)\n decoder = create_decoder(hidden_nodes,codings_size,input_shape)\n \n _,_,codings = encoder(inputs)\n reconstructions = decoder(codings)\n variational_ae = Model(inputs=[inputs],outputs=[reconstructions])\n variational_ae.add_loss(K.mean(latent_loss) / input_shape[0])\n \n return variational_ae, encoder, decoder\n ","sub_path":"ML_Train/.ipynb_checkpoints/Variational_Vanilla_AE-checkpoint.py","file_name":"Variational_Vanilla_AE-checkpoint.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"169496240","text":"file1 = open('lib/game/main.js')\nfile1_contents = file1.read()\n\ndef get_middle_text(line, string_start, string_end):\n\ttemp = line.split(string_start)[1]\n\treturn temp.split(string_end)[0]\n\nresult = get_middle_text(file1_contents, 'this.START_OBFUSCATION;', 'this.END_OBFUSCATION;')\n\nfile2 = open('domainlock.js','w')\nfile2.write(result)\n\nfile2.close()\nfile1.close()\n","sub_path":"prep_domainlock.py","file_name":"prep_domainlock.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"117222659","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Projet Splaby'O\n Projet Python 2020-2021 de 1ere année et AS DUT Informatique Orléans\n\n\"\"\"\nimport random\n\n\ndef dummy_ai(laby_dico):\n \"\"\"\n IA aléatoire mise par défaut sur les joueurs automatiques\n\n :param laby_dico: ce paramètre n'est pas utilisé\n :return: un ordre pour le joueur sous la forme d'une chaine de caractères\n \"\"\"\n actions = ['D', 'D', 'D', 'D', 'D', 'D', 'D', 'C', 'C', 'C']\n directions = ['E', 'O', 'S', 'N', 'X']\n positions = ['1', '3', '5']\n dir_peint = random.choice(directions)\n res = 'P' + dir_peint\n action = random.choice(actions)\n if action == 'D':\n direction = random.choice(directions[:-1])\n return res + action + direction\n if action == 'C':\n tourne = random.randint(0, 4)\n tournage = 'H' * tourne\n res += 'T' + tournage\n res += random.choice(directions)\n res += random.choice(positions)\n return res\n\n\ndef Joueur(nom, couleur, reserve_initiale=20, surface=0, type_joueur='O', objet=0, temps_restant=0, ia=dummy_ai):\n \"\"\"\n creer un nouveau joueur portant le nom passé en paramètre.\n\n :param nom: une chaine de caractères donnant le nom du joueur\n :param couleur: une chaine de caractères donnant donnant la couleur du joueur\n :param reserve_initiale: un entier indiquant la réserve de peinture du joueur\n :param surface: un entier qui indique combien de case du labyrinthe sont peintes de la couleur du joueur\n :param type_joueur: un caractère 'H' pour humain 'O' pour ordinateur\n :param objet: un entier compris entre 0 et 3 indiquant l'objet possédé actuellement par le joueur (0 => pas d'objet)\n :param temps_restant: le nombre de tours restant avant que l'objet possédé par le joueur est encore valide\n :param ia: une fonction indiquant quelle fonction appeler pour lancer l'IA associée à un joueur de type Ordinateur\n :return: le joueur possédant les caractéristiques passées en paramètre.\n \"\"\"\n res = dict()\n res[\"nom\"] = nom\n res[\"couleur\"] = couleur\n res[\"reserve\"] = reserve_initiale\n res[\"surface\"] = surface\n res[\"type_joueur\"] = type_joueur\n res[\"objet\"] = objet\n 
res[\"temps_restant\"] = temps_restant\n res[\"ia\"] = ia\n return res\n\ndef ajouter_objet(joueur, objet, temps):\n \"\"\"\n ajoute un objet au joueur et initialise le temps de validité de cet objet\n\n :param joueur: le joueur à modifier\n :param objet: un entier strictement positif indiquant l'objet attribué au joueur\n :param temps: le temps de validité de l'objet\n :return: la fonction ne retourne rien mais modifie le joueur\n \"\"\"\n joueur[\"objet\"] = objet\n joueur[\"temps_restant\"] = temps\n return None\n\ndef mise_a_jour_temps(joueur):\n \"\"\"\n enlève une unité de temps à la durée de vie de l'objet possédé par le joueur.\n Attention, si cette durée de vie passe à 0 il faut retirer l'objet du joueur\n\n :param joueur: le joueur à modifier\n :return: la fonction ne retourne rien mais modifie le joueur\n \"\"\"\n if joueur[\"temps_restant\"] > 0: joueur[\"temps_restant\"] -= 1\n if get_temps_restant(joueur) == 0: ajouter_objet(joueur, 0, 0)\n\n\ndef set_type_joueur(joueur, type_joueur):\n \"\"\"\n permet de \"forcer\" le type du joueur (humain ou ordinateur)\n\n :param joueur: le joueur à modifier\n :param type_joueur: un caractère 'H' pour humain 'O' pour ordinateur\n :return: la fonction ne retourne rien mais modifie le joueur\n \"\"\"\n joueur[\"type_joueur\"] = type_joueur\n return None\n\n\ndef get_type_joueur(joueur):\n \"\"\"\n retourne le type du joueur\n\n :param joueur: le joueur\n :return: résultat un caratère 'H' pour humain et 'O' pour ordinateur\n \"\"\"\n return joueur[\"type_joueur\"]\n\n\ndef set_fonction_ia(joueur, la_fonction):\n \"\"\"\n definit la fonction à appeler pour que ce joueur joue automatiquement\n\n :param joueur: le joueur\n :param la_fonction: une fonction qui prend en paramètre un dictionnaire donnant l'état du jeu\n retourne l\n :return: la fonction ne retourne rien mais modifie le joueur\n \"\"\"\n joueur[\"ia\"] = la_fonction\n return None\n\ndef jouer_ia(joueur, etat_jeu):\n \"\"\"\n appelle la fonction de l'ia associée au joueur et retourne son résultat\n\n :param joueur: le joueur\n :param etat_jeu: un dictionnaire donnant l'état du jeu\n :return: un ordre pour le joueur sous la forme d'une chaine de caractères\n \"\"\"\n fonction = joueur['ia'](etat_jeu)\n return fonction\n\n\ndef set_surface(joueur, surface):\n \"\"\"\n mis à jour la surface recouverte du joueur\n\n :param joueur: le joueur\n :param surface: un entier positif ou nul\n :return: la fonction ne retourne rien mais modifie le joueur\n \"\"\"\n\n joueur[\"surface\"] = surface\n\n\ndef get_surface(joueur):\n \"\"\"\n retourne la surface recouverte du joueur\n\n :param joueur: le joueur\n :return: un entier positif ou nul\n \"\"\"\n if type(joueur) == tuple:\n return joueur[1]['surface']\n return joueur[\"surface\"]\n\n\ndef get_objet_joueur(joueur):\n \"\"\"\n retourne l'objet possédé par le joueur\n\n :param joueur: le joueur\n :return: un entier positif ou nul (0 indique que le joueur ne possède pas d'objet)\n \"\"\"\n if type(joueur) == tuple:\n return joueur[1]['objet']\n return joueur['objet']\n\n\ndef get_couleur_joueur(joueur):\n \"\"\"\n retourne la couleur du joueur\n\n :param joueur: le joueur\n :return: une chaine de caractère indiquant la couleur du joueur\n \"\"\"\n if type(joueur) == tuple:\n return joueur[1]['couleur']\n return joueur[\"couleur\"]\n\n\ndef get_nom_joueur(joueur):\n \"\"\"\n retourne le nom du joueur\n\n :param joueur: le joueur\n :return: une chaine de caractère indiquant le nom du joueur\n \"\"\"\n if type(joueur) == tuple:\n return 
joueur[1]['nom']\n return joueur[\"nom\"]\n\n\ndef get_reserve_peinture(joueur):\n \"\"\"\n retourne le nombre de points du joueur\n\n :param joueur: le joueur\n :return: un entier indiquant le nombre d'unités de peintures possédé par le joueur\n \"\"\"\n if type(joueur) == tuple:\n return joueur[1]['reserve']\n return joueur[\"reserve\"]\n\n\ndef get_temps_restant(joueur):\n \"\"\"\n retourne le temps restant pour l'objet que le joueur possède actuellement\n\n :param joueur: le joueur\n :return: un entier indiquant le temps restant pour l'objet que possède le joueur\n O si le joueur ne possède pas d'objet\n \"\"\"\n return joueur[\"temps_restant\"]\n\n\ndef ajouter_peinture(joueur, nb_unites):\n \"\"\"\n ajoute ou enlève des unités de peintures dans la réserve du joueur\n ATTENTION la plus petite valeur pour la réserve est 0 et ne peut donc jamais devenir négative\n\n :param joueur: le joueur\n :param nb_unites: un entier relatif (positif ou négatif)\n :return: la fonction ne retourne rien mais modifie le joueur\n \"\"\"\n joueur[\"reserve\"] += nb_unites\n if joueur[\"reserve\"] <= 0 : joueur[\"reserve\"] = 0\n\n\ndef comparer(joueur1, joueur2):\n \"\"\"\n compare deux joueurs en fonction de la surface qu'il possède et en cas d'égalité, en fonction de la réserve\n\n :param joueur1: un joueur\n :param joueur2: un autre joueur\n :return: -1 si joueur1joueur2 et 0 si les deux joueurs ont la même surface et la même réserve\n \"\"\"\n if type(joueur1) == tuple:\n joueur1 = joueur1[1]\n if type(joueur2) == tuple:\n joueur2 = joueur2[1]\n if get_surface(joueur1) > get_surface(joueur2):\n return 1\n elif get_surface(joueur1) < get_surface(joueur2):\n return -1\n elif get_surface(joueur1) == get_surface(joueur2) :\n if joueur1[\"reserve\"] >joueur2[\"reserve\"]:\n return 1\n elif joueur1[\"reserve\"] == joueur2[\"reserve\"] :\n return 0\n else : return -1\n\n\n","sub_path":"joueur.py","file_name":"joueur.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"229506771","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 31 11:03:57 2019\n\n@author: riteshsharma\n\"\"\"\nimport sys\n\n\n#this method calculates the distance between stars (x1-x2)^2 + (y1-y2)^2\ndef distance(star1, star2):\n x1 = int(star1[0])\n y1 = int(star1[1])\n \n x2 = int(star2[0])\n y2 = int(star2[1])\n \n result = ((x1 - x2) * (x1 - x2)) + ((y1- y2) * (y1 - y2))\n return result\n \n#this method finds the majority by keeping diameter as a constraint.\ndef findMajority(galaxy, d):\n x = list()\n y = list()\n galaxyList = list()\n \n if(len(galaxy) == 0):\n return None\n \n elif(len(galaxy) == 1):\n return (galaxy[0])\n\n else:\n i = 0\n while(i < len(galaxy)):\n if(i < (len(galaxy) - 1)):\n if(distance(galaxy[i] , galaxy[i+1]) <= d):\n galaxyList.append(galaxy[i])\n else:\n y = galaxy[i]\n i = i + 2\n \n x = findMajority(galaxyList, d)\n \n if(x == 0):\n if(not len(galaxy) % 2 == 0):\n count = 0\n \n for arr in galaxy:\n if(distance(y, arr) <= d):\n count = count + 1\n \n if(count > (len(galaxy)/2)):\n y.append(count)\n return y\n else:\n return None\n \n else:\n return None\n \n else:\n count = 0\n for arr in galaxy:\n if(distance(x, arr) <= d):\n count = count + 1\n \n if(count > (len(galaxy)/2)):\n x.append(count)\n return x\n \n else:\n return None\n \n\ndef main():\n d = 0\n boolean = False\n starList = list()\n #data = \"20 7\\n2 2\\n3 2\\n1 1\\n1 2\\n1 3\\n101 101\\n100 100\\n102 102\\n3 3\"\n 
#for line in data.split('\\n'):\n for line in sys.stdin.read().split('\\n'):\n if not line:\n continue\n \n if not boolean:\n temp = list()\n temp = line.split()\n d = int(temp[0])\n d = d * d\n \n boolean = True\n else:\n star = list()\n temp = list()\n temp = line.split()\n star.append(temp[0])\n star.append(temp[1])\n starList.append(star)\n output = findMajority(starList, d)\n if(output == None):\n print(\"NO\")\n else:\n print(output.pop())\n\nmain()\n \n \n\n","sub_path":"untitled3.py","file_name":"untitled3.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"479197617","text":"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport pytest\n\nfrom common.tf_layer_test_class import CommonTFLayerTest\n\n\nclass TestConcat(CommonTFLayerTest):\n def create_concat_net(self, shape, axis, ir_version):\n \"\"\"\n Tensorflow net IR net\n\n Input->Concat => Input->Concat\n\n \"\"\"\n\n #\n # Create Tensorflow model\n #\n\n import tensorflow as tf\n\n tf.compat.v1.reset_default_graph()\n\n # Create the graph and model\n with tf.compat.v1.Session() as sess:\n\n ax = axis\n\n input_shape_x = shape.copy()\n # reshaping\n if len(input_shape_x) >= 3:\n input_shape_x.append(input_shape_x.pop(1))\n\n # TODO: add concat with const inputs to check fusing (as in ONNX)\n\n x = tf.compat.v1.placeholder(tf.float32, input_shape_x, 'Input')\n y = tf.compat.v1.placeholder(tf.float32, input_shape_x, 'Input') # Input_1 in graph_def\n\n concat = tf.concat([x, y], axis=ax, name='Operation')\n concat_shape = concat.shape.as_list()\n\n tf.compat.v1.global_variables_initializer()\n tf_net = sess.graph_def\n\n #\n # Create reference IR net\n # Please, specify 'type': 'Input' for input node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n # Format axis to positive value\n concat_ax = axis if axis >= 0 else axis + len(shape)\n if len(shape) >= 3:\n # Permute shape to (N,C,...) 
format and compute correct axis value\n order = [0, len(concat_shape) - 1] + list(range(1, len(concat_shape) - 1))\n concat_shape = [concat_shape[i] for i in order]\n concat_ax = order.index(concat_ax)\n\n ref_net = None\n\n return tf_net, ref_net\n\n # TODO: create tests for concat with 1 input and multiple inputs\n\n test_data_1D = [dict(shape=[1], axis=0),\n dict(shape=[1], axis=-1)]\n\n @pytest.mark.parametrize(\"params\", test_data_1D)\n @pytest.mark.nightly\n def test_concat_1D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_concat_net(**params, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n\n test_data_2D = [dict(shape=[1, 224], axis=0),\n dict(shape=[1, 224], axis=-1)]\n\n @pytest.mark.parametrize(\"params\", test_data_2D)\n @pytest.mark.nightly\n def test_concat_2D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_concat_net(**params, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n\n test_data_3D = [pytest.param(dict(shape=[1, 3, 224], axis=0), marks=pytest.mark.xfail(reason=\"*-19053\")),\n pytest.param(dict(shape=[1, 3, 224], axis=-1), marks=pytest.mark.xfail(reason=\"*-19053\")),\n pytest.param(dict(shape=[1, 3, 224], axis=2), marks=pytest.mark.xfail(reason=\"*-19053\"))]\n\n @pytest.mark.parametrize(\"params\", test_data_3D)\n @pytest.mark.nightly\n def test_concat_3D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_concat_net(**params, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n\n test_data_4D = [dict(shape=[1, 3, 100, 224], axis=0),\n dict(shape=[1, 3, 100, 224], axis=-1),\n dict(shape=[1, 3, 100, 224], axis=2)]\n\n @pytest.mark.parametrize(\"params\", test_data_4D)\n @pytest.mark.nightly\n @pytest.mark.precommit\n def test_concat_4D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_concat_net(**params, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n\n test_data_5D = [dict(shape=[1, 3, 50, 100, 224], axis=0),\n dict(shape=[1, 3, 50, 100, 224], axis=-1),\n dict(shape=[1, 3, 50, 100, 224], axis=2)]\n\n @pytest.mark.parametrize(\"params\", test_data_5D)\n @pytest.mark.nightly\n def test_concat_5D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_concat_net(**params, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n","sub_path":"tests/layer_tests/tensorflow_tests/test_tf_Concat.py","file_name":"test_tf_Concat.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"314653608","text":"import pandas as pd\n\ndf = pd.read_csv(\"../dataset/2017/2017_01_light.csv\")\nfor i in range(2, 7):\n print(\"Load Month:\", i)\n df2 = pd.read_csv(\"../dataset/2017/2017_{:02d}_light.csv\".format(i))\n df = df.append(df2)\n\ndf.dropna(axis=1, how=\"all\", inplace=True)\ndf.drop([\"MONTH\", \"DAY_OF_MONTH\", \"DAY_OF_WEEK\", \"UNIQUE_CARRIER\", \"ORIGIN_AIRPORT_ID\", \"DEST_AIRPORT_ID\", \"ARR_TIME\", \"ARR_DELAY\", \"CANCELLED\", \"DIVERTED\", \"DISTANCE\", \"WEATHER_DELAY\", \"NAS_DELAY\", \"SECURITY_DELAY\", \"LATE_AIRCRAFT_DELAY\"], axis=1, inplace=True)\n\ndf.to_csv(\"../dataset/arima/arima_test_dataset.csv\", index=False)","sub_path":"P4/models/11 - prepare_test_dataset_2017.py","file_name":"11 - 
prepare_test_dataset_2017.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"497193679","text":"import socket\nimport threading\nimport time\n\n# Lists to store connection.\nall_connection = []\nall_address = []\n\n# To make the code look better.\nprint(\"\\nWelcome to the chat-room server.\")\nprint(\"\\nInitialization ...\")\ntime.sleep(1)\n\n\n# Create socket to connect to other computers\ndef create_socket():\n try:\n global host\n global port\n global s\n\n host = socket.gethostname()\n port = 8080\n\n print(f'\\nYour host/ip name is: {host}')\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n except socket.error as msg:\n print(\"Socket creation error: \" + str(msg))\n\n\n# Binding socket server/Listen to the port\ndef bind_socket():\n try:\n global host\n global port\n global s\n\n print(\"\\nBinding the port \" + str(port))\n\n s.bind((host, port))\n s.listen(5)\n print(\"\\nWaiting for any incoming connections ... \")\n\n except socket.error as msg:\n print(\"Socket binding error\" + str(msg) + \"\\n\" + \"Retrying ... \")\n bind_socket()\n\n\n# Function that send messages to all clients.\ndef sending_message(conn, address):\n\n global data\n\n # Storing many connection with a for loop\n while True:\n\n data = conn.recv(1024)\n\n for connection in all_connection:\n connection.send(data)\n\n if not data:\n print(str(address[0]) + ':' + str(address[1]), \"Disconnected\")\n all_connection.remove(conn)\n conn.close()\n break\n\n\n# Function that threads many connection.\ndef accepting_connection():\n\n global s_name\n\n while True:\n try:\n conn, address = s.accept()\n\n # Showing clients name.\n s_name = conn.recv(1024)\n s_name = s_name.decode()\n\n # Threading by calling the sending_message function.\n conn_thread = threading.Thread(target=sending_message, args=(conn, address))\n conn_thread.daemon = True\n conn_thread.start()\n\n all_connection.append(conn)\n all_address.append(address)\n\n print(\"========================================================\")\n print(\"\\nConnection received!\")\n print(f\"\\n-{s_name},has successfully connected to the chat-room\")\n print(str(address[0]) + ':' + str(address[1]), \"Connected\")\n\n except socket.error as msg:\n print(\"Error accepting connections\" + str(msg))\n\n\ndef main():\n\n create_socket()\n bind_socket()\n accepting_connection()\n\n\nmain()\n\n","sub_path":"Chat_Room/Chat_Room_Server.py","file_name":"Chat_Room_Server.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"611025587","text":"from torch.utils.data import Dataset\nimport numpy as np\nimport pickle as pkl\nimport torch\nimport h5py\n\n\nL = 5000 # context length flanking a position\n\n\nclass MatchingDataset(Dataset):\n \"\"\"\n When using dataloader, the batch size must be an even number.\n \"\"\"\n def __init__(self, dataset='canonical_dataset.txt'):\n self.current_ch = None\n self.cached_ch = None\n self.data = []\n with open(dataset, 'r') as f:\n s = f.read().strip().split('\\n')\n for gene in s: # all the positions are 1-based,\n # jn_start is the end position of exon and jn_end is the start position of exon.\n name, _, ch, direction, _, _, jn_start, jn_end = gene.split('\\t')\n jn_start, jn_end = map(lambda x: np.fromstring(x, sep=',', dtype='i')-1, (jn_start, jn_end))\n for i in range(len(jn_start)):\n if jn_start[i] < L:\n continue\n self.data.append((ch, 
name, direction, jn_start[i]-1, i))\n self.data.append((ch, name, direction, jn_end[i]-1, i))\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n ch, name, direction, pos, segment_idx = self.data[idx]\n if self.current_ch != ch:\n self.current_ch = ch\n self.cached_ch = pkl.load(open(self.current_ch+'.pkl', 'rb'))\n seq = self.cached_ch[pos-L: pos+L+1]\n if direction == '-':\n seq = (5 - seq[::-1]) % 5\n idx = seq != 0\n segment = np.zeros((4, 2*L + 1), dtype='f')\n segment[seq[idx]-1, idx] = 1\n return torch.from_numpy(segment)\n\n\nclass SpliceDataset(Dataset):\n def __init__(self, h5_file='canonical_dataset.h5', dset='train'):\n with h5py.File(h5_file, 'r') as f:\n self.data = f['{}_data'.format(dset)][:]\n self.idx_mapping = f['idx_mapping_{}'.format(dset)][:]\n\n def __len__(self):\n return len(self.idx_mapping)\n\n def __getitem__(self, idx):\n SL = 5000\n CL_max = 10000\n start = self.idx_mapping[idx]\n data = self.data[start: start+SL+CL_max, :]\n segment = np.zeros((4, SL+CL_max), dtype='f')\n idx = data[:, 0] != 0\n segment[data[idx, 0]-1, idx] = 1\n return torch.from_numpy(segment)\n\n\n\n\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"494836329","text":"\ndef main():\n return\n\ndef get_geneid_symbol_mapping(file_name):\n \"\"\"\n wget ftp://ftp.ncbi.nih.gov/gene/DATA/GENE_INFO/Mammalia/Homo_sapiens.gene_info.gz\n zcat Homo_sapiens.gene_info.gz | cut -f 2,3 > geneid_to_symbol.txt\n To parse Homo_sapiens.gene_info (trimmed to two colums) file from NCBI \n \"\"\"\n f = open(file_name)\n f.readline()\n geneid_to_names = {}\n name_to_geneid = {}\n for line in f:\n geneid, symbol = line.split(\"\\t\")\n geneid = geneid.strip()\n symbol = symbol.strip()\n if geneid == \"\" or symbol == \"\":\n continue\n geneid_to_names.setdefault(geneid, set()).add(symbol) \n if symbol in name_to_geneid: \n if int(geneid) >= int(name_to_geneid[symbol]):\n continue\n print (name_to_geneid[symbol], geneid, symbol)\n name_to_geneid[symbol] = geneid\n f.close()\n return geneid_to_names, name_to_geneid\n\n\ndef get_unigene_to_geneids(file_name, prefix = \"Hs.\"):\n \"\"\"\n To parse gene2unigene file from NCBI\n \"\"\"\n f = open(file_name)\n unigene_to_geneids = {}\n f.readline()\n for line in f:\n geneid, unigene = line.strip().split(\"\\t\")\n if not unigene.startswith(prefix):\n continue\n unigene_to_geneids.setdefault(unigene, set()).add(geneid)\n #for unigene, geneids in unigene_to_geneids.iteritems():\n # if len(geneids) > 1:\n # print unigene, geneids\n return unigene_to_geneids\n\n\ndef get_geneid_to_pubmeds(file_name, tax_id = \"9606\"):\n \"\"\"\n To parse gene2pubmed file from NCBI \n \"\"\"\n f = open(file_name)\n geneid_to_pubmeds = {}\n f.readline()\n for line in f:\n tax, geneid, pubmed_id = line.strip().split(\"\\t\")\n if tax != tax_id:\n continue\n geneid_to_pubmeds.setdefault(geneid, set()).add(pubmed_id)\n return geneid_to_pubmeds\n\n\n \n","sub_path":"parse_ncbi.py","file_name":"parse_ncbi.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"108672618","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: 
List[int]) -> TreeNode:\n if len(preorder) == 0:\n return None\n\n # build root note\n root = TreeNode(preorder[0])\n middle = inorder.index(preorder[0])\n # call recursively\n root.left = self.buildTree(preorder[1:middle+1], inorder[:middle])\n root.right = self.buildTree(preorder[middle+1:], inorder[middle+1:])\n return root\n\n\n","sub_path":"Sec8_Tree/q0105.py","file_name":"q0105.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"328547840","text":"# Clock\nimport time\nwhile True: \n print(\"Ready to Study? (Yes or No)\")\n study = input()\n if study == \"Yes\":\n print(\"How long would you like to set the pomodoro?\")\n work_time = float(input())*60\n print(\"ready?\")\n input()\n start_time = int(time.time())\n while(time.time() - start_time < work_time):\n time.sleep(.5)\n print((int(time.time())-start_time)%60)\n time.sleep(.5)\n print(int(time.time())-start_time)\n print(\"Break time!\")\n\n\n\n\n\n\n#I want this clock to work for me as a study tool\n# Every time I run the program I want it to ask me how long to set the pomodoro\n\n","sub_path":"Pyfolder/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"274176909","text":"def main():\n argument_spec = eseries_host_argument_spec()\n argument_spec.update(dict(state=dict(required=True, choices=['present', 'absent']), target=dict(required=False, default=None), target_type=dict(required=False, choices=['host', 'group']), lun=dict(required=False, type='int'), volume_name=dict(required=True)))\n module = AnsibleModule(argument_spec=argument_spec)\n state = module.params['state']\n target = module.params['target']\n target_type = module.params['target_type']\n lun = module.params['lun']\n ssid = module.params['ssid']\n validate_certs = module.params['validate_certs']\n vol_name = module.params['volume_name']\n user = module.params['api_username']\n pwd = module.params['api_password']\n api_url = module.params['api_url']\n if (not api_url.endswith('/')):\n api_url += '/'\n volume_map = get_volumes(module, ssid, api_url, user, pwd, 'volumes', validate_certs)\n thin_volume_map = get_volumes(module, ssid, api_url, user, pwd, 'thin-volumes', validate_certs)\n volref = None\n for vol in volume_map:\n if (vol['label'] == vol_name):\n volref = vol['volumeRef']\n if (not volref):\n for vol in thin_volume_map:\n if (vol['label'] == vol_name):\n volref = vol['volumeRef']\n if (not volref):\n module.fail_json(changed=False, msg=('No volume with the name %s was found' % vol_name))\n host_and_group_mapping = get_host_and_group_map(module, ssid, api_url, user, pwd, validate_certs)\n desired_lun_mapping = dict(mapRef=host_and_group_mapping[target_type][target], lun=lun, volumeRef=volref)\n lun_mappings = get_lun_mappings(ssid, api_url, user, pwd, validate_certs)\n if (state == 'present'):\n if (desired_lun_mapping in lun_mappings):\n module.exit_json(changed=False, msg='Mapping exists')\n else:\n result = create_mapping(module, ssid, desired_lun_mapping, vol_name, api_url, user, pwd, validate_certs)\n module.exit_json(changed=True, **result)\n elif (state == 'absent'):\n if (desired_lun_mapping in lun_mappings):\n result = remove_mapping(module, ssid, desired_lun_mapping, api_url, user, pwd, validate_certs)\n module.exit_json(changed=True, msg='Mapping removed')\n else:\n module.exit_json(changed=False, msg='Mapping 
absent')","sub_path":"Data Set/bug-fixing-5/8515db85889b0b88c3be5fdb36c91de4782aa357-

-fix.py","file_name":"8515db85889b0b88c3be5fdb36c91de4782aa357-
-fix.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"318743650","text":"# compare 2 elements in pair to each other and move forward and sort, at the end, last element is sorted\r\n# repeat on all other elements\r\n\r\n# if data is already ok\r\n# for space concern\r\n# easy implementation\r\n\r\n# poor time\r\n\r\n# learn more about direct swapping in python\r\n\r\n# O(n^2), O(1)\r\ndef bubblesort(list):\r\n for i in range(len(list) - 1):\r\n for j in range(len(list)-i-1):\r\n if list[j] > list[j+1]:\r\n list[j], list[j+1] = list[j+1], list[j]\r\n\r\nl = [5,4,3,2,1,0,-1,9,8,7,6,33,100,-5]\r\nbubblesort(l)\r\nprint(l)\r\n","sub_path":"zzz_dsa/python_dsa_1/068_bubble_sort.py","file_name":"068_bubble_sort.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"24260107","text":"#!/usr/bin/env python3\n\ndef read_schedule(machines,production_machines,pmachinesperstage,jobs,startingtime,roundtime,timebetweenmachines,schedule,duration):\n # int int int int matrix int int matrix matrix\n \"\"\"\n imports\n \"\"\"\n import numpy as np\n import time\n \"\"\"\n Definitions of opening variables\n \"\"\"\n stopwatch = -1\n Finish_schedule = np.zeros(production_machines)\n inqueue = [[] for i in range(0,production_machines)]\n outqueue = [[] for i in range(0,production_machines)]\n machine_load = np.zeros(machines)\n \"\"\"\n production_finish = [np.zeros[pmachinesperstage[i]] for i in range(production_machines)]\n production_load = [np.zeros[pmachinesperstage[i]] for i in range(production_machines)]\n \"\"\"\n production_finish = np.zeros((production_machines, pmachinesperstage))\n production_load = np.zeros((production_machines, pmachinesperstage))\n depot_out = jobs\n depot_in = 0\n depot_outqueue = []\n startmachine = np.zeros(machines)\n list_of_jobs = []\n\n for i in range(0,jobs):\n depot_outqueue.append(i+1)\n\n for m in range(0,machines):\n for l in range(0,roundtime):\n startmachine[m] += startingtime[m][l]*l\n\n for i in range(0,jobs):\n list_of_jobs.append(i+1)\n schedule.append(list_of_jobs)\n\n while depot_in < jobs:\n stopwatch+=1\n for m in range(0,machines):\n position = ((stopwatch-startmachine[m])%roundtime)/timebetweenmachines\n if position == 0:\n if machine_load[m] > 0:\n depot_in += 1\n machine_load[m] = 0\n if machine_load[m] == 0:\n if depot_out > 0:\n take_job = 0\n tries = 0\n while take_job == 0 and tries < jobs:\n if schedule[0][tries] in depot_outqueue:\n take_job = schedule[0][tries]\n machine_load[m]=take_job\n depot_outqueue.remove(take_job)\n depot_out-=1\n break\n else:\n tries += 1\n elif position%1 == 0:\n position = int(position)\n if machine_load[m] > 0:\n inqueue[position-1].append(machine_load[m])\n machine_load[m] = 0\n if machine_load[m] == 0:\n if len(outqueue[position-1]) > 0:\n take_job = 0\n tries = 0\n while take_job == 0 and tries < jobs:\n if schedule[position][tries] in outqueue[position-1]:\n take_job = schedule[position][tries]\n machine_load[m]=take_job\n outqueue[position-1].remove(take_job)\n break\n else:\n tries += 1\n for p in range(0,production_machines):\n for s in range(0,pmachinesperstage):\n #for s in range(0,pmachinesperstage[p])\n if production_load[p][s] > 0:\n if production_finish[p][s] <= stopwatch:\n outqueue[p].append(production_load[p][s])\n production_load[p][s] = 0\n Finish_schedule[p] += 1\n if production_load[p][s] == 0:\n if 
len(inqueue[p]) > 0:\n next_job = (schedule[p][int(Finish_schedule[p])])\n if next_job in inqueue[p]:\n production_load[p][s] = next_job\n production_finish[p][s] = stopwatch+duration[p][next_job-1]\n inqueue[p].remove(next_job)\n \"\"\"\n Final display and return\n \"\"\"\n schedule.pop(production_machines)\n return stopwatch\n\ndef main():\n read_schedule(1,3,1,3,[[1,0,0,0,0,0,0,0]],8,2,[[1,2,3],[1,2,3],[1,3,2]],[[10,12,11],[5,6,7],[1,2,3]])\n read_schedule(1,3,1,3,[[0,0,1,0,0,0,0,0]],8,2,[[1,2,3],[1,2,3],[1,3,2]],[[10,12,11],[5,6,7],[1,2,3]])\n read_schedule(1,3,1,3,[[0,1,0,0,0,0,0,0]],8,2,[[1,2,3],[1,2,3],[1,3,2]],[[10,12,11],[5,6,7],[1,2,3]])\n read_schedule(1,3,1,3,[[1,0,0,0,0,0,0,0]],8,2,[[1,2,3],[1,2,3],[1,3,2]],[[10,12,11],[5,6,7],[1,2,3]])\n read_schedule(1,3,1,3,[[0,0,0,1,0,0,0,0]],8,2,[[1,2,3],[1,2,3],[1,3,2]],[[10,12,11],[5,6,7],[1,2,3]])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"readschedule_old.py","file_name":"readschedule_old.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"442228317","text":"\"\"\"Jetcake database rebuild\n\nRevision ID: 681329152a1a\nRevises: \nCreate Date: 2020-03-14 19:04:37.671581\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '681329152a1a'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('email', sa.String(length=128), nullable=False),\n sa.Column('name', sa.String(length=128), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('name')\n )\n op.create_table('bookmarks',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('buri', sa.String(length=128), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('questions',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('question', sa.String(length=256), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('question')\n )\n op.create_table('answers',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('answer', sa.Text(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('question_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['question_id'], ['questions.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('answers')\n op.drop_table('questions')\n op.drop_table('bookmarks')\n op.drop_table('users')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/681329152a1a_jetcake_database_rebuild.py","file_name":"681329152a1a_jetcake_database_rebuild.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"265585509","text":"# Gist example of IB wrapper ...\n#\n# Download API from http://interactivebrokers.github.io/#\n#\n# Install python API code /IBJts/source/pythonclient $ python3 setup.py install\n#\n# Note: The test cases, and the documentation refer to a python package called IBApi,\n# but the actual package is called ibapi. Go figure.\n#\n# Get the latest version of the gateway:\n# https://www.interactivebrokers.com/en/?f=%2Fen%2Fcontrol%2Fsystemstandalone-ibGateway.php%3Fos%3Dunix\n# (for unix: windows and mac users please find your own version)\n#\n# Run the gateway\n#\n# user: edemo\n# pwd: demo123\n#\n# Now I'll try and replicate the historical data example\n\nfrom ibapi.wrapper import EWrapper\nfrom ibapi.client import EClient\nfrom ibapi.execution import ExecutionFilter\nfrom ibapi.contract import Contract as IBcontract\nfrom ibapi.order import Order\nfrom ibapi.contract import ComboLeg as IBcomboLeg\n\nfrom pandas import DataFrame\nimport sys\nfrom threading import Thread\nimport queue\nimport time\nimport datetime\nfrom copy import deepcopy\nimport logging\n\n## these are just arbitrary numbers in leiu of a policy on this sort of thing\nDEFAULT_MARKET_DATA_ID=50\nDEFAULT_GET_CONTRACT_ID=43\nDEFAULT_EXEC_TICKER=78\n\n## marker for when queue is finished\nFINISHED = object()\nSTARTED = object()\nTIME_OUT = object()\n\n## This is the reqId IB API sends when a fill is received\nFILL_CODE=-1\n\n\"\"\"\nNext section is 'scaffolding'\n\n\"\"\"\n\nclass finishableQueue(object):\n \"\"\"\n Creates a queue which will finish at some point\n \"\"\"\n\n def __init__(self, queue_to_finish):\n\n self._queue = queue_to_finish\n self.status = STARTED\n\n def get(self, timeout):\n \"\"\"\n Returns a list of queue elements once timeout is finished, or a FINISHED flag is received in the queue\n\n :param timeout: how long to wait before giving up\n :return: list of queue elements\n \"\"\"\n contents_of_queue=[]\n finished=False\n\n while not finished:\n try:\n current_element = self._queue.get(timeout=timeout)\n if current_element is FINISHED:\n finished = True\n self.status = FINISHED\n else:\n contents_of_queue.append(current_element)\n ## keep going and try and get more data\n\n except queue.Empty:\n ## If we hit a time out it's most probable we're not getting a finished element any time soon\n ## give up and return what we have\n finished = True\n self.status = TIME_OUT\n\n\n return contents_of_queue\n\n def timed_out(self):\n return self.status is TIME_OUT\n\n\"\"\"\nMergable objects are used to capture order and execution information which comes from different sources and needs\n glueing together\n\"\"\"\n\n## marker to show a mergable object hasn't got any attributes\nNO_ATTRIBUTES_SET=object()\n\nclass mergableObject(object):\n \"\"\"\n Generic object to make it easier to munge together incomplete information about orders and executions\n \"\"\"\n\n def __init__(self, id, **kwargs):\n \"\"\"\n\n :param id: master reference, has to be an immutable type\n :param kwargs: other attributes which will appear in list returned by attributes() method\n \"\"\"\n\n self.id=id\n 
attr_to_use=self.attributes()\n\n for argname in kwargs:\n if argname in attr_to_use:\n setattr(self, argname, kwargs[argname])\n else:\n print(\"Ignoring argument passed %s: is this the right kind of object? If so, add to .attributes() method\" % argname)\n\n def attributes(self):\n ## should return a list of str here\n ## eg return [\"thingone\", \"thingtwo\"]\n return NO_ATTRIBUTES_SET\n\n def _name(self):\n return \"Generic Mergable object - \"\n\n def __repr__(self):\n\n attr_list = self.attributes()\n if attr_list is NO_ATTRIBUTES_SET:\n return self._name()\n\n return self._name()+\" \".join([ \"%s: %s\" % (attrname, str(getattr(self, attrname))) for attrname in attr_list\n if getattr(self, attrname, None) is not None])\n\n def merge(self, details_to_merge, overwrite=True):\n \"\"\"\n Merge two things\n\n self.id must match\n\n :param details_to_merge: thing to merge into current one\n :param overwrite: if True then overwrite current values, otherwise keep current values\n :return: merged thing\n \"\"\"\n\n if self.id!=details_to_merge.id:\n raise Exception(\"Can't merge details with different IDS %d and %d!\" % (self.id, details_to_merge.id))\n\n arg_list = self.attributes()\n if arg_list is NO_ATTRIBUTES_SET:\n ## self is a generic, empty, object.\n ## I can just replace it wholesale with the new object\n\n new_object = details_to_merge\n\n return new_object\n\n new_object = deepcopy(self)\n\n for argname in arg_list:\n my_arg_value = getattr(self, argname, None)\n new_arg_value = getattr(details_to_merge, argname, None)\n\n if new_arg_value is not None:\n ## have something to merge\n if my_arg_value is not None and not overwrite:\n ## conflict with current value, don't want to overwrite, skip\n pass\n else:\n setattr(new_object, argname, new_arg_value)\n\n return new_object\n\n\nclass orderInformation(mergableObject):\n \"\"\"\n Collect information about orders\n\n master ID will be the orderID\n\n eg you'd do order_details = orderInformation(orderID, contract=....)\n \"\"\"\n\n def _name(self):\n return \"Order - \"\n\n def attributes(self):\n return ['contract','order','orderstate','status',\n 'filled', 'remaining', 'avgFillPrice', 'permid',\n 'parentId', 'lastFillPrice', 'clientId', 'whyHeld']\n\n\nclass execInformation(mergableObject):\n \"\"\"\n Collect information about executions\n\n master ID will be the execid\n\n eg you'd do exec_info = execInformation(execid, contract= ... 
)\n \"\"\"\n\n def _name(self):\n return \"Execution - \"\n\n def attributes(self):\n return ['contract','ClientId','OrderId','time','AvgPrice','Price','AcctNumber',\n 'Shares','Commission', 'commission_currency', 'realisedpnl']\n\n\nclass list_of_mergables(list):\n \"\"\"\n A list of mergable objects, like execution details or order information\n \"\"\"\n\n\n def merged_dict(self):\n \"\"\"\n Merge and remove duplicates of a stack of mergable objects with unique ID\n\n Essentially creates the union of the objects in the stack\n\n :return: dict of mergableObjects, keynames .id\n \"\"\"\n\n ## We create a new stack of order details which will contain merged order or execution details\n new_stack_dict = {}\n\n for stack_member in self:\n id = stack_member.id\n\n if id not in new_stack_dict.keys():\n ## not in new stack yet, create a 'blank' object\n ## Note this will have no attributes, so will be replaced when merged with a proper object\n new_stack_dict[id] = mergableObject(id)\n\n existing_stack_member = new_stack_dict[id]\n\n ## add on the new information by merging\n ## if this was an empty 'blank' object it will just be replaced with stack_member\n new_stack_dict[id] = existing_stack_member.merge(stack_member)\n\n return new_stack_dict\n\n\n def blended_dict(self, stack_to_merge):\n \"\"\"\n Merges any objects in new_stack with the same ID as those in the original_stack\n\n :param self: list of mergableObject or inheritors thereof\n :param stack_to_merge: list of mergableObject or inheritors thereof\n\n :return: dict of mergableObjects, keynames .id\n \"\"\"\n\n ## We create a new dict stack of order details which will contain merged details\n\n new_stack = {}\n\n ## convert the thing we're merging into a dictionary\n stack_to_merge_dict = stack_to_merge.merged_dict()\n\n for stack_member in self:\n id = stack_member.id\n new_stack[id] = deepcopy(stack_member)\n\n if id in stack_to_merge_dict.keys():\n ## add on the new information by merging without overwriting\n new_stack[id] = stack_member.merge(stack_to_merge_dict[id], overwrite=False)\n\n return new_stack\n\n\n## Just to make the code more readable\n\nclass list_of_execInformation(list_of_mergables):\n pass\n\nclass list_of_orderInformation(list_of_mergables):\n pass\n\n\"\"\"\nNow into the main bit of the code; Wrapper and Client objects\n\"\"\"\n\nclass TestWrapper(EWrapper):\n \"\"\"\n The wrapper deals with the action coming back from the IB gateway or TWS instance\n\n We override methods in EWrapper that will get called when this action happens, like currentTime\n\n Extra methods are added as we need to store the results in this object\n \"\"\"\n\n def __init__(self):\n self._my_contract_details = {}\n self._my_requested_execution = {}\n self._my_price_details = {}\n self._my_option_chain = {}\n self._my_option_data = {}\n\n ## We set these up as we could get things coming along before we run an init\n self._my_executions_stream = queue.Queue()\n self._my_commission_stream = queue.Queue()\n self._my_open_orders = queue.Queue()\n self._my_positions = queue.Queue()\n self._my_errors = queue.Queue()\n\n ## error handling code\n def init_error(self):\n error_queue=queue.Queue()\n self._my_errors = error_queue\n\n def get_error(self, timeout=5):\n if self.is_error():\n try:\n return self._my_errors.get(timeout=timeout)\n except queue.Empty:\n return None\n\n return None\n\n def is_error(self):\n an_error_if=not self._my_errors.empty()\n return an_error_if\n\n def error(self, id, errorCode, errorString):\n ## Overriden method\n 
errormsg = \"IB error id %d errorcode %d string %s\" % (id, errorCode, errorString)\n self._my_errors.put(errormsg)\n\n def position(self, account:str, contract:IBcontract, position:float,\n avgCost:float):\n \"\"\"This event returns real-time positions for all accounts in\n response to the reqPositions() method.\"\"\"\n\n self._my_positions.put(vars())\n\n ## get price data\n def init_pricedetails(self, reqId):\n price_details_queue = self._my_price_details[reqId] = queue.Queue()\n\n return price_details_queue\n\n def tickPrice(self, reqId:int , tickType:int, price:float,\n attrib:str):\n \"\"\"Market data tick price callback. Handles all price related ticks.\"\"\"\n\n if reqId not in self._my_price_details.keys():\n self.init_pricedetails(reqId)\n self._my_price_details[reqId].put(vars())\n #self.logAnswer(vars())\n\n ## get option chain\n def init_optionchain(self, reqId):\n price_details_queue = self._my_option_chain[reqId] = queue.Queue()\n\n return price_details_queue\n\n def securityDefinitionOptionParameter(self, reqId:int, exchange:str,\n underlyingConId:int, tradingClass:str, multiplier:str,\n expirations, strikes):\n \"\"\"Market data tick price callback. Handles all price related ticks.\"\"\"\n\n if reqId not in self._my_option_chain.keys():\n self.init_optionchain(reqId)\n self._my_option_chain[reqId].put(vars())\n #self.logAnswer(vars())\n\n ## get option data\n def init_option_data(self, reqId):\n option_data_queue = self._my_option_data[reqId] = queue.Queue()\n\n return option_data_queue\n\n def tickOptionComputation(self, reqId:int, tickType:int ,\n impliedVol:float, delta:float, optPrice:float, pvDividend:float,\n gamma:float, vega:float, theta:float, undPrice:float):\n \"\"\"This function is called when the market in an option or its\n underlier moves. 
TWS's option model volatilities, prices, and\n deltas, along with the present value of dividends expected on that\n options underlier are received.\"\"\"\n\n if reqId not in self._my_option_data.keys():\n self.init_option_data(reqId)\n self._my_option_data[reqId].put(vars())\n #print(reqId,vars())\n #self.logAnswer(vars())\n\n ## get contract details code\n def init_contractdetails(self, reqId):\n contract_details_queue = self._my_contract_details[reqId] = queue.Queue()\n\n return contract_details_queue\n\n def contractDetails(self, reqId, contractDetails):\n ## overridden method\n super().contractDetails(reqId, contractDetails)\n\n if reqId not in self._my_contract_details.keys():\n self.init_contractdetails(reqId)\n\n self._my_contract_details[reqId].put(contractDetails)\n\n def contractDetailsEnd(self, reqId):\n ## overriden method\n if reqId not in self._my_contract_details.keys():\n self.init_contractdetails(reqId)\n\n self._my_contract_details[reqId].put(FINISHED)\n\n # orders\n def init_open_orders(self):\n open_orders_queue = self._my_open_orders = queue.Queue()\n\n return open_orders_queue\n\n\n def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permid,\n parentId, lastFillPrice, clientId, whyHeld):\n\n order_details = orderInformation(orderId, status=status, filled=filled,\n avgFillPrice=avgFillPrice, permid=permid,\n parentId=parentId, lastFillPrice=lastFillPrice, clientId=clientId,\n whyHeld=whyHeld)\n\n self._my_open_orders.put(order_details)\n\n\n def openOrder(self, orderId, contract, order, orderstate):\n \"\"\"\n Tells us about any orders we are working now\n\n overriden method\n \"\"\"\n\n order_details = orderInformation(orderId, contract=contract, order=order, orderstate = orderstate)\n self._my_open_orders.put(order_details)\n\n\n def openOrderEnd(self):\n \"\"\"\n Finished getting open orders\n\n Overriden method\n \"\"\"\n\n self._my_open_orders.put(FINISHED)\n\n\n \"\"\" Executions and commissions\n\n requested executions get dropped into single queue: self._my_requested_execution[reqId]\n Those that arrive as orders are completed without a relevant reqId go into self._my_executions_stream\n All commissions go into self._my_commission_stream (could be requested or not)\n\n The *_stream queues are permanent, and init when the TestWrapper instance is created\n\n \"\"\"\n\n\n def init_requested_execution_data(self, reqId):\n execution_queue = self._my_requested_execution[reqId] = queue.Queue()\n\n return execution_queue\n\n def access_commission_stream(self):\n ## Access to the 'permanent' queue for commissions\n\n return self._my_commission_stream\n\n def access_executions_stream(self):\n ## Access to the 'permanent' queue for executions\n\n return self._my_executions_stream\n\n\n def commissionReport(self, commreport):\n \"\"\"\n This is called if\n\n a) we have submitted an order and a fill has come back\n b) We have asked for recent fills to be given to us\n\n However no reqid is ever passed\n\n overriden method\n\n :param commreport:\n :return:\n \"\"\"\n\n commdata = execInformation(commreport.execId, Commission=commreport.commission,\n commission_currency = commreport.currency,\n realisedpnl = commreport.realizedPNL)\n\n\n ## there are some other things in commreport you could add\n ## make sure you add them to the .attributes() field of the execInformation class\n\n ## These always go into the 'stream' as could be from a request, or a fill thats just happened\n self._my_commission_stream.put(commdata)\n\n\n def execDetails(self, reqId, 
contract, execution):\n \"\"\"\n This is called if\n\n a) we have submitted an order and a fill has come back (in which case reqId will be FILL_CODE)\n b) We have asked for recent fills to be given to us (reqId will be\n\n See API docs for more details\n \"\"\"\n ## overriden method\n\n execdata = execInformation(execution.execId, contract=contract,\n ClientId=execution.clientId, OrderId=execution.orderId,\n time=execution.time, AvgPrice=execution.avgPrice,\n AcctNumber=execution.acctNumber, Shares=execution.shares,\n Price = execution.price)\n\n ## there are some other things in execution you could add\n ## make sure you add them to the .attributes() field of the execInformation class\n\n reqId = int(reqId)\n\n ## We eithier put this into a stream if its just happened, or store it for a specific request\n if reqId==FILL_CODE:\n self._my_executions_stream.put(execdata)\n else:\n self._my_requested_execution[reqId].put(execdata)\n\n\n\n def execDetailsEnd(self, reqId):\n \"\"\"\n No more orders to look at if execution details requested\n \"\"\"\n self._my_requested_execution[reqId].put(FINISHED)\n\n\n ## order ids\n def init_nextvalidid(self):\n\n orderid_queue = self._my_orderid_data = queue.Queue()\n\n return orderid_queue\n\n def nextValidId(self, orderId):\n \"\"\"\n Give the next valid order id\n\n Note this doesn't 'burn' the ID; if you call again without executing the next ID will be the same\n\n If you're executing through multiple clients you are probably better off having an explicit counter\n\n \"\"\"\n if getattr(self, '_my_orderid_data', None) is None:\n ## getting an ID which we haven't asked for\n ## this happens, IB server just sends this along occassionally\n self.init_nextvalidid()\n\n self._my_orderid_data.put(orderId)\n\n\n\nclass TestClient(EClient):\n \"\"\"\n The client method\n\n We don't override native methods, but instead call them from our own wrappers\n \"\"\"\n def __init__(self, wrapper):\n ## Set up with a wrapper inside\n EClient.__init__(self, wrapper)\n\n self._market_data_q_dict = {}\n self._commissions=list_of_execInformation()\n\n def resolve_ib_contract(self, ibcontract, reqId=DEFAULT_GET_CONTRACT_ID):\n\n \"\"\"\n From a partially formed contract, returns a fully fledged version\n\n :returns fully resolved IB contract\n \"\"\"\n\n ## Make a place to store the data we're going to return\n contract_details_queue = finishableQueue(self.init_contractdetails(reqId))\n\n print(\"Getting full contract details from the server... 
\")\n\n self.reqContractDetails(reqId, ibcontract)\n\n ## Run until we get a valid contract(s) or get bored waiting\n MAX_WAIT_SECONDS = 10\n new_contract_details = contract_details_queue.get(timeout = MAX_WAIT_SECONDS)\n\n while self.wrapper.is_error():\n print(self.get_error())\n\n if contract_details_queue.timed_out():\n print(\"Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour\")\n\n if len(new_contract_details)==0:\n print(\"Failed to get additional contract details: returning unresolved contract\")\n return ibcontract\n\n if len(new_contract_details)>1:\n print(\"got multiple contracts using first one\")\n\n new_contract_details=new_contract_details[0]\n\n resolved_ibcontract=new_contract_details.summary\n\n return resolved_ibcontract\n\n\n def get_next_brokerorderid(self):\n \"\"\"\n Get next broker order id\n\n :return: broker order id, int; or TIME_OUT if unavailable\n \"\"\"\n\n ## Make a place to store the data we're going to return\n orderid_q = self.init_nextvalidid()\n\n self.reqIds(-1) # -1 is irrelevant apparently (see IB API docs)\n\n ## Run until we get a valid contract(s) or get bored waiting\n MAX_WAIT_SECONDS = 10\n try:\n brokerorderid = orderid_q.get(timeout=MAX_WAIT_SECONDS)\n except queue.Empty:\n print(\"Wrapper timeout waiting for broker orderid\")\n brokerorderid = TIME_OUT\n\n while self.wrapper.is_error():\n print(self.get_error(timeout=MAX_WAIT_SECONDS))\n\n return brokerorderid\n\n\n def place_new_IB_order(self, ibcontract, order, orderid=None):\n \"\"\"\n Places an order\n\n Returns brokerorderid\n\n \"\"\"\n\n ## We can eithier supply our own ID or ask IB to give us the next valid one\n if orderid is None:\n print(\"Getting orderid from IB\")\n orderid = self.get_next_brokerorderid()\n\n if orderid is TIME_OUT:\n raise Exception(\"I couldn't get an orderid from IB, and you didn't provide an orderid\")\n\n print(\"Using order id of %d\" % orderid)\n\n ## Note: It's possible if you have multiple traidng instances for orderids to be submitted out of sequence\n ## in which case IB will break\n\n # Place the order\n self.placeOrder(\n orderid, # orderId,\n ibcontract, # contract,\n order # order\n )\n\n return orderid\n\n\n def any_open_orders(self):\n \"\"\"\n Simple wrapper to tell us if we have any open orders\n \"\"\"\n\n return len(self.get_open_orders()) > 0\n\n\n def get_open_orders(self):\n \"\"\"\n Returns a list of any open orders\n \"\"\"\n\n ## store the orders somewhere\n open_orders_queue = finishableQueue(self.init_open_orders())\n\n ## You may prefer to use reqOpenOrders() which only retrieves orders for this client\n self.reqAllOpenOrders()\n\n ## Run until we get a terimination or get bored waiting\n MAX_WAIT_SECONDS = 5\n open_orders_list = list_of_orderInformation(open_orders_queue.get(timeout = MAX_WAIT_SECONDS))\n\n while self.wrapper.is_error():\n print(self.get_error())\n\n if open_orders_queue.timed_out():\n print(\"Exceeded maximum wait for wrapper to confirm finished whilst getting orders\")\n\n ## open orders queue will be a jumble of order details, turn into a tidy dict with no duplicates\n open_orders_dict = open_orders_list.merged_dict()\n\n return open_orders_dict\n\n\n def get_executions_and_commissions(self, reqId=DEFAULT_EXEC_TICKER, execution_filter = ExecutionFilter()):\n \"\"\"\n Returns a list of all executions done today with commission data\n \"\"\"\n\n ## store somewhere\n execution_queue = finishableQueue(self.init_requested_execution_data(reqId))\n\n ## We can change ExecutionFilter 
to subset different orders\n ## note this will also pull in commissions but we would use get_executions_with_commissions\n self.reqExecutions(reqId, execution_filter)\n\n ## Run until we get a terimination or get bored waiting\n MAX_WAIT_SECONDS = 10\n exec_list = list_of_execInformation(execution_queue.get(timeout = MAX_WAIT_SECONDS))\n\n while self.wrapper.is_error():\n print(self.get_error())\n\n if execution_queue.timed_out():\n print(\"Exceeded maximum wait for wrapper to confirm finished whilst getting exec / commissions\")\n\n ## Commissions will arrive seperately. We get all of them, but will only use those relevant for us\n commissions = self._all_commissions()\n\n ## glue them together, create a dict, remove duplicates\n all_data = exec_list.blended_dict(commissions)\n\n return all_data\n\n\n def _recent_fills(self):\n \"\"\"\n Returns any fills since we last called recent_fills\n\n :return: list of executions as execInformation objects\n \"\"\"\n\n ## we don't set up a queue but access the permanent one\n fill_queue = self.access_executions_stream()\n\n list_of_fills=list_of_execInformation()\n\n while not fill_queue.empty():\n MAX_WAIT_SECONDS = 5\n try:\n next_fill = fill_queue.get(timeout=MAX_WAIT_SECONDS)\n list_of_fills.append(next_fill)\n except queue.Empty:\n ## corner case where Q emptied since we last checked if empty at top of while loop\n pass\n\n ## note this could include duplicates and is a list\n return list_of_fills\n\n\n def recent_fills_and_commissions(self):\n \"\"\"\n Return recent fills, with commissions added in\n\n :return: dict of execInformation objects, keys are execids\n \"\"\"\n\n recent_fills = self._recent_fills()\n commissions = self._all_commissions() ## we want all commissions\n\n ## glue them together, create a dict, remove duplicates\n all_data = recent_fills.blended_dict(commissions)\n\n return all_data\n\n\n def _recent_commissions(self):\n \"\"\"\n Returns any commissions that are in the queue since we last checked\n\n :return: list of commissions as execInformation objects\n \"\"\"\n\n ## we don't set up a queue, as there is a permanent one\n comm_queue = self.access_commission_stream()\n\n list_of_comm=list_of_execInformation()\n\n while not comm_queue.empty():\n MAX_WAIT_SECONDS = 5\n try:\n next_comm = comm_queue.get(timeout=MAX_WAIT_SECONDS)\n list_of_comm.append(next_comm)\n except queue.Empty:\n ## corner case where Q emptied since we last checked if empty at top of while loop\n pass\n\n ## note this could include duplicates and is a list\n return list_of_comm\n\n\n def _all_commissions(self):\n \"\"\"\n Returns all commissions since we created this instance\n\n :return: list of commissions as execInformation objects\n \"\"\"\n\n original_commissions = self._commissions\n latest_commissions = self._recent_commissions()\n\n all_commissions = list_of_execInformation(original_commissions + latest_commissions)\n\n self._commissions = all_commissions\n\n # note this could include duplicates and is a list\n return all_commissions\n\n\n def cancel_order(self, orderid):\n\n ## Has to be an order placed by this client. 
I don't check this here -\n ## If you have multiple IDs then you you need to check this yourself.\n\n self.cancelOrder(orderid)\n\n ## Wait until order is cancelled\n start_time=datetime.datetime.now()\n MAX_WAIT_TIME_SECONDS = 10\n\n finished = False\n\n while not finished:\n if orderid not in self.get_open_orders():\n ## finally cancelled\n finished = True\n\n if (datetime.datetime.now() - start_time).seconds > MAX_WAIT_TIME_SECONDS:\n print(\"Wrapper didn't come back with confirmation that order was cancelled!\")\n finished = True\n\n ## return nothing\n\n def cancel_all_orders(self):\n\n ## Cancels all orders, from all client ids.\n ## if you don't want to do this, then instead run .cancel_order over named IDs\n self.reqGlobalCancel()\n\n start_time=datetime.datetime.now()\n MAX_WAIT_TIME_SECONDS = 10\n\n finished = False\n\n while not finished:\n if not self.any_open_orders():\n ## all orders finally cancelled\n finished = True\n if (datetime.datetime.now() - start_time).seconds > MAX_WAIT_TIME_SECONDS:\n print(\"Wrapper didn't come back with confirmation that all orders were cancelled!\")\n finished = True\n\n ## return nothing\n\nclass TestApp(TestWrapper, TestClient):\n def __init__(self, ipaddress, portid, clientid):\n TestWrapper.__init__(self)\n TestClient.__init__(self, wrapper=self)\n\n self.connect(ipaddress, portid, clientid)\n #print(\"serverVersion:%s connectionTime:%s\" % (self.serverVersion(),self.twsConnectionTime()))\n\n thread = Thread(target = self.run)\n thread.start()\n\n setattr(self, \"_thread\", thread)\n\n self.init_error()\n\n def disconnect(self):\n \"\"\"Call this function to terminate the connections with TWS.\n Calling this function does not cancel orders that have already been\n sent.\"\"\"\n\n self.setConnState(EClient.DISCONNECTED)\n if self.conn is not None:\n logging.info(\"disconnecting\")\n try:\n self.conn.disconnect()\n self.wrapper.connectionClosed()\n self.reset()\n self._\n except:\n pass\n\n def sellLast(self, orderInfo):\n\n takeProfitLimitPrice = orderInfo.order.lmtPrice\n contract3 = orderInfo.contract\n self.reqMktData(1032, contract3, \"\", False, False, [])\n time.sleep(3)\n lastPrice = None\n try:\n for k in list(self._my_price_details[1032].queue):\n t = dict(k)\n if t['tickType'] == 4:\n lastPrice = t['price']\n if t['tickType'] == 9 and lastPrice == None:\n lastPrice = t['price']\n print(t)\n takeProfitLimitPrice = lastPrice\n takeProfit = orderInfo.order\n takeProfit.lmtPrice = takeProfitLimitPrice\n takeProfit.transmit = True\n self.place_new_IB_order(contract3, takeProfit, orderid=takeProfit.orderId)\n except:\n print(\"No pricing available for {} at this time.\".format(contract3.symbol))\n\n def quote(self, c):\n self.reqMktData(1032, c, \"\", False, False, [])\n time.sleep(5)\n lastPrice = None\n try:\n for k in list(self._my_price_details[1032].queue):\n t = dict(k)\n if t['tickType'] == 4:\n lastPrice = t['price']\n if t['tickType'] == 9 and lastPrice == None:\n lastPrice = t['price']\n print(t)\n except:\n print(\"Price is not currently available\")\n return None\n return lastPrice\n\n\n def buy6030(self, sym, direction = \"Bull\", exp = \"\", budget = 500):\n\n if direction == \"Bull\":\n right = \"Put\"\n else:\n right = \"Call\"\n\n if exp == \"\":\n d = datetime.date.today()\n d += datetime.timedelta(10)\n while d.weekday() != 4:\n d += datetime.timedelta(1)\n exp = d.strftime(\"%Y%m%d\")\n\n contract1 = IBcontract()\n contract1.secType = \"STK\"\n contract1.symbol = sym\n contract1.exchange = \"ISLAND\"\n\n contract2 = 
IBcontract()\n contract2.secType = \"OPT\"\n contract2.symbol = sym\n contract2.exchange = \"SMART\"\n contract2.lastTradeDateOrContractMonth = exp\n contract2.right = right\n contract2.multiplier = 100\n\n self.reqMktData(1032, contract1, \"\", False, False, [])\n contract1.exchange = \"SMART\"\n self.reqMktData(1033, contract1, \"\", False, False, [])\n d = self.reqContractDetails(1202, contract2)\n time.sleep(1)\n #print(d)\n\n print(\"=\"*40)\n print()\n print(\"{} Price Details:\".format(sym))\n lastPrice = None\n try:\n for k in list(self._my_price_details[1032].queue):\n t = dict(k)\n if t['tickType'] == 4:\n lastPrice = t['price']\n if t['tickType'] == 9 and lastPrice == None:\n lastPrice = t['price']\n print(t)\n except:\n try:\n for k in list(self._my_price_details[1033].queue):\n t = dict(k)\n if t['tickType'] == 4:\n lastPrice = t['price']\n if t['tickType'] == 9 and lastPrice == None:\n lastPrice = t['price']\n print(t)\n except:\n print(\"No stock prices available for {} at this time.\".format(sym))\n return\n\n if lastPrice == None:\n print(\"No stock prices available for {} at this time.\".format(sym))\n return\n\n # print()\n # print(\"{0} Last Price: ${1:4.2f}\".format(sym, lastPrice))\n # print()\n\n rID = 1100\n df = DataFrame()\n print(\"Contract Details:\")\n try:\n cDetails = self._my_contract_details[1202].queue\n except:\n print(\"Contract details for {} are not available at this time.\".format(sym))\n return\n for k in list(cDetails):\n t = list(str(k).split(','))\n # print(t)\n try:\n if lastPrice * 1.10 > float(t[4]) > lastPrice * 0.90:\n df[rID] = t\n contract3 = IBcontract()\n contract3.secType = \"OPT\"\n contract3.symbol = sym\n contract3.exchange = \"CBOE2\"\n contract3.lastTradeDateOrContractMonth = exp\n contract3.strike = float(t[4])\n contract3.right = right\n contract3.multiplier = 100\n self.reqMarketDataType(2)\n self.reqMktData(rID, contract3, \"\", False, False, [])\n rID = rID + 1\n except:\n pass\n if rID == 1100:\n print(\"No option prices available for {} at this time.\".format(sym))\n return\n\n df = df.transpose()\n # print(df)\n # print(\"Getting option details for {0:2d} strikes:\".format(len(df)))\n # print()\n\n time.sleep(1)\n\n df['undPrice'] = [\"\"] * len(df)\n df['optPrice'] = [\"\"] * len(df)\n df['delta'] = [\"\"] * len(df)\n df['strike'] = [\"\"] * len(df)\n df['delta60'] = [\"\"] * len(df)\n for s in df.index:\n #self.cancelMktData(s)\n try:\n for k in list(self._my_option_data[s].queue):\n t = dict(k)\n #print(s,t)\n if t['delta']:\n try:\n df.loc[s, 'conId'] = int(df.loc[s, 0])\n df.loc[s, 'strike'] = float(df.loc[s, 4])\n df.loc[s, 'undPrice'] = t['undPrice']\n df.loc[s, 'optPrice'] = t['optPrice']\n df.loc[s, 'delta'] = abs(t['delta'])\n df.loc[s, 'delta60'] = abs(abs(t['delta']) - 0.60)\n except:\n pass\n except:\n print(\"No option prices available for {} at this time.\".format(sym))\n return\n\n # print(df.loc[:,['conId',3,'strike','undPrice','delta','delta60']].sort_values(['strike']))\n # print()\n d60 = df.loc[df['delta60'] == df['delta60'].min()].index.min()\n # print(\"Sell a {} with the {:7.2f} strike\".format(right,df.strike[d60]))\n\n t30 = (df.delta[d60] - 0.3)\n p = df.loc[df.delta > t30].delta.min()\n d30plus = df.loc[df.delta == p].index.min()\n m = df.loc[df.delta < t30].delta.max()\n d30min = df.loc[df.delta == m].index.min()\n if abs(df.delta[d30plus] - t30) > abs(df.delta[d30min] - t30):\n d30 = d30min\n else:\n d30 = d30plus\n\n # Order variables\n #####\n cdelta = df.delta[d60] - df.delta[d30]\n lim = 
abs(df.strike[d60] - df.strike[d30]) * 0.35\n try:\n cOptPrice = df.optPrice[d60] - df.optPrice[d30]\n if abs(cOptPrice) < abs(lim*0.95):\n print(\"Spread Combo price for {} is too low.\".format(sym))\n return True\n quantity = int(budget / 100 / cOptPrice)\n if quantity == 0:\n print(\"Spread Combo for {} is above the budget of ${}\".format(sym,budget))\n return True\n except:\n quantity = 1\n takeProfitLimitPrice = lim * 0.\n stopLossPrice = lim * 1.50\n action = \"SELL\"\n #parentOrderId = 101\n\n # print(\"Buy a {} with the {:7.2f} strike \".format(right,df.strike[d30]))\n # print(\"Combo delta is {:5.3f}\".format(cdelta))\n # print(\"Combo limit price is ${:7.2f}\".format(lim))\n # print(\"Combo Expiry is {}\".format(exp))\n # print()\n print(\n \"{} - Price: ${:7.2f} - Sell a {} {:7.2f}/{:7.2f} {} Spread - Limit price: ${:5.2f} - Combo delta: {:5.3f}\".\n format(sym, lastPrice, exp, df.strike[d60], df.strike[d30], right, lim, cdelta))\n\n #\n # Send order for the Spread above\n ####\n\n contract3 = IBcontract()\n contract3.secType = \"BAG\"\n contract3.symbol = sym\n contract3.exchange = \"SMART\"\n contract3.currency = \"USD\"\n\n leg1 = IBcomboLeg()\n leg1.conId = int(df.conId[d60]) # Sell the delta 60 option\n leg1.ratio = 1\n leg1.action = \"SELL\" if action == \"BUY\" else \"BUY\"\n leg1.exchange = \"SMART\"\n\n leg2 = IBcomboLeg()\n leg2.conId = int(df.conId[d30]) # Buy the delta 30 option as protection\n leg2.ratio = 1\n leg2.action = \"BUY\" if action == \"BUY\" else \"SELL\"\n leg2.exchange = \"SMART\"\n\n contract3.comboLegs = []\n contract3.comboLegs.append(leg1)\n contract3.comboLegs.append(leg2)\n\n order3 = Order()\n order3.action = action\n order3.orderType = \"LMT\"\n order3.totalQuantity = quantity\n order3.lmtPrice = lim\n order3.tif = 'DAY'\n order3.transmit = False\n\n parentOrderId = self.place_new_IB_order(contract3, order3, orderid=None)\n\n takeProfit = Order()\n takeProfit.action = \"SELL\" if action == \"BUY\" else \"BUY\"\n takeProfit.orderType = \"LMT\"\n takeProfit.totalQuantity = quantity\n takeProfit.lmtPrice = takeProfitLimitPrice\n takeProfit.parentId = parentOrderId\n takeProfit.tif = 'GTC'\n takeProfit.transmit = False\n self.place_new_IB_order(contract3, takeProfit, orderid=None)\n\n stopLoss = Order()\n stopLoss.action = \"SELL\" if action == \"BUY\" else \"BUY\"\n stopLoss.orderType = \"STP\"\n # Stop trigger price\n stopLoss.auxPrice = stopLossPrice\n stopLoss.totalQuantity = quantity\n stopLoss.parentId = parentOrderId\n stopLoss.tif = 'GTC'\n # In this case, the low side order will be the last child being sent. Therefore, it needs to set this attribute to True\n # to activate all its predecessors\n stopLoss.transmit = True\n self.place_new_IB_order(contract3, stopLoss, orderid=None)\n time.sleep(1)\n return True\n\n def tradeReport(self):\n self.commissionReport('Feb12')\n","sub_path":"IBapiMod.py","file_name":"IBapiMod.py","file_ext":"py","file_size_in_byte":39185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"623423056","text":"import uuid\n\nimport models\nfrom cache import redis_db\nfrom db import db\nfrom exceptions import DeviceAlreadyExists, UnknownDevice\n\nfrom . 
import UserService\n\n\nclass DeviceService:\n\n @staticmethod\n def get_device_by_id(device_id):\n device = models.Device.query.filter_by(id=uuid.UUID(device_id)).first()\n if not device:\n raise UnknownDevice\n return device\n\n @staticmethod\n def create_device_auth_request(email) -> (str, str):\n device_id = str(uuid.uuid4())\n device_auth_id = str(uuid.uuid4())\n\n data = {\n 'email': email,\n 'device_id': device_id\n }\n\n redis_db.hset(name=device_auth_id, mapping=data)\n redis_db.expire(name=device_auth_id, time=60 * 60 * 3)\n\n return device_id, device_auth_id\n\n @staticmethod\n def is_device_registered(email, device_id):\n user = UserService.get_user_by_email(email)\n\n device_exists = db.session.query(models.User, models.Device).filter(models.Device.id == device_id).one_or_none()\n if device_exists:\n return True\n\n return False\n\n @staticmethod\n def authorize_device(device_auth_id):\n device_data = redis_db.hgetall(str(device_auth_id))\n\n if not device_data:\n raise UnknownDevice\n\n email = device_data['email']\n device_id = device_data['device_id']\n\n user = UserService.get_user_by_email(email)\n\n device_is_already_registered = db.session.query(models.User, models.Device).filter(models.Device.id == device_id).one_or_none()\n if device_is_already_registered:\n raise DeviceAlreadyExists\n\n device = models.Device(id=device_id, user_id=user.id)\n db.session.add(device)\n db.session.commit()\n\n @classmethod\n def delete_device(cls, device_id):\n device = cls.get_device_by_id(device_id)\n if device:\n db.session.delete(device)\n db.session.commit()\n else:\n raise UnknownDevice\n\n","sub_path":"images/auth-api/src/services/device_service.py","file_name":"device_service.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"142655629","text":"# 隐含概率分布-石霭青\n\n\n# 要求输入:option_data(50etf欧式期权信息,DataFrame或其他格式,要求至少包含行权价格、看涨/看跌、剩余存续期、期权收盘价)、\n# 无风险利率r(shibor 3M)\n# 标的资产价格S\n# option_data示例见同级文件夹中附图\n\n\n# encoding: utf8\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nfrom sklearn.linear_model import LinearRegression\nfrom scipy.optimize import leastsq\nfrom scipy import sparse\nimport math\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\nfrom scipy import interpolate\nimport sys\n\nfrom scipy.misc import derivative\n\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\n\nimport scipy as sp\n\nfrom scipy.optimize import curve_fit\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib import pylab\n\noption_data, r, S = 0, 0, 0 # 获得输入,option_data-DataFrame,r-double, S-double\nq = 0 #红利\n\n\n# 提取有效信息\ndef getInfo(option_data, r):\n option_data = option_data[['us_name', 'option_code', 'exe_type', 'strike_price', 'call_put', 'expiredate']]\n option_data = option_data[option_data['expiredate'] > option_data['expiredate'].min()]\n #option_data['option_price'] # 当天收盘价, 要求数据输入\n option_data['expiredate'] = option_data['expiredate'] / 365\n option_data['q'] = 0\n option_data['SHIBOR(3M)'] = r\n option_data.columns = ['期权标的', '期权代码', '期权类型', '行权价格', '看涨/看跌', '剩余存续期', '期权收盘价', '红利q', 'SHIBOR(3M)']\n option_data.index = range(len(option_data))\n return option_data\n\n\n# 分离认购认沽期权\ndef splitCallPut(option_data):\n data_call = option_data[option_data['看涨/看跌'] == '认购']\n 
data_put = option_data[option_data['看涨/看跌'] == '认沽']\n return data_call,data_put\n\n\n# 计算标的资产价格(计算隐含波动率时用)\ndef assetPrice(c, p, K, r, t):\n S=c+K*math.exp(-r*t)-p\n return S\n\n\n# 计算data_call, data_put的标的资产价格\ndef assetPriceCallPut(data_call, data_put, r):\n data_call['标的资产价格'] = 0.00\n data_put['标的资产价格'] = 0.00\n for i in data_call.index:\n for j in data_put.index:\n if data_call['行权价格'][i] == data_put['行权价格'][j] and data_call['剩余存续期'][i] == data_put['剩余存续期'][j]:\n c = data_call['期权收盘价'][i]\n p = data_put['期权收盘价'][j]\n K = data_call['行权价格'][i]\n r = r / 100\n t = data_call['剩余存续期'][i]\n S = assetPrice(c, p, K, r, t)\n data_call['标的资产价格'][i] = S\n data_put['标的资产价格'][j] = S\n return data_call, data_put\n\n\n# 筛选虚值状态期权用以研究\ndef getOutofMoney(data_call, data_put):\n call_data = data_call[data_call['行权价格'] > data_call['标的资产价格']]\n put_data = data_put[data_put['行权价格'] < data_put['标的资产价格']]\n return call_data, put_data\n\n\n# 虚值状态看涨期权隐含波动率\ndef ImpVolCall(MktPrice, Strike, Expiry, Asset, IntRate, Dividend, Sigma, error):\n n = 1\n Volatility = Sigma # 初始值\n dv = error + 1\n while abs(dv) > error:\n d1 = np.log(Asset / Strike) + (IntRate - Dividend + 0.5 * Volatility ** 2) * Expiry\n d1 = d1 / (Volatility * np.sqrt(Expiry))\n d2 = d1 - Volatility * np.sqrt(Expiry)\n PriceError = Asset * math.exp(-Dividend * Expiry) * norm.cdf(d1) - Strike * math.exp(\n -IntRate * Expiry) * norm.cdf(d2) - MktPrice\n Vega1 = Asset * np.sqrt(Expiry / 3.1415926 / 2) * math.exp(-0.5 * d1 ** 2)\n dv = PriceError / Vega1\n Volatility = Volatility - dv\n n = n + 1\n if n > 300:\n ImpVolCall = 0.0\n break\n ImpVolCall = Volatility\n return ImpVolCall\n\n\n# 虚值状态看跌期权隐含波动率\ndef ImpVolPut(MktPrice, Strike, Expiry, Asset, IntRate, Dividend, Sigma, error):\n n = 1\n Volatility = Sigma # 初始值\n dv = error + 1\n while abs(dv) > error:\n d1 = np.log(Asset / Strike) + (IntRate - Dividend + 0.5 * Volatility ** 2) * Expiry\n d1 = d1 / (Volatility * np.sqrt(Expiry))\n d2 = d1 - Volatility * np.sqrt(Expiry)\n PriceError = -Asset * math.exp(-Dividend * Expiry) * norm.cdf(-d1) + Strike * math.exp(\n -IntRate * Expiry) * norm.cdf(-d2) - MktPrice\n Vega1 = Asset * np.sqrt(Expiry / 3.1415926 / 2) * math.exp(-0.5 * d1 ** 2)\n dv = PriceError / Vega1\n Volatility = Volatility - dv\n n = n + 1\n if n > 300:\n ImpVolPut = 0.0\n break\n ImpVolPut = Volatility\n return ImpVolPut\n\n\n# 计算call_data, put_data各自的隐含波动率\ndef impVolCal(call_data, put_data):\n # call\n call_data.index = range(len(call_data))\n Sigma, error = 1, 0.001\n for j in range(len(call_data)):\n MktPrice = call_data.loc[j, '期权收盘价']\n Strike = call_data.loc[j, '行权价格']\n Expiry = call_data.loc[j, '剩余存续期']\n Asset = call_data.loc[j, '标的资产价格']\n IntRate = call_data.loc[j, 'SHIBOR(3M)'] / 100\n Dividend = call_data.loc[j, '红利q']\n volatility = ImpVolCall(MktPrice, Strike, Expiry, Asset, IntRate, Dividend, Sigma, error)\n call_data.loc[j, '隐含波动率'] = volatility\n # put\n put_data.index = range(len(put_data))\n Sigma, error = 1, 0.001\n for j in range(len(put_data)):\n MktPrice = put_data.loc[j, '期权收盘价']\n Strike = put_data.loc[j, '行权价格']\n Expiry = put_data.loc[j, '剩余存续期']\n Asset = put_data.loc[j, '标的资产价格']\n IntRate = put_data.loc[j, 'SHIBOR(3M)'] / 100\n Dividend = put_data.loc[j, '红利q']\n volatility = ImpVolPut(MktPrice, Strike, Expiry, Asset, IntRate, Dividend, Sigma, error)\n put_data.loc[j, '隐含波动率'] = volatility\n return call_data, put_data\n\n\n# 取有效信息,拼接,按行权价格和剩余存续期(还有多久到期)排序\ndef getRes(call_data, put_data):\n res_df = pd.concat([put_data[['行权价格', '剩余存续期', '隐含波动率', 
'看涨/看跌']], call_data[['行权价格', '剩余存续期', '隐含波动率', '看涨/看跌']]])\n res_df = res_df.sort_values(['行权价格', '剩余存续期'])\n res_df.index = range(len(res_df))\n return res_df\n\n\n# 将以上res_df转化为方差矩阵,在行权价方向上进行样条插值,在剩余存续期方向上进行线性插值。\n# 然后将得到的方差矩阵再转化为波动率矩阵\ndef impVolInterp(res_df):\n # 转化为方差矩阵\n res = res_df\n res['隐含方差'] = res['隐含波动率'] ** 2 * res['剩余存续期']\n spline_data = res[(res['行权价格'] <= res['行权价格'].max()) & (res['行权价格'].min() <= res['行权价格'])]\n vol_mat = []\n for j in list(spline_data['行权价格'].unique()):\n vol_mat.append(list(spline_data[spline_data['行权价格'] == j]['隐含方差']))\n vol_mat = pd.DataFrame(vol_mat)\n vol_after_k = pd.DataFrame([])\n # 在行权价方向上进行样条插值\n for j in range(vol_mat.shape[1]):\n k = np.array(list(spline_data['行权价格'].unique()))\n kmesh = np.linspace(k.min(), k.max(), 300)\n volinter1 = interpolate.spline(k, np.array(vol_mat[j]), kmesh)\n vol_after_k['期限' + str(j)] = volinter1\n # tck = interpolate.splrep(k,np.array(vol_mat[j]))\n # volinter1 = interpolate.splev(kmesh, tck)\n # vol_after_k['期限'+str(j)] = volinter1\n vol_after_k.index = kmesh\n # 在剩余存续期方向上进行线性插值\n tt = np.array(list(res['剩余存续期'].unique()))\n tt.sort()\n tmesh = np.linspace(tt.min(), tt.max(), 300)\n res_kt = []\n for j in vol_after_k.index:\n volinter2 = np.interp(tmesh, tt, np.array(vol_after_k.loc[j, :]))\n res_kt.append(volinter2)\n vol_after_kt = pd.DataFrame(res_kt)\n vol_after_kt.index = vol_after_k.index\n vol_after_kt.columns = tmesh\n for j in vol_after_kt.index:\n vol_after_kt.loc[j, :] = np.sqrt(np.array(vol_after_kt.loc[j, :]) / tmesh)\n return vol_after_kt, tmesh, kmesh\n\n\n# 看涨看跌平价转换,然后计算看涨期权定价\ndef callPricing(vol_after_kt, r, splitPrice, tmesh, kmesh, S):\n call_price = vol_after_kt.copy()\n for j in tmesh:\n for i in kmesh:\n v = vol_after_kt.loc[i, j]\n d1 = np.log(S / i) + (r - q + 0.5 * v ** 2) * j\n d1 = d1 / (v * np.sqrt(j))\n d2 = d1 - v * np.sqrt(j)\n if i >= splitPrice:\n # 看涨\n call_price.loc[i, j] = S * math.exp(-q * j) * norm.cdf(d1) - i * math.exp(-r * j) * norm.cdf(d2)\n else:\n # 看跌\n # temp_put_price = -S*math.exp(-q*j)*norm.cdf(-d1)+i*math.exp(-r*j)*norm.cdf(d2)\n # call_price.loc[i,j] = -i*math.exp(-r*j)+temp_put_price+S\n temp_put_price = -S * math.exp(-q * j) * norm.cdf(-d1) + i * math.exp(-r * j) * norm.cdf(-d2)\n call_price.loc[i, j] = -i * math.exp(-r * j) + temp_put_price + S * math.exp(-q * j)\n return call_price\n\n\n# 隐含概率分布(这里选取的是最近到期的日子)\ndef impProbability(call_price, S):\n x = np.array(call_price.index)\n t = call_price.columns[0]\n y = np.array(list(call_price.iloc[:, 0]))\n h = x[1] - x[0]\n d2 = []\n # 差分\n for i in range(1, len(y) - 1):\n d2.append(math.exp(r * t) * ((y[i - 1] + y[i + 1] - 2 * y[i]) / (h ** 2)))\n xx = x[1:len(x) - 1]\n # 可以画个概率分布图\n # plot2 = plt.plot(xx, d2, 'b', label='diff')\n # plt.xlabel('X')\n # plt.ylabel('Y')\n # plt.legend(loc=3) # 设置图示的位置\n # plt.title('density of price(2017-10-17)') # 设置标题\n # plt.show()\n # 转化为收益率概率分布\n rp = []\n rx = []\n for i in range(len(xx)):\n RT = np.log(xx[i] / S)\n rx.append(RT)\n rp.append(d2[i] * S * math.exp(RT))\n # 概率分布图\n # plot2 = plt.plot(rx, rp, 'b', label='diff')\n # plt.xlabel('RT')\n # plt.ylabel('density')\n # plt.legend(loc=3) # 设置图示的位置\n # plt.title('density of RT(2017-10-17)') # 设置标题\n # plt.show() # 显示图片\n return rx, rp\n\n\n# 拟合函数\ndef fit_func1(k, x):\n # k, b = a\n # return k * x + b\n # c!=0\n a, b, c = k\n # return (((1+c*(x-a)/b)**(-1-1/c))/b)*np.exp((1+c*(x-a)/b)**(-1/c))\n # c==0\n return np.exp((a - x) / b) * np.exp(-np.exp((a - x) / b)) / b\n\n\ndef fit_func2(k, x):\n # k, b = a\n # return k 
* x + b\n # c!=0\n a, b, c = k\n # return (((1+c*(x-a))**(-1-1/c))/b)*np.exp(-np.exp((a-x)/b))\n # c==0\n return np.exp((a + x) / b) * np.exp(-np.exp((a + x) / b)) / b\n\n\n# 残差\ndef dist1(k, x, y):\n return fit_func1(k, x) - y\n\n\ndef dist2(k, x, y):\n return fit_func2(k, x) - y\n\n\n# GEV广义极值分布填充尾部\ndef gevTail(rx, rp):\n x_range = np.arange(len(rx))\n\n # 目前计算的是-6.6%~4.2%收益率之间的概率密度\n\n rx = np.array(rx)\n rp = np.array(rp)\n\n # 填充右尾巴\n x90 = int(np.percentile(x_range, 90))\n x91 = int(np.percentile(x_range, 91))\n x92 = int(np.percentile(x_range, 92))\n x93 = int(np.percentile(x_range, 93))\n x94 = int(np.percentile(x_range, 94))\n x95 = int(np.percentile(x_range, 95))\n x96 = int(np.percentile(x_range, 96))\n x97 = int(np.percentile(x_range, 97))\n x98 = int(np.percentile(x_range, 98))\n x99 = int(np.percentile(x_range, 99))\n x100 = int(np.percentile(x_range, 100))\n\n fit_x_right = np.array([x90, x91, x92, x93, x94, x95, x96, x97, x98, x99, x100])\n fit_r_right = []\n fit_p_right = []\n\n for i in fit_x_right:\n fit_r_right.append(rx[i])\n fit_p_right.append(rp[i])\n\n fit_r_right = np.array(fit_r_right)\n fit_p_right = np.array(fit_p_right)\n\n plt.figure(figsize=(15, 10))\n plt.title(u'GEV right tail')\n plt.xlabel(u'RT')\n plt.ylabel(u'density')\n plt.plot(fit_r_right, fit_p_right, 'k.')\n plt.plot(rx, rp, 'b', label='diff')\n\n par = [1, 1, 0]\n\n var = leastsq(dist1, par, args=(fit_r_right, fit_p_right))\n a, b, c = var[0]\n # print(a, b, c)\n\n right_predict_x = np.linspace(rx.min(), rx.max() * 4, 800)\n right_predict_y = np.exp((a - right_predict_x) / b) * np.exp(-np.exp((a - right_predict_x) / b)) / b\n\n # 填充左尾巴\n x0 = int(np.percentile(x_range, 0.1))\n x1 = int(np.percentile(x_range, 1))\n x2 = int(np.percentile(x_range, 2))\n x3 = int(np.percentile(x_range, 3))\n x4 = int(np.percentile(x_range, 4))\n x5 = int(np.percentile(x_range, 5))\n x6 = int(np.percentile(x_range, 6))\n x7 = int(np.percentile(x_range, 7))\n x8 = int(np.percentile(x_range, 8))\n x9 = int(np.percentile(x_range, 9))\n x10 = int(np.percentile(x_range, 10))\n\n fit_x_left = np.array([x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10])\n fit_r_left = []\n fit_p_left = []\n\n for i in fit_x_left:\n fit_r_left.append(rx[i])\n fit_p_left.append(rp[i])\n\n fit_r_left = np.array(fit_r_left)\n fit_p_left = np.array(fit_p_left)\n\n plt.figure(figsize=(15, 10))\n plt.title(u'GEV left tail')\n plt.xlabel(u'RT')\n plt.ylabel(u'density')\n plt.plot(fit_r_left, fit_p_left, 'k.')\n plt.plot(rx, rp, 'b', label='diff')\n\n par = [-0.015511914703435948, 0.02563136681478838, 1.0]\n\n var = leastsq(dist2, par, args=(fit_r_left, fit_p_left))\n a1, b1, c1 = var[0]\n print(a1, b1, c1)\n\n left_predict_x = np.linspace(rx.min() * 3, rx.max(), 1000)\n left_predict_y = np.exp((a1 + left_predict_x) / b1) * np.exp(-np.exp((a1 + left_predict_x) / b1)) / b1\n\n # 填充尾部后的图\n plt.figure(figsize=(20, 10))\n plt.title(u'GEV tail')\n plt.xlabel(u'RT')\n plt.ylabel(u'density')\n plt.plot(fit_r_left, fit_p_left, 'k.')\n plt.plot(fit_r_right, fit_p_right, 'k.')\n plt.plot(rx, rp, 'b', label='diff')\n\n plt.plot(right_predict_x, right_predict_y, 'c', linestyle=\":\")\n\n plt.plot(left_predict_x, left_predict_y, 'y', linestyle=\":\")\n\n plt.show()\n\n # 截取完整概率分布\n get_right_x = right_predict_x[right_predict_x >= rx.max()]\n get_left_x = left_predict_x[left_predict_x <= rx.min()]\n finish_x = np.hstack((get_left_x, rx))\n finish_x = np.hstack((finish_x, get_right_x))\n finish_y = np.hstack((np.exp((a1 + get_left_x) / b1) * np.exp(-np.exp((a1 + 
get_left_x) / b1)) / b1, rp))\n finish_y = np.hstack((finish_y, np.exp((a - get_right_x) / b) * np.exp(-np.exp((a - get_right_x) / b)) / b))\n\n # 完整概率分布图\n plt.figure(figsize=(20, 10))\n plt.title(u'GEV tail')\n plt.xlabel(u'RT')\n plt.ylabel(u'density')\n plt.plot(finish_x, finish_y, 'b', label='diff')\n plt.show()\n # 至此,期权隐含概率分布施工完成\n return finish_x, finish_y\n\n\n# 期权隐含概率分布所携带的信息\n# 期权隐含概率分布的隐含矩:二阶矩-隐含波动率、三阶矩-隐含偏度、四阶矩-隐含峰度\ndef impInfo(finish_y):\n mean_p = finish_y.mean()\n temp_y = pd.Series(finish_y)\n\n imp_vol = temp_y.std()\n imp_skew = temp_y.skew()\n imp_kurt = temp_y.kurt()\n\n return imp_vol, imp_skew, imp_kurt\n\n\n\ndef tail_risk(option_data, r, S):\n\n # 调用以上函数进行研究\n\n data_call, data_put = splitCallPut(option_data)\n\n data_call, data_put = assetPriceCallPut(data_call, data_put, r)\n\n call_data, put_data = getOutofMoney(data_call, data_put)\n\n call_data, put_data = impVolCal(call_data, put_data)\n\n res_df = getRes(call_data, put_data)\n\n vol_after_kt, tmesh, kmesh = impVolInterp(res_df)\n\n # 这里可以画一个50etf波动率曲面了,示例代码如下\n # pylab.style.use('ggplot')\n # maturityMesher, strikeMesher = np.meshgrid(tmesh, kmesh)\n # pylab.figure(figsize = (12,7))\n # ax = pylab.gca(projection = '3d')\n # surface1 = ax.plot_surface(strikeMesher, maturityMesher, vol_after_kt*100, cmap = cm.jet)\n # pylab.colorbar(surface1, shrink=0.75)\n # pylab.title(\"50ETF期权波动率曲面(2017-10-17)\")\n # pylab.xlabel('Strike')\n # pylab.ylabel('Maturity')\n # ax.set_zlabel('Volatility(%)')\n # pylab.show()\n\n call_price = callPricing(vol_after_kt, r ,call_data['行权价格'].min(), tmesh, kmesh, S)\n\n # 这里可以画一个欧式看涨期权定价曲面,示例代码如下\n # pylab.style.use('ggplot')\n # maturityMesher, strikeMesher = np.meshgrid(tmesh, kmesh)\n # pylab.figure(figsize = (12,7))\n # ax = pylab.gca(projection = '3d')\n # surface2 = ax.plot_surface(strikeMesher, maturityMesher, call_price, cmap = cm.jet)\n # pylab.colorbar(surface2, shrink=0.75)\n # pylab.title(\"欧式看涨期权定价函数(2017-10-17)\")\n # pylab.xlabel('Strike')\n # pylab.ylabel('Maturity')\n # ax.set_zlabel('Call Price')\n # pylab.show()\n\n rx, rp = impProbability(call_price, S)\n\n ror, probability = gevTail(rx, rp)\n\n imp_vol, imp_skew, imp_kurt = impInfo(probability)\n\n return tmesh, kmesh, vol_after_kt, call_price, ror, probability, imp_vol, imp_skew, imp_kurt\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tail/imp_prob_distribution.py","file_name":"imp_prob_distribution.py","file_ext":"py","file_size_in_byte":17509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"340341519","text":"from datetime import datetime\n\nfrom api.services.view_models import MongoObject\n\n\ndef map_json_to_object(json):\n user = json['user'].lower()\n list = json['list'].lower()\n beverages = []\n for bev in json['beverages']:\n tempjson = {\"name\": bev}\n beverages.append(tempjson)\n mongo_object = MongoObject(user, list, beverages)\n if json['imageUrl'] is not None:\n mongo_object.imageUrl = json['imageUrl']\n if json['displayName'] is not None:\n mongo_object.displayName = json['displayName']\n mongo_object.dateinserted = datetime.utcnow().isoformat()\n mongo_object.dateupdated = datetime.utcnow().isoformat()\n return mongo_object","sub_path":"bevrand.playlistapi/api/services/custom_mapper.py","file_name":"custom_mapper.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"463521579","text":"import logging\nimport os\n\nimport hydra\nfrom omegaconf import 
OmegaConf\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.loggers import NeptuneLogger\nfrom torchvision.transforms import transforms\n\nfrom hiding_adversarial_attacks._neptune.utils import get_neptune_logger\nfrom hiding_adversarial_attacks.callbacks.neptune_callback import NeptuneLoggingCallback\nfrom hiding_adversarial_attacks.config.classifier_training_config import (\n ClassifierTrainingConfig,\n)\nfrom hiding_adversarial_attacks.config.config_validator import ConfigValidator\nfrom hiding_adversarial_attacks.data_modules.utils import (\n VisionDataModuleUnionType,\n get_data_module,\n)\nfrom hiding_adversarial_attacks.utils import get_model\n\nlogger = logging.getLogger(__file__)\n\n\ndef train(\n data_module: VisionDataModuleUnionType,\n neptune_logger: NeptuneLogger,\n config: ClassifierTrainingConfig,\n):\n train_loader = data_module.train_dataloader()\n validation_loader = data_module.val_dataloader()\n\n checkpoint_callback = hydra.utils.instantiate(config.classifier.model_checkpoint)\n\n neptune_callback = NeptuneLoggingCallback(\n log_path=config.log_path, trash_run=config.trash_run\n )\n trainer = Trainer(\n gpus=config.gpus,\n logger=neptune_logger,\n callbacks=[checkpoint_callback, neptune_callback],\n )\n\n model = get_model(config)\n\n trainer.fit(model, train_loader, validation_loader)\n\n\ndef test(data_module, neptune_logger: NeptuneLogger, config: ClassifierTrainingConfig):\n test_loader = data_module.test_dataloader()\n\n trainer = Trainer(gpus=config.gpus, logger=neptune_logger)\n\n if len(config.checkpoint) == 0 or not os.path.isfile(config.checkpoint):\n raise SystemExit(\n \"---- ERROR: Please specify a valid checkpoint path. Exiting. -----\"\n )\n model = get_model(config).load_from_checkpoint(config.checkpoint)\n\n trainer.test(model, test_loader, ckpt_path=\"best\")\n\n\n@hydra.main(config_name=\"classifier_training_config\")\ndef run(config: ClassifierTrainingConfig) -> None:\n config_validator = ConfigValidator()\n config_validator.validate(config)\n\n logger.info(\"Starting train_classifier.py\")\n logger.info(f\"cwd: {os.getcwd()}\")\n logger.info(\"**** Parameters: ******\")\n logger.info(OmegaConf.to_yaml(config))\n\n data_module = get_data_module(\n data_set=config.data_set.name,\n data_path=config.data_set.external_path,\n download=config.download,\n batch_size=config.batch_size,\n val_split=config.val_split,\n transform=transforms.ToTensor(),\n random_seed=config.random_seed,\n )\n\n experiment_name = config.data_set.name\n config.tags.append(config.data_set.name)\n config.tags.append(\"test\" if config.test else \"train\")\n if config.trash_run:\n config.tags.append(\"trash\")\n neptune_logger = get_neptune_logger(config, experiment_name, list(config.tags))\n\n config.log_path = os.path.join(\n config.log_path, neptune_logger.name, neptune_logger.version\n )\n\n if config.test:\n test(data_module, neptune_logger, config)\n else:\n train(data_module, neptune_logger, config)\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"src/hiding_adversarial_attacks/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"102132517","text":"# -*- coding: utf8 -*-\n\"\"\"\n.. module:: burpui.utils\n :platform: Unix\n :synopsis: Burp-UI utils module.\n\n.. 
moduleauthor:: Ziirish \n\n\"\"\"\nimport math\nimport string\nimport sys\nimport zipfile\nimport tarfile\nimport logging\n\nfrom inspect import currentframe, getouterframes\n\nif sys.version_info >= (3, 0):\n long = int # pragma: no cover\n\n\nclass human_readable(long):\n \"\"\"define a human_readable class to allow custom formatting\n format specifiers supported :\n em : formats the size as bits in IEC format i.e. 1024 bits (128 bytes) = 1Kib\n eM : formats the size as Bytes in IEC format i.e. 1024 bytes = 1KiB\n sm : formats the size as bits in SI format i.e. 1000 bits = 1kb\n sM : formats the size as bytes in SI format i.e. 1000 bytes = 1KB\n cm : format the size as bit in the common format i.e. 1024 bits (128 bytes) = 1Kb\n cM : format the size as bytes in the common format i.e. 1024 bytes = 1KB\n\n code from: http://code.activestate.com/recipes/578323-human-readable-filememory-sizes-v2/\n \"\"\"\n def __format__(self, fmt): # pragma: no cover\n # is it an empty format or not a special format for the size class\n if fmt == \"\" or fmt[-2:].lower() not in [\"em\", \"sm\", \"cm\"]:\n if fmt[-1].lower() in ['b', 'c', 'd', 'o', 'x', 'n', 'e', 'f', 'g', '%']:\n # Numeric format.\n return long(self).__format__(fmt)\n else:\n return str(self).__format__(fmt)\n\n if sys.version_info >= (3, 0):\n chars = string.ascii_lowercase\n else:\n chars = string.lowercase\n # work out the scale, suffix and base\n factor, suffix = (8, \"b\") if fmt[-1] in chars else (1, \"B\")\n base = 1024 if fmt[-2] in [\"e\", \"c\"] else 1000\n\n # Add the i for the IEC format\n suffix = \"i\" + suffix if fmt[-2] == \"e\" else suffix\n\n mult = [\"\", \"K\", \"M\", \"G\", \"T\", \"P\"]\n\n val = float(self) * factor\n i = 0 if val < 1 else int(math.log(val, base)) + 1\n v = val / math.pow(base, i)\n v, i = (v, i) if v > 0.5 else (v * base, i - 1)\n\n # Identify if there is a width and extract it\n width = \"\" if fmt.find(\".\") == -1 else fmt[:fmt.index(\".\")]\n precis = fmt[:-2] if width == \"\" else fmt[fmt.index(\".\"):-2]\n\n # do the precision bit first, so width/alignment works with the suffix\n if float(self) == 0:\n return \"{0:{1}f}\".format(v, precis)\n t = (\"{0:{1}f}\" + mult[i] + suffix).format(v, precis)\n\n return \"{0:{1}}\".format(t, width) if width != \"\" else t\n\n\nif sys.version_info >= (3, 0): # pragma: no cover\n class BUIlogger(logging.Logger):\n padding = 0\n \"\"\"Logger class for more convenience\"\"\"\n def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None):\n \"\"\"\n Try to guess where was call the function\n \"\"\"\n cf = currentframe()\n caller = getouterframes(cf)\n cpt = 0\n size = len(caller)\n me = __file__\n if me.endswith('.pyc'):\n me = me[:-1]\n # It's easy to get the _logger parent function because it's the\n # following frame\n while cpt < size - 1:\n (_, filename, _, function_name, _, _) = caller[cpt]\n if function_name == '_logger' and filename == me:\n cpt += 1\n break\n cpt += 1\n cpt += self.padding\n (frame, filename, line_number, function_name, lines, index) = caller[cpt]\n return super(BUIlogger, self).makeRecord(name, level, filename, line_number, msg, args, exc_info, func=function_name, extra=extra, sinfo=sinfo)\nelse:\n class BUIlogger(logging.Logger):\n padding = 0\n \"\"\"Logger class for more convenience\"\"\"\n def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):\n \"\"\"Try to guess where was call the function\"\"\"\n cf = currentframe()\n caller = getouterframes(cf)\n cpt = 0\n 
size = len(caller)\n me = __file__\n if me.endswith('.pyc'):\n me = me[:-1]\n # It's easy to get the _logger parent function because it's the\n # following frame\n while cpt < size - 1:\n (_, filename, _, function_name, _, _) = caller[cpt]\n if function_name == '_logger' and filename == me:\n cpt += 1\n break\n cpt += 1\n cpt += self.padding\n (frame, filename, line_number, function_name, lines, index) = caller[cpt]\n return super(BUIlogger, self).makeRecord(name, level, filename, line_number, msg, args, exc_info, func=function_name, extra=extra)\n\n\nclass BUIlogging(object):\n logger = None\n monkey = None\n padding = 0\n \"\"\"Provides a generic logging method for all modules\"\"\"\n def _logger(self, level, msg, *args):\n \"\"\"generic logging method so that the logging is backend-independent\"\"\"\n if self.logger and self.logger.getEffectiveLevel() <= logging.getLevelName(level.upper()):\n sav = None\n if not self.monkey:\n self.monkey = BUIlogger(__name__)\n # bui-agent overrides the _logger function so we add a padding offset\n self.monkey.padding = self.padding\n # dynamically monkey-patch the makeRecord function\n sav = self.logger.makeRecord\n self.logger.makeRecord = self.monkey.makeRecord\n self.logger.log(logging.getLevelName(level.upper()), msg, *args)\n self.logger.makeRecord = sav\n\n\nclass BUIcompress():\n \"\"\"Provides a context to generate any kind of archive supported by burp-ui\"\"\"\n def __init__(self, name, archive, zip64=False): # pragma: no cover\n self.name = name\n self.archive = archive\n self.zip64 = zip64\n\n def __enter__(self):\n self.arch = None\n if self.archive == 'zip':\n self.arch = zipfile.ZipFile(self.name, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=self.zip64)\n elif self.archive == 'tar.gz':\n self.arch = tarfile.open(self.name, 'w:gz')\n elif self.archive == 'tar.bz2':\n self.arch = tarfile.open(self.name, 'w:bz2')\n return self\n\n def __exit__(self, type, value, traceback):\n self.arch.close()\n\n def append(self, path, arcname):\n if self.archive == 'zip':\n self.arch.write(path, arcname)\n elif self.archive in ['tar.gz', 'tar.bz2']:\n self.arch.add(path, arcname=arcname, recursive=False)\n\n\ndef basic_login_from_request(request, app):\n if app.auth != 'none':\n creds = request.headers.get('Authorization')\n if creds:\n creds = creds.replace('Basic ', '', 1)\n try:\n import base64\n login, password = base64.b64decode(creds.encode('utf-8')).decode('utf-8').split(':')\n except: # pragma: no cover\n pass\n if login:\n user = app.uhandler.user(login)\n if user.active and user.login(login, password):\n from flask.ext.login import login_user\n login_user(user)\n return user\n\n return None\n","sub_path":"burpui/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"539783241","text":"\n\nfrom xai.brain.wordbase.nouns._liquidator import _LIQUIDATOR\n\n#calss header\nclass _LIQUIDATORS(_LIQUIDATOR, ):\n\tdef __init__(self,): \n\t\t_LIQUIDATOR.__init__(self)\n\t\tself.name = \"LIQUIDATORS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"liquidator\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_liquidators.py","file_name":"_liquidators.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"204539086","text":"#!/usr/bin/python3 \n#coding:utf-8\nfrom bs4 import BeautifulSoup\nfrom urllib.request import 
urlopen\nimport re\nimport random\nimport webbrowser\nimport requests\nimport os\nfrom urllib.request import urlretrieve\nimport multiprocessing as mp\nimport time\nfrom urllib.request import urlopen, urljoin\nimport asyncio\nimport aiohttp\n\ndef test1():\n # if has Chinese, apply decode()\n html = urlopen(\n \"https://morvanzhou.github.io/static/scraping/basic-structure.html\"\n ).read().decode('utf-8')\n print(html)\n\n res = re.findall(r\"(.+?)\", html)\n print(\"\\nPage title is: \", res[0])\n\n res = re.findall(r\"
<p>(.*?)</p>
\", html, flags=re.DOTALL) # re.DOTALL if multi line\n print(\"\\nPage paragraph is: \", res[0])\n\n res = re.findall(r'href=\"(.*?)\"', html)\n print(\"\\nAll links: \", res)\n\ndef test2():\n # if has Chinese, apply decode()\n html = urlopen(\"https://morvanzhou.github.io/static/scraping/basic-structure.html\").read().decode('utf-8')\n # print(html)\n soup = BeautifulSoup(html, features='lxml')\n print(soup.h1)\n all_href = soup.find_all('a')\n all_href = [l['href'] for l in all_href]\n print('\\n', all_href)\n\ndef test3():\n # if has Chinese, apply decode()\n html = urlopen(\"https://morvanzhou.github.io/static/scraping/list.html\").read().decode('utf-8')\n # print(html)\n soup = BeautifulSoup(html, features='lxml')\n\n # use class to narrow search\n month = soup.find_all('li', {\"class\": \"month\"})\n for m in month:\n print(m.get_text())\n\n jan = soup.find('ul', {\"class\": 'jan'})\n print(jan)\n d_jan = jan.find_all('li') # use jan as a parent\n for d in d_jan:\n print(d.get_text())\n\ndef test4():\n # if has Chinese, apply decode()\n html = urlopen(\"https://morvanzhou.github.io/static/scraping/table.html\").read().decode('utf-8')\n soup = BeautifulSoup(html, features='lxml')\n\n img_links = soup.find_all(\"img\", {\"src\": re.compile(r'.*?\\.jpg')})\n for link in img_links:\n print(link['src'])\n course_links = soup.find_all('a', {'href': re.compile('https://morvan.*')})\n for link in course_links:\n print(link['href'])\n\ndef test5():\n base_url = \"https://baike.baidu.com\"\n his = [\"/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711\"]\n for i in range(20):\n url = base_url + his[-1]\n\n html = urlopen(url).read().decode('utf-8')\n soup = BeautifulSoup(html, features='lxml')\n print(i, soup.find('h1').get_text(), ' url: ', his[-1])\n\n # find valid urls\n sub_urls = soup.find_all(\"a\", {\"target\": \"_blank\", \"href\": re.compile(\"/item/(%.{2})+$\")})\n\n if len(sub_urls) != 0: \n his.append(random.sample(sub_urls, 1)[0]['href'])\n else:\n # no valid sub link found\n his.pop()\n\ndef test6():\n param = {\"wd\": \"莫烦Python\"} # 搜索的信息\n r = requests.get('http://www.baidu.com/s', params=param)\n webbrowser.open(r.url)\n\n payload = {'username': 'Morvan', 'password': 'password'}\n r = requests.post('http://pythonscraping.com/pages/cookies/welcome.php', data=payload)\n print(r.cookies.get_dict())\n r = requests.get('http://pythonscraping.com/pages/cookies/profile.php', cookies=r.cookies)\n print(r.text)\n\n session = requests.Session()\n payload = {'username': 'Morvan', 'password': 'password'}\n r = session.post('http://pythonscraping.com/pages/cookies/welcome.php', data=payload)\n print(r.cookies.get_dict())\n r = session.get(\"http://pythonscraping.com/pages/cookies/profile.php\")\n print(r.text)\n\ndef test7():\n # os.makedirs('./img/', exist_ok=True)\n IMAGE_URL = \"https://morvanzhou.github.io/static/img/description/learning_step_flowchart.png\"\n # urlretrieve(IMAGE_URL, './img/image1.png')\n\n # r = requests.get(IMAGE_URL)\n # with open('./img/image2.png', 'wb') as f:\n # f.write(r.content)\n\n r = requests.get(IMAGE_URL, stream=True) # stream loading \n with open('./img/image3.png', 'wb') as f:\n for chunk in r.iter_content(chunk_size=32): #分块下载,下一点,存一点\n f.write(chunk)\n\ndef download_instence():\n URL = \"http://www.nationalgeographic.com.cn/animals/\"\n html = requests.get(URL).text\n soup = BeautifulSoup(html, 'lxml')\n img_ul = soup.find_all('ul', {\"class\": \"img_list\"})\n print(img_ul)\n for ul in img_ul:\n imgs = ul.find_all('img')\n for img in imgs:\n url = 
img['src']\n r = requests.get(url, stream=True)\n image_name = url.split('/')[-1]\n with open('./img/%s' % image_name, 'wb') as f:\n for chunk in r.iter_content(chunk_size=128):\n f.write(chunk)\n print('Saved %s' % image_name)\n\nbase_url = 'https://morvanzhou.github.io/'\n\ndef crawl(url):\n response = urlopen(url)\n return response.read().decode('utf-8')\n\ndef parse(html):\n soup = BeautifulSoup(html, 'lxml')\n urls = soup.find_all('a', {\"href\": re.compile('^/.+?/$')})\n title = soup.find('h1').get_text().strip()\n page_urls = set([urljoin(base_url, url['href']) for url in urls]) # 去重\n url = soup.find('meta', {'property': \"og:url\"})['content']\n return title, page_urls, url\n\ndef normal_crawl():\n unseen = set([base_url,])\n seen = set()\n count, t1 = 1, time.time()\n while len(unseen) != 0: # still get some url to visit\n if restricted_crawl and len(seen) > 20:\n break \n print('\\nDistributed Crawling...')\n htmls = [crawl(url) for url in unseen]\n\n print('\\nDistributed Parsing...')\n results = [parse(html) for html in htmls]\n\n print('\\nAnalysing...')\n seen.update(unseen) # seen the crawled\n unseen.clear() # nothing unseen\n\n for title, page_urls, url in results:\n print(count, title, url)\n count += 1\n unseen.update(page_urls - seen) # get new url to crawl\n print('Total time: %.1f s' % (time.time()-t1, )) # 53 s\n\ndef distributr_crawl():\n unseen = set([base_url,])\n seen = set()\n pool = mp.Pool(4) \n count, t1 = 1, time.time()\n while len(unseen) != 0: # still get some url to visit\n if restricted_crawl and len(seen) > 20:\n break\n print('\\nDistributed Crawling...')\n crawl_jobs = [pool.apply_async(crawl, args=(url,)) for url in unseen]\n htmls = [j.get() for j in crawl_jobs] # request connection\n\n print('\\nDistributed Parsing...')\n parse_jobs = [pool.apply_async(parse, args=(html,)) for html in htmls]\n results = [j.get() for j in parse_jobs] # parse html\n\n print('\\nAnalysing...')\n seen.update(unseen) # seen the crawled\n unseen.clear() # nothing unseen\n\n for title, page_urls, url in results:\n print(count, title, url)\n count += 1\n unseen.update(page_urls - seen) # get new url to crawl\n print('Total time: %.1f s' % (time.time()-t1, )) # 16 s !!!\n\nURL = 'https://morvanzhou.github.io/'\n\nasync def job(t): # async 形式的功能\n print('Start job ', t)\n await asyncio.sleep(t) # 等待 \"t\" 秒, 期间切换其他任务\n print('Job ', t, ' takes ', t, ' s')\n\nasync def main1(loop): # async 形式的功能\n tasks = [\n loop.create_task(job(t)) for t in range(1, 3)\n ] # 创建任务, 但是不执行\n await asyncio.wait(tasks) # 执行并等待所有任务完成\ndef async_test1():\n t1 = time.time()\n loop = asyncio.get_event_loop() # 建立 loop\n loop.run_until_complete(main1(loop)) # 执行 loop\n loop.close() # 关闭 loop\n print(\"Async total time : \", time.time() - t1)\n\nasync def job2(session):\n response = await session.get(URL) # 等待并切换\n return str(response.url)\n\nasync def main2(loop):\n async with aiohttp.ClientSession() as session: # 官网推荐建立 Session 的形式\n tasks = [loop.create_task(job2(session)) for _ in range(2)]\n finished, unfinished = await asyncio.wait(tasks)\n all_results = [r.result() for r in finished] # 获取所有结果\n print(all_results)\ndef async_test2():\n t1 = time.time()\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main2(loop))\n loop.close()\n print(\"Async total time:\", time.time() - t1)\n\nif __name__ == '__main__':\n 
async_test2()\n\n\n\n\n\n","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":8676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"453272742","text":"###############################################################################\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n###############################################################################\nimport os\nimport sys\nimport onnx\nimport unittest\nimport keras2onnx\nimport numpy as np\nfrom keras2onnx.proto import keras, is_tf_keras\nfrom distutils.version import StrictVersion\n\n\nworking_path = os.path.abspath(os.path.dirname(__file__))\ntmp_path = os.path.join(working_path, 'temp')\n\n\nclass TestKerasApplications(unittest.TestCase):\n\n def setUp(self):\n self.model_files = []\n\n def tearDown(self):\n for fl in self.model_files:\n os.remove(fl)\n\n @staticmethod\n def asarray(*a):\n return np.array([a], dtype='f')\n\n @staticmethod\n def get_temp_file(name):\n if not os.path.exists(tmp_path):\n os.mkdir(tmp_path)\n return os.path.join(tmp_path, name)\n\n def run_onnx_runtime(self, case_name, onnx_model, data, expected, rtol=1.e-3, atol=1.e-6):\n temp_model_file = TestKerasApplications.get_temp_file('temp_' + case_name + '.onnx')\n onnx.save_model(onnx_model, temp_model_file)\n try:\n import onnxruntime\n sess = onnxruntime.InferenceSession(temp_model_file)\n except ImportError:\n return True\n\n if not isinstance(expected, list):\n expected = [expected]\n\n data = data if isinstance(data, list) else [data]\n input_names = sess.get_inputs()\n # to avoid too complicated test code, we restrict the input name in Keras test cases must be\n # in alphabetical order. 
It's always true unless there is any trick preventing that.\n feed = zip(sorted(i_.name for i_ in input_names), data)\n actual = sess.run(None, dict(feed))\n res = all(np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol) for n_ in range(len(expected)))\n if res and temp_model_file not in self.model_files: # still keep the failed case files for the diagnosis.\n self.model_files.append(temp_model_file)\n\n if not res:\n for n_ in range(len(expected)):\n expected_list = expected[n_].flatten()\n actual_list = actual[n_].flatten()\n diff_list = abs(expected_list - actual_list)\n count_total = len(expected_list)\n count_error = 0\n\n for e_, a_, d_ in zip(expected_list, actual_list, diff_list):\n if d_ > atol + rtol * abs(a_):\n if count_error < 10: # print the first 10 mismatches\n print(\n \"case = \" + case_name + \", result mismatch for expected = \" + str(e_) +\n \", actual = \" + str(a_), file=sys.stderr)\n count_error = count_error + 1\n\n print(\"case = \" + case_name + \", \" +\n str(count_error) + \" mismatches out of \" + str(count_total) + \" for list \" + str(n_),\n file=sys.stderr)\n\n return res\n\n def _test_keras_model(self, model, model_name='onnx_conversion', rtol=1.e-3, atol=1.e-5, img_size=224):\n preprocess_input = keras.applications.resnet50.preprocess_input\n image = keras.preprocessing.image\n\n img_path = os.path.join(os.path.dirname(__file__), 'data', 'street.jpg')\n try:\n img = image.load_img(img_path, target_size=(img_size, img_size))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n preds = model.predict(x)\n onnx_model = keras2onnx.convert_keras(model, model.name)\n self.assertTrue(self.run_onnx_runtime(model_name, onnx_model, x, preds, rtol=rtol, atol=atol))\n except FileNotFoundError:\n self.assertTrue(False, 'The image data does not exist.')\n\n def test_MobileNet(self):\n mobilenet = keras.applications.mobilenet\n model = mobilenet.MobileNet(weights='imagenet')\n self._test_keras_model(model)\n\n @unittest.skipIf(StrictVersion(keras.__version__.split('-')[0]) < StrictVersion(\"2.2.3\"),\n \"There is no mobilenet_v2 module before keras 2.2.3.\")\n def test_MobileNetV2(self):\n mobilenet_v2 = keras.applications.mobilenet_v2\n model = mobilenet_v2.MobileNetV2(weights='imagenet')\n self._test_keras_model(model)\n\n def test_ResNet50(self):\n from keras.applications.resnet50 import ResNet50\n model = ResNet50(include_top=True, weights='imagenet')\n self._test_keras_model(model)\n\n def test_InceptionV3(self):\n from keras.applications.inception_v3 import InceptionV3\n model = InceptionV3(include_top=True, weights='imagenet')\n self._test_keras_model(model, img_size=299)\n\n def test_DenseNet121(self):\n from keras.applications.densenet import DenseNet121\n model = DenseNet121(include_top=True, weights='imagenet')\n self._test_keras_model(model)\n\n def test_Xception(self):\n from keras.applications.xception import Xception\n model = Xception(include_top=True, weights='imagenet')\n self._test_keras_model(model, atol=5e-3, img_size=299)\n\n def test_ACGAN(self):\n # An ACGAN generator from https://github.com/eriklindernoren/Keras-GAN/blob/master/acgan/acgan.py\n latent_dim = 100\n num_classes = 10\n channels = 1\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(128 * 7 * 7, activation=\"relu\", input_dim=latent_dim))\n model.add(keras.layers.Reshape((7, 7, 128)))\n model.add(keras.layers.BatchNormalization(momentum=0.8))\n model.add(keras.layers.UpSampling2D())\n model.add(keras.layers.Conv2D(128, 
kernel_size=3, padding=\"same\"))\n model.add(keras.layers.Activation(\"relu\"))\n model.add(keras.layers.BatchNormalization(momentum=0.8))\n model.add(keras.layers.UpSampling2D())\n model.add(keras.layers.Conv2D(64, kernel_size=3, padding=\"same\"))\n model.add(keras.layers.Activation(\"relu\"))\n model.add(keras.layers.BatchNormalization(momentum=0.8))\n model.add(keras.layers.Conv2D(channels, kernel_size=3, padding='same'))\n model.add(keras.layers.Activation(\"tanh\"))\n\n noise = keras.layers.Input(shape=(latent_dim,))\n label = keras.layers.Input(shape=(1,), dtype='int32')\n label_embedding = keras.layers.Flatten()(keras.layers.Embedding(num_classes, 100)(label))\n\n model_input = keras.layers.multiply([noise, label_embedding])\n img = model(model_input)\n\n keras_model = keras.models.Model([noise, label], img)\n x = np.random.rand(1, 100).astype(np.float32)\n y = np.random.rand(1, 1).astype(np.int32)\n\n expected = keras_model.predict([x, y])\n onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)\n\n self.assertTrue(self.run_onnx_runtime(onnx_model.graph.name, onnx_model, [x, y], expected))\n\n def test_BIGAN(self):\n # A BIGAN discriminator model from https://github.com/eriklindernoren/Keras-GAN/blob/master/bigan/bigan.py\n latent_dim = 100\n img_shape = (28, 28, 1)\n z = keras.layers.Input(shape=(latent_dim, ))\n img = keras.layers.Input(shape=img_shape)\n d_in = keras.layers.concatenate([z, keras.layers.Flatten()(img)])\n\n model = keras.layers.Dense(1024)(d_in)\n model = keras.layers.LeakyReLU(alpha=0.2)(model)\n model = keras.layers.Dropout(0.5)(model)\n model = keras.layers.Dense(1024)(model)\n model = keras.layers.LeakyReLU(alpha=0.2)(model)\n model = keras.layers.Dropout(0.5)(model)\n model = keras.layers.Dense(1024)(model)\n model = keras.layers.LeakyReLU(alpha=0.2)(model)\n model = keras.layers.Dropout(0.5)(model)\n validity = keras.layers.Dense(1, activation=\"sigmoid\")(model)\n\n keras_model = keras.models.Model([z, img], validity)\n x = np.random.rand(5, 100).astype(np.float32)\n y = np.random.rand(5, 28, 28, 1).astype(np.float32)\n onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)\n\n expected = keras_model.predict([x, y])\n self.assertTrue(self.run_onnx_runtime(onnx_model.graph.name, onnx_model, [x, y], expected))\n\n def test_addition_rnn(self):\n # An implementation of sequence to sequence learning for performing addition\n # from https://github.com/keras-team/keras/blob/master/examples/addition_rnn.py\n DIGITS = 3\n MAXLEN = DIGITS + 1 + DIGITS\n HIDDEN_SIZE = 128\n BATCH_SIZE = 128\n CHARS_LENGTH = 12\n\n for RNN in [keras.layers.LSTM, keras.layers.GRU, keras.layers.SimpleRNN]:\n model = keras.models.Sequential()\n model.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, CHARS_LENGTH)))\n model.add(keras.layers.RepeatVector(DIGITS + 1))\n model.add(RNN(HIDDEN_SIZE, return_sequences=True))\n model.add(keras.layers.TimeDistributed(keras.layers.Dense(CHARS_LENGTH, activation='softmax')))\n onnx_model = keras2onnx.convert_keras(model, model.name)\n x = np.random.rand(BATCH_SIZE, MAXLEN, CHARS_LENGTH).astype(np.float32)\n expected = model.predict(x)\n self.assertTrue(self.run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected))\n\n if __name__ == \"__main__\":\n unittest.main()\n","sub_path":"applications/test_keras_applications.py","file_name":"test_keras_applications.py","file_ext":"py","file_size_in_byte":9587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} 
{"seq_id":"77780240","text":"# Copyright 2021, Gregory Happ, All rights reserved.\n# ridge regression from scratch\n# recommended methods for most users are .fit, .predict, .score, and .summary\n# it is highly recommended to scale your independent variables\n\nclass ridge():\n \n def __init__(self, LAMBDA=1):\n self.LAMBDA = LAMBDA\n return None\n \n # multiply two matrices (used in .fit)\n def __matmul(self, A, B):\n nrows, ncols = len(A), len(B[0])\n product = [[0 for col in range(ncols)] for row in range(nrows)]\n for nrow in range(nrows):\n for ncol in range(ncols):\n new_element = 0\n for a, b in zip(A[nrow], [brow[ncol] for brow in B]):\n new_element += a * b\n product[nrow][ncol] = new_element\n return product\n \n # transpose (used in .fit)\n def __transpose(self, A):\n return [[A[col][row] for col in range(len(A))] for row in range(len(A[0]))]\n\n # multiply every element in a list by a value (used in .__inverse)\n def __lmultiply(self, l, factor):\n return [factor * element for element in l]\n\n # subtract two lists (used in .__inverse)\n def __subtract(self, l1, l2):\n return [el1 - el2 for el1, el2 in zip(l1, l2)]\n \n # inverse (used in .fit)\n def __inverse(self, A):\n from copy import copy\n M = copy(A)\n Identity = [[1.0 if col == row else 0.0 for col in range(len(A[0]))] for row in range(len(A))] # create an identity matrix\n I = copy(Identity)\n while M != Identity: # we keep trying to solve it until the matrix is the identity matrix\n for i in range(len(M)): # loop through each row\n if M[i][i] != 1: # if the row doesn't have a 1 where it is supposed to, we need to do some operations:\n counter = i\n while M[i][i] != 1:\n if counter >= len(M):\n counter = 0\n else:\n if M[counter][i] != 0:\n k = (M[i][i] - 1) / M[counter][i]\n rowmul, Irowmul = self.__lmultiply(M[counter], k), self.__lmultiply(I[counter], k)\n M[i], I[i] = self.__subtract(M[i], rowmul), self.__subtract(I[i], Irowmul)\n else:\n counter += 1\n for j in range(len(M)): # once we have a 1 in the correct index for that row, we make all other rows have 0's for that column\n if i != j and M[j][i] != 0:\n k = M[j][i]\n rowmul, Irowmul = self.__lmultiply(M[i], k), self.__lmultiply(I[i], k)\n M[j], I[j] = self.__subtract(M[j], rowmul), self.__subtract(I[j], Irowmul) \n return I\n \n # add two matrices (used in .fit)\n def __matadd(self, A, B):\n nrows, ncols = len(A), len(B[0])\n sum = [[0 for col in range(ncols)] for row in range(nrows)]\n for nrow in range(nrows):\n for ncol in range(ncols):\n sum[nrow][ncol] = A[nrow][ncol] + B[nrow][ncol]\n return sum\n \n # multiply a matrix by a value (used in .coef_error)\n def __multiply(self, A, k):\n nrows, ncols = len(A), len(A[0])\n product = [[0 for col in range(ncols)] for row in range(nrows)]\n for row in range(nrows):\n for col in range(ncols):\n product[row][col] = A[row][col] * k\n return product\n \n # train model\n def fit(self, X, y):\n Xs = [[1] + row for row in X]\n t = self.__transpose(Xs)\n XT_X = self.__matmul(t, Xs)\n lambda_I = [[self.LAMBDA if col == row else 0.0 for col in range(len(XT_X[0]))] for row in range(len(XT_X))] # create an identity matrix\n lambda_I[0][0] = 0 # we don't penalize the intercept\n self.coef_ = [List[0] for List in self.__matmul(self.__matmul(self.__inverse(self.__matadd(XT_X, lambda_I)), t), [[Y] for Y in y])]\n self.intercept_ = self.coef_[0]\n self.coef_ = self.coef_[1:]\n \n # predict\n def predict(self, X):\n result = []\n for List in X:\n prediction = self.intercept_\n for i in range(len(self.coef_)):\n prediction += 
self.coef_[i] * List[i]\n result.append(prediction)\n return result\n \n # get RSS (Residual Sum of Squares) (used in .RSE)\n def __getRSS(self, X, y):\n ymean = sum(y) / len(y)\n predictions = self.predict(X)\n RSS = 0.0\n for y_i, ypred_i in zip(y, predictions):\n RSS += (y_i - ypred_i)**2\n return RSS\n \n # get RSS (Residual Sum of Squares) and TSS (Total Sum of Squares) (used in .score, .F, and .F_Statistic)\n def __getRSSandTSS(self, X, y):\n ymean = sum(y) / len(y)\n predictions = self.predict(X)\n RSS = 0.0\n TSS = 0.0\n for y_i, ypred_i in zip(y, predictions):\n RSS += (y_i - ypred_i)**2\n TSS += (y_i - ymean)**2\n return RSS, TSS\n \n # R^2\n def score(self, X, y):\n RSS, TSS = self.__getRSSandTSS(X, y)\n return 1.0 - (RSS / TSS)\n \n # adjusted R^2\n def adjusted_r2(self, X, y):\n n = len(y)\n return 1.0 - (((1.0 - self.score(X, y))*(n - 1.0))/(n - len(self.coef_) - 1.0))\n \n # F-Value\n def F(self, X, y):\n RSS, TSS = self.__getRSSandTSS(X, y)\n return ((RSS - TSS) / -len(self.coef_)) / ((TSS) / len(y))\n\n # Residual Standard Error\n def RSE(self, X, y):\n return self.__getRSS(X, y) / (len(y) - len(self.coef_))\n\n # get Model Sum of Squares (used in .F_Statistic)\n def __getSSM(self, X, y):\n ymean = sum(y) / len(y)\n predictions = self.predict(X)\n SSM = 0.0\n for ypred_i in predictions:\n SSM += (ypred_i - ymean)**2\n return SSM\n\n # get critical value for F-Test\n def F_Statistic(self, X, y):\n RSS, TSS = self.__getRSSandTSS(X, y)\n SSM = self.__getSSM(X, y)\n k = len(self.coef_)\n DFM, DFE = len(y) - k, k - 1\n return (SSM/DFM)/(RSS/DFE)\n \n # get Variance (used in .__getStdev)\n def __getVariance(self, arr):\n mean = sum(arr)/len(arr)\n variances = []\n for i in arr:\n variances.append((i-mean)**2)\n return sum(variances)/len(variances)\n \n # get standard deviation (used in .F_Test)\n def __getStdev(self, arr):\n return self.__getVariance(arr)**0.5\n \n # get p-value (used in .F_Test)\n def __getP(self, x, mean, stdev):\n import math\n P = 0.5 * (1 + math.erf((x-mean)/(stdev*(2**0.5))))\n return P\n \n # F Test\n def F_Test(self, X, y):\n F = self.F(X, y)\n F_Statistic = self.F_Statistic(X, y)\n return 1.0 - self.__getP(F, F_Statistic, self.__getStdev(y))\n \n # get the standard errors of the coeficients\n def coef_error(self, X, y):\n predictions = self.predict(X)\n errors = []\n for y_i, ypred_i in zip(y, predictions):\n errors.append(y_i - ypred_i)\n Xs = [[1] + row for row in X]\n t = self.__transpose(Xs)\n arr = self.__multiply(self.__inverse(self.__matmul(t, Xs)), self.__getVariance(errors)**2)\n result = []\n for l in arr:\n result.append(sum(l))\n return result[0], result[1:]\n \n # get the t-values for the coeficients\n def t(self, X, y):\n intercept_se, coef_se = self.coef_error(X, y)\n ts = [self.intercept_ / intercept_se]\n for coef, se in zip(self.coef_, coef_se):\n ts.append(coef / se)\n return ts[0], ts[1:]\n \n # get the p-values for the coeficients\n def p(self, X, y):\n intercept_t, coef_t = self.t(X, y)\n stdev = self.__getStdev(y)\n ps = [1.0 - self.__getP(intercept_t, 0, stdev)]\n for t in coef_t:\n ps.append(1.0 - self.__getP(t, 0, stdev))\n return ps[0], ps[1:]\n\n # get \"stars\" for statistical significance tables (used in .summary)\n def __stars(self, pval):\n if pval < 0.001:\n stars = \"***\"\n elif pval < 0.01:\n stars = \"**\"\n elif pval < 0.05:\n stars = \"*\"\n else:\n stars = \"\"\n return stars\n\n \n # display a summary of the results in an academic journal style format\n def summary(self, X, y, colnames=None, title=None, 
trim=91):\n intercept = self.intercept_\n coefs = self.coef_\n se = self.coef_error(X, y)\n p = self.p(X, y)\n N = len(y)\n r2 = self.score(X, y)\n adjr2 = self.adjusted_r2(X, y)\n RSE = self.RSE(X, y)\n F = self.F(X, y)\n F_Test = self.F_Test(X, y)\n df = N - len(coefs)\n STRING = \"\"\n STRING += \"-\"*trim + '\\n'\n if title == None:\n title = \" \"*55 + \"Ridge Regression (L2 Regularization)\"\n STRING += title + '\\n'\n STRING += \"-\"*trim + '\\n'\n if colnames == None:\n colnames = []\n for x in range(1, len(coefs)+1):\n colnames.append(\"X\" + str(x))\n for var in range(len(coefs)):\n stars = self.__stars(p[1][var])\n STRING += colnames[var] + \" \"*(54-len(colnames[var])) + \" \" + str(coefs[var])+stars + '\\n'\n STRING += \" \"*55 + \"(\"+str(se[1][var])+\")\" + '\\n'\n stars = self.__stars(p[0])\n STRING += \"Constant\" + \" \"*47 + str(intercept)+stars + '\\n'\n STRING += \" \"*55 + \"(\"+str(se[0])+\")\" + '\\n'\n STRING += \"N\" + \" \"*54 + str(N) + '\\n'\n STRING += \"R^2\" + \" \"*52 + str(r2) + '\\n'\n STRING += \"Adjusted R^2\" + \" \"*43 + str(adjr2) + '\\n'\n STRING += \"Residual Standard Error\" + \" \"*32 + str(RSE) + \"(df = \" + str(df) + \")\" + '\\n'\n stars = self.__stars(F_Test)\n STRING += \"F Statistic\" + \" \"*44 + str(F)+stars + \"(df = \" + str(len(coefs)) + \"; \" + str(df) + \")\" + '\\n'\n STRING += \"-\"*trim + '\\n'\n STRING += \" \"*55 + \"***: p < 0.001\" + '\\n'\n STRING += \" \"*55 + \"**: p < 0.01\" + '\\n'\n STRING += \" \"*55 + \"*: p < 0.05\" + '\\n'\n return STRING\n","sub_path":"Ridge Regression (L2 Regularization)/ridge.py","file_name":"ridge.py","file_ext":"py","file_size_in_byte":10212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"386571469","text":"# -*- coding: utf-8 -*-\n## @package cliparser.py\n# @author Guilherme N. 
Ramos (gnramos@unb.br)\n#\n# Parses CLI arguments to provide Adapter and Controller instances.\n\nfrom argparse import ArgumentParser\nfrom zmq import Context as zmq_Context\n\nfrom adapter import (Adapter, DEFAULT_GHOST_AGENT, DEFAULT_LAYOUT,\n DEFAULT_NUMBER_OF_GHOSTS, DEFAULT_NUMBER_OF_LEARNING_RUNS,\n DEFAULT_NUMBER_OF_TEST_RUNS, DEFAULT_PACMAN_AGENT)\nfrom agents import DEFAULT_NOISE\nfrom controller import Controller\n\n## @todo properly include communication module from parent folder\nimport sys\nsys.path.insert(0, '..')\nfrom communication import (InprocServer, TCPServer, DEFAULT_CLIENT_ADDRESS,\n DEFAULT_TCP_PORT)\n\n\ndef get_Adapter(context=None, endpoint=None):\n \"\"\"Parses arguments and returns an Adapter.\"\"\"\n parser = ArgumentParser(description='Run Pac-Man adapter system.')\n parser.add_argument('-g', '--graphics', dest='graphics', default=False,\n action='store_true',\n help='display graphical user interface')\n parser.add_argument('-o', '--output', dest='output_file', type=str,\n help='results output file')\n\n group = parser.add_argument_group('Experimental Setup')\n group.add_argument('--ghost-agent', dest='ghost_agent', type=str,\n choices=['random', 'ai'], default=DEFAULT_GHOST_AGENT,\n help='select ghost agent')\n group.add_argument('-l', '--learn-num', dest='learn_runs', type=int,\n default=DEFAULT_NUMBER_OF_LEARNING_RUNS,\n help='number of games to learn from')\n group.add_argument('--layout', dest='layout', type=str,\n default=DEFAULT_LAYOUT, choices=['classic', 'medium'],\n help='Game layout')\n group.add_argument('--noise', dest='noise', type=int,\n default=DEFAULT_NOISE,\n help='introduce noise in position measurements')\n group.add_argument('--num-ghosts', dest='num_ghosts',\n type=int, choices=range(1, 5),\n default=DEFAULT_NUMBER_OF_GHOSTS,\n help='number of ghosts in game')\n group.add_argument('--pacman-agent', dest='pacman_agent', type=str,\n choices=['random', 'ai', 'eater'],\n default=DEFAULT_PACMAN_AGENT,\n help='select Pac-Man agent')\n group.add_argument('--policy-file', dest='policy_file',\n type=lambda s: unicode(s, 'utf8'),\n help='load and save Pac-Man policy from the given file')\n group.add_argument('-t', '--test-num', dest='test_runs', type=int,\n default=DEFAULT_NUMBER_OF_TEST_RUNS,\n help='number of games to test learned policy')\n\n group = parser.add_argument_group('Communication')\n group.add_argument('--addr', dest='address', type=str,\n default=DEFAULT_CLIENT_ADDRESS,\n help='Client address to connect to adapter (TCP '\n 'connection)')\n group.add_argument('--port', dest='port', type=int,\n default=DEFAULT_TCP_PORT,\n help='Port to connect to controller (TCP connection)')\n\n args, unknown = parser.parse_known_args()\n\n if context and endpoint:\n connection = 'inproc://{}'.format(endpoint)\n else:\n context = zmq_Context()\n connection = 'tcp://{}:{}'.format(args.address, args.port)\n\n adapter = Adapter(pacman_agent=args.pacman_agent,\n ghost_agent=args.ghost_agent,\n num_ghosts=args.num_ghosts,\n noise=args.noise,\n policy_file=args.policy_file,\n layout_map=args.layout,\n learn_runs=args.learn_runs,\n test_runs=args.test_runs,\n output_file=args.output_file,\n graphics=args.graphics,\n context=context, connection=connection)\n\n return adapter\n\n\ndef get_Controller(context=None, endpoint=None):\n \"\"\"Parses arguments and returns a Controller.\n\n If no server is given, instantiates a TCPServer.\"\"\"\n parser = ArgumentParser(description='Run Pac-Man controller system.')\n parser.add_argument('--port', dest='port', 
type=int,\n default=DEFAULT_TCP_PORT,\n help='TCP port to connect to adapter')\n args, unknown = parser.parse_known_args()\n\n\n if context and endpoint:\n binding = 'inproc://{}'.format(endpoint)\n else:\n context = zmq_Context()\n binding = 'tcp://*:{}'.format(args.port)\n\n return Controller(context, binding)\n","sub_path":"pacman/cliparser.py","file_name":"cliparser.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"398762905","text":"from DateTime import DateTime\nfrom LogFilter import LogFilter\n\n\nclass Log:\n code = ''\n date = DateTime\n response_size = 0\n\n def __init__(self, log):\n self.Parse(log)\n\n def Parse(self, log):\n self.code = ''.join(log.split()[13:14])\n self.date = DateTime(''.join(log.split()[8:9])[1:])\n self.response_size = int(''.join(log.split()[-1:]))\n\n\n\nclass LogAnalyzer:\n\n log_list = []\n path_to_log_file = ''\n filter = LogFilter()\n filtered_logs = []\n logs = []\n\n def __init__(self, args):\n self.ParseArgs(args)\n\n def OpenFile(self):\n with open(self.path_to_log_file, 'r') as data_file:\n self.logs = data_file.readlines()\n\n def ParseArgs(self, args):\n iterator = 0\n\n for arg in args:\n if arg == '--from':\n parsed_date = DateTime('')\n parsed_date.ArgDateParse(args[iterator + 1])\n self.filter.date_from = parsed_date\n elif arg == '--to':\n parsed_date = DateTime('')\n parsed_date.ArgDateParse(args[iterator + 1])\n self.filter.date_to = parsed_date\n self.path_to_log_file = args[iterator + 2]\n iterator += 1\n\n def Parse(self):\n for line in self.logs[1:]:\n log_line = Log(line)\n self.log_list.append(log_line)\n\n def FilterByDate(self):\n for log in self.log_list:\n int_date = log.date.DateToInt()\n if self.filter.date_from.DateToInt() <= int_date <= self.filter.date_to.DateToInt():\n self.filtered_logs.append(log)\n\n def GetRequestCount(self):\n if len(self.filtered_logs) == 0:\n return 'No info'\n return len(self.filtered_logs)\n\n def GetAverageRequestsPerSecond(self):\n seconds_count = 1\n if len(self.filtered_logs) == 0:\n return 'No info'\n buf_seconds = self.filtered_logs[0].date.DateToInt()\n\n for log in self.filtered_logs:\n if log.date.DateToInt() != buf_seconds:\n seconds_count += 1\n buf_seconds = log.date.DateToInt()\n return self.GetRequestCount() / seconds_count\n\n def GetUniqueResponseCodeCount(self):\n if len(self.filtered_logs) == 0:\n return 'No info'\n\n codes = {\n\n }\n\n for log in self.filtered_logs:\n if codes.get(log.code) is None:\n codes[log.code] = 1\n else:\n codes[log.code] += 1\n return codes\n\n def GetAverageResponsesSize(self):\n if len(self.filtered_logs) == 0:\n return 'No info'\n response_count = 0\n for responses in self.filtered_logs:\n response_count += responses.response_size\n return response_count / self.GetRequestCount()","sub_path":"LogAnalyzer.py","file_name":"LogAnalyzer.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"559864845","text":"from gevent import monkey; monkey.patch_all()\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), *(['..' 
+ os.sep] * 2))))\n\nfrom datetime import date, timedelta, datetime\nimport gevent\nimport socket\nimport sys\nimport time\nimport threading\nimport pandas as pd\nimport numpy as np\nfrom scipy.signal import find_peaks, peak_prominences\nfrom pymongo import MongoClient\n\nfrom morning.back_data import holidays\nfrom morning_server import stock_api\nfrom morning_server import stream_readwriter\nfrom morning_server import message\nfrom morning.pipeline.converter import dt\nfrom utils import time_converter\nfrom configs import db\n\nMAVG=20\nSTATE_NONE=0\nSTATE_BULL=1\nSTATE_BUY=2\n\ncode_dict = dict()\nreport = []\n\ndef get_past_data(reader, code, from_date, until_date):\n past_data = stock_api.request_stock_day_data(reader, code, from_date, until_date)\n return past_data\n\n\ndef convert_data_readable(code, past_data):\n converted_data = []\n avg_prices = np.array([])\n avg_volumes = np.array([])\n for p in past_data:\n converted = dt.cybos_stock_day_tick_convert(p)\n converted['code'] = code\n avg_prices = np.append(avg_prices, np.array([converted['close_price']]))\n avg_volumes = np.append(avg_volumes, np.array([converted['volume']]))\n\n if len(avg_prices) == MAVG:\n converted['moving_average'] = avg_prices.mean()\n avg_prices = avg_prices[1:]\n converted['volume_average'] = avg_volumes.mean()\n avg_volumes = avg_volumes[1:]\n else:\n converted['moving_average'] = 0\n converted['avg_volumes'] = 0\n\n converted_data.append(converted)\n\n return converted_data\n\n\ndef print_code_dict(code, d):\n print('DATE', d, '\\t', 'CODE', code, '\\t', 'STATE', code_dict[code]['state'], '\\t', 'BOUGHT PRICE', code_dict[code]['bought_price'])\n\n\ndef start_today_trading(reader, market_code, today):\n for code in market_code:\n data = get_past_data(reader, code, today, today)\n data =convert_data_readable(code, data)\n if len(data) != 1:\n continue\n\n data = data[0]\n if data['close_price'] < 900:\n continue\n\n state = code_dict[code]['state']\n\n if state == STATE_NONE:\n profit = (data['close_price'] - data['start_price']) / data['start_price'] * 100\n if profit > 25 and data['amount'] > 10000000000:\n past_data = get_past_data(reader, code, today - timedelta(days=90), holidays.get_yesterday(today))\n past_data = convert_data_readable(code, past_data)\n if len(past_data) < 60:\n #print(today, 'short data')\n continue\n past_data = past_data[-60:]\n past_max_amount = max([d['amount'] for d in past_data])\n past_max_price = max([d['highest_price'] for d in past_data])\n #print(past_max_price, past_max_amount, data['highest_price'], data['amount'])\n if data['highest_price'] > past_max_price and data['amount'] >= past_max_amount * 5: \n code_dict[code]['state'] = STATE_BULL\n code_dict[code]['amount'] = data['amount']\n code_dict[code]['count'] = 0\n code_dict[code]['buy_price'] = data['close_price']\n code_dict[code]['cut_price'] = (data['close_price'] - data['start_price']) / 3 + data['start_price']\n print_code_dict(code, today)\n elif state == STATE_BULL:\n #print(data['close_price'], code_dict[code]['buy_price'])\n if data['lowest_price'] < code_dict[code]['cut_price']:\n code_dict[code]['state'] = STATE_NONE\n print_code_dict(code, today)\n elif code_dict[code]['count'] > 3 and code_dict[code]['buy_price'] < data['close_price']:\n #elif code_dict[code]['count'] >= 5:\n code_dict[code]['state'] = STATE_BUY\n code_dict[code]['bought_price'] = data['close_price']\n print_code_dict(code, today)\n else:\n code_dict[code]['count'] += 1\n elif state == STATE_BUY:\n if data['highest_price'] - 
data['start_price'] == 0:\n body_ratio = 1\n else:\n body_ratio = (data['close_price'] - data['start_price']) / (data['highest_price'] - data['start_price'])\n\n #print(body_ratio, data['close_price'] > data['start_price'])\n if data['close_price'] > data['start_price'] and body_ratio < 0.5:\n profit = (data['close_price'] - code_dict[code]['bought_price']) / code_dict[code]['bought_price'] * 100\n print(code, today, 'OK', profit)\n code_dict[code]['state'] = STATE_NONE\n print_code_dict(code, today)\n elif data['close_price'] < data['start_price']:\n profit = (data['close_price'] - code_dict[code]['bought_price']) / code_dict[code]['bought_price'] * 100\n print(code, today, 'CUT', profit)\n code_dict[code]['state'] = STATE_NONE\n print_code_dict(code, today)\n \n\nif __name__ == '__main__':\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = (message.SERVER_IP, message.CLIENT_SOCKET_PORT)\n sock.connect(server_address)\n message_reader = stream_readwriter.MessageReader(sock)\n message_reader.start()\n market_code = stock_api.request_stock_code(message_reader, message.KOSDAQ)\n #market_code = ['A000440']\n\n\n from_date = date(2019, 1, 1)\n until_date = date(2020, 2, 10)\n\n #from_date = date(2019, 12, 1)\n #until_date = date(2020, 2, 10)\n for code in market_code:\n code_dict[code] = {'state': STATE_NONE,\n 'buy_price': 0,\n 'amount': 0,\n 'cut_price': 0,\n 'bought_price': 0, 'count': 0}\n\n while from_date <= until_date:\n if holidays.is_holidays(from_date):\n from_date += timedelta(days=1)\n continue\n start_today_trading(message_reader, market_code, from_date)\n from_date += timedelta(days=1)\n","sub_path":"clients/weak_suppress/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"5285757","text":"class Solution:\n def longestSubstring(self, s, k):\n \"\"\"\n :type s: str\n :type k: int\n :rtype: int\n \"\"\"\n \n if len(s) ==0: return 0\n from collections import Counter\n d = Counter(s)\n i = 0\n while i < len(s) and (s[i] in d and d[s[i]] >= k):\n i = i + 1\n \n if i == len(s): return len(s)\n \n return max(self.longestSubstring(s[:i],k), self.longestSubstring(s[(i+1):],k))\n\nif __name__ == '__main__':\n s = 'ababacb'\n #s = 'aaabb'\n k = 3\n solution = Solution()\n res = solution.longestSubstring(s, k)\n print(res)\n a =1\n print(a in [1,2,3])","sub_path":"leetcode/395_longestSubstring.py","file_name":"395_longestSubstring.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"155051428","text":"# James Hagle\r\n# CSC 241-401\r\n# Midterm exam\r\n\r\n# Question 1 (3)\r\ndef countgreater(lst, value):\r\n num = 0\r\n for i in lst:\r\n if i > value:\r\n num += 1\r\n return num\r\n\r\n# Question 2 (3)\r\ndef formalgreet(name, gender):\r\n nm = name.split()\r\n lst = nm[-1]\r\n gender = gender.lower()\r\n if gender == 'male':\r\n print('Hello, Mr. ',lst, end='.')\r\n elif gender == 'female':\r\n print('Hello, Ms. 
',lst, end='.')\r\n else:\r\n print('Invalid parameter')\r\n\r\n# Question 3 (3)\r\ndef copyreplace(inname, outname, str1, str2):\r\n infile = open(inname, 'r')\r\n s = infile.read()\r\n outfile = open(outname, 'w')\r\n if str1 in s:\r\n st = s.replace(str1, str2)\r\n outfile.write(st)\r\n else:\r\n outfile.write(s)\r\n infile.close()\r\n outfile.close()\r\n \r\n# Question 4 (1)\r\ndef maxrow(mlst):\r\n lst = [0]\r\n for i in mlst:\r\n a = sum(i)\r\n lst.append(a)\r\n lst.sort()\r\n return lst[-1]\r\n\r\n\r\n# Extra credit question\r\n# NOTE: This is optional and should only be attempted\r\n# when the rest of your midterm is completely finished!\r\ndef maxrowindex(mlst):\r\n pass\r\n","sub_path":"CSC241-Python1/Midterm/midterm (2).py","file_name":"midterm (2).py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"585733736","text":"# -*- coding: utf-8 -*-\nfrom matplotlib.pyplot import *\nfrom numpy import *\n\ndef data_dict(txtfile):\n\twith open(txtfile,\"r\") as infile:\n\t\tdata = {}\n\t\tidentifiers = infile.readline().split()\n\t\tfor identifier in identifiers:\n\t\t\tdata[identifier] = []\n\n\t\tlines = infile.readlines()\n\t\tfor line in lines: \n\t\t\tvalues = line.split()\n\t\t\tfor identifier,value in zip(identifiers,values):\n\t\t\t\tdata[identifier].append(float(value))\n\t\tfor identifier in identifiers:\n\t\t\tdata[identifier] = array(data[identifier])\n\treturn data\n\ndef label_(txtfile):\n\treturn \"$\\\\alpha = $\" + txtfile[2:3] + \" $\\lambda = $\" + str(float(txtfile[-8:-6])/100) + \" $\\gamma = $\" + txtfile[-5:-4]\n\ndef make_plot(savefile):\n\txlabel(\"Wealth [$]\",fontsize=20)\n\tylabel(\"Probability [%]\",fontsize=20)\n\txticks(fontsize=20)\n\tyticks(fontsize=20)\n\txlim(0,10)\n\tgrid(\"on\")\n\ttight_layout()\n\tlegend(loc=\"best\",fontsize=20)\n\tsavefig(savefile ,bbox_inches=\"tight\")\n\tclf()\n\ndef plot_plot(constraint1, constraint2, data, txtfile, label_text):\n\tif constraint1 == txtfile[2:3] and constraint2 == txtfile[-8:-6]:\n\t\tif 0 == int(txtfile[-5:-4]):\n\t\t\tplot(data[\"X\"],data[\"Y\"]*100,\"k-\",label=label_text)\n\t\telif 1 == int(txtfile[-5:-4]):\n\t\t\tplot(data[\"X\"],data[\"Y\"]*100,\"b-\",label=label_text)\n\t\telif 2 == int(txtfile[-5:-4]):\n\t\t\tplot(data[\"X\"],data[\"Y\"]*100,\"g-\",label=label_text)\n\t\telif 3 == int(txtfile[-5:-4]):\n\t\t\tplot(data[\"X\"],data[\"Y\"]*100,\"r-\",label=label_text)\n\t\telif 4 == int(txtfile[-5:-4]):\n\t\t\tplot(data[\"X\"],data[\"Y\"]*100,\"m-\",label=label_text)\n\ndef plot_semi(constraint1, constraint2, data, txtfile, label_text):\n\tif constraint1 == txtfile[2:3] and constraint2 == txtfile[-8:-6]:\n\t\tif 0 == int(txtfile[-5:-4]):\n\t\t\tsemilogy(data[\"X\"],data[\"Y\"]*100,\"k-\",label=label_text)\n\t\telif 1 == int(txtfile[-5:-4]):\n\t\t\tsemilogy(data[\"X\"],data[\"Y\"]*100,\"b-\",label=label_text)\n\t\telif 2 == int(txtfile[-5:-4]):\n\t\t\tsemilogy(data[\"X\"],data[\"Y\"]*100,\"g-\",label=label_text)\n\t\telif 3 == int(txtfile[-5:-4]):\n\t\t\tsemilogy(data[\"X\"],data[\"Y\"]*100,\"r-\",label=label_text)\n\t\telif 4 == int(txtfile[-5:-4]):\n\t\t\tsemilogy(data[\"X\"],data[\"Y\"]*100,\"m-\",label=label_text)\n","sub_path":"Project5/new_result/new_5e/older/data_dict.py","file_name":"data_dict.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"284079331","text":"from FrameIterator import 
FrameIterator\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom intrinsics import calibrateChessboard, stereoCalibrateChessboard\nfrom segmentation import manual_split, lk_segmentation\nfrom extrinsics import calculate_E_F, rectification\nfrom utils import * # contains utility functions\nfrom parser import make_parser\nfrom os import path\n\n###############################################################################\n# Setup Parameters and Parse Arguments\n###############################################################################\n\nargs = make_parser().parse_args()\n\nopticalflow_path ='data/blender/optical_flow.avi'\nintrinsics_path = 'data/blender/calibration/*.png'\ntemp_path = make_temp_dir('temp')\n\ninput_path = 'data/blender/blender.png'\noutput_path = os.path.join(temp_path,'disparity.png')\n\n# parameters for intrinsics calibration\nintrinsics_params = dict( \n chess_size=(5,5),\n tile_size=0.25, # <= 14 mm\n mirror='left',\n verbose=1,\n show=True)\n\n# parameters for lukas kanade calibration\nlk_segmentation_params = dict(\n grid_size=100,\n iterater_max=10,\n verbose=1,\n show=True)\n\n###############################################################################\n# Intrinsics Calibration\n###############################################################################\n\nif args.intrinsic:\n \n # get mirror segmentation once for all intrinsics images\n mirror_seg_intr = manual_split(FrameIterator(intrinsics_path).first())\n\n # compute intrinsics matrix K from Chessboard \n intr, extr = stereoCalibrateChessboard(\n filepattern=intrinsics_path,\n split_position=mirror_seg_intr,\n **intrinsics_params)\n K = intr['KL']\n\nelse: K=None\n\n###############################################################################\n# Image Segmentation: Detect the mirror \n###############################################################################\n\nif args.mirror:\n\n # compute split with Lukas-Kanade optical flow\n mirror_segmentation = lk_segmentation(\n path=opticalflow_path,\n **lk_segmentation_params\n )\n \n # optical flow frames are at 50% of real resolution\n mirror_segmentation = mirror_segmentation * 2 \n\nelse: mirror_segmentation=None\n\n\n###############################################################################\n# Depth Estimation on input image\n###############################################################################\n\n# load input image\nimg = FrameIterator(input_path).first()\nimg = getDownSampledImg(1, img)\nprint(f'img.shape={img.shape}')\ncv2.imwrite(path.join(temp_path,'00_input.png'), img)\n\n\n# fill missing values with defaults\n\nif K is None:\n width, height = img.shape[1], img.shape[0]\n K = manual_K(width, height, focal_length_mm=27.9, sensor_width_mm=36)\nif mirror_segmentation is None:\n mirror_segmentation = manual_split(img, verbose=1)\n\nheight, width, _ = img.shape\n# split and flip image according to mirror position into stereo pair\nimgL, imgR, maskL, maskR = split_image(img, mirror_segmentation, flip='left', temp_path=temp_path, show=True)\n# calculate essential and fundamental matrices as well as the SIFT keypoints\n\n_, F, pts1, pts2 = calculate_E_F(imgL, imgR, K, temp_path)\n#if 'extr' in locals():\n# F = extr['F']\n\nprint(K)\nprint(F)\n\nwindow_name = 'Disparity Computation'\ncv2.namedWindow(window_name, cv2.WINDOW_NORMAL)\ncanvL, canvR = draw_epilines(imgL, imgR, pts1, pts2, F)\ncanv = draw_stereo(canvL, canvR, path.join(temp_path,'06_epilines_unrect.png'))\ncv2.imshow(window_name, 
canv)\ncv2.waitKey(0)\n\n# compute rectified stereo pair\ncanvL, canvR, = rectification(canvL, canvR, pts1, pts2, F)\ncanv = draw_stereo(canvL, canvR, path.join(temp_path,'07_epilines_rect.png'))\ncv2.imshow(window_name, canv)\ncv2.waitKey(0)\n\nrectL, rectR = rectification(imgL, imgR, pts1, pts2, F)\ncanv = draw_stereo(rectL, rectR, path.join(temp_path,'08_rectification.png'))\ncv2.imshow(window_name, canv)\ncv2.waitKey(0)\n\nmaskL, maskR = rectification(maskL, maskR, pts1, pts2, F)\ncanv = draw_stereo(maskL * 255, maskR * 255, path.join(temp_path,'08_rectification_masked.png'))\ncv2.imshow(window_name, canv)\ncv2.waitKey(0)\n\n\n# downsample for smoother disparity map\nrectL = getDownSampledImg(0.5, rectL)\nrectR = getDownSampledImg(0.5, rectR)\nmaskL = getDownSampledImg(0.5, maskL)\nmaskR = getDownSampledImg(0.5, maskR)\n\n# compute disparity using semi-global block matching\nstereo = cv2.StereoSGBM_create(minDisparity=-5, numDisparities=48, blockSize=16, speckleRange=0,\n speckleWindowSize=0, uniquenessRatio=10)\ndisparity = stereo.compute(rectL, rectR)\n\n# mask disparity\nmask = np.logical_and(maskL, maskR).astype(np.uint8)\ndisparity[mask[:,:,0]==0] = disparity.min()\n\n\n###############################################################################\n# Plot Estimated Disparity Map\n###############################################################################\n\nim = plt.imshow(disparity)\nplt.colorbar(im,fraction=0.046, pad=0.04)\nplt.show()\n","sub_path":"demo_blender.py","file_name":"demo_blender.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"301419980","text":"import sys\nimport json\nimport requests\nfrom os import walk\nfrom os.path import join\n\n# get gw_eui and band channels\ngw_eui = None\nchannels = None\nif len(sys.argv) == 3:\n gw_eui = sys.argv[1]\n channel_band = sys.argv[2]\nelse:\n print('no GW EUI and channel band provided')\n exit(1)\n\nif channel_band == '0':\n channels = [0, 1, 2, 3, 4, 5, 6, 7, 64]\nelif channel_band == '1':\n channels = [8, 9, 10, 11, 12, 13, 14, 15, 65]\nelse:\n print('Channel band not supported')\n exit(1)\n\n# login\nresp = requests.post('http://127.0.0.1:8080/api/internal/login',\n json={'email': 'admin', 'password': 'admin'},\n )\nif resp.status_code < 200 or resp.status_code >= 300:\n print(\"Login Failure - StatusCode: \", resp.status_code)\n exit(1)\njwt = resp.json()['jwt']\n\n\n# get Org Id\nresp = requests.get('http://127.0.0.1:8080/api/organizations',\n headers={'Grpc-Metadata-Authorization': 'Bearer ' + jwt},\n params={'limit': 10}\n )\nif resp.status_code < 200 or resp.status_code >= 300:\n print(\"Get Org Failure - StatusCode: \", resp.status_code)\n exit(1)\norg_id = resp.json()['result'][0]['id']\nprint('Organization ID: ' + org_id)\n\n# network server\nresp = requests.post('http://127.0.0.1:8080/api/network-servers',\n headers={'Grpc-Metadata-Authorization': 'Bearer ' + jwt},\n json={\"networkServer\": {\n \"name\": \"local-network-server\",\n \"server\": \"chirpstack-network-server:8000\"\n }}\n )\nif resp.status_code < 200 or resp.status_code >= 300:\n print(\"POST Network Server Failure - StatusCode: \", resp.status_code)\n exit(1)\nnw_id = resp.json()['id']\nprint('Network Server ID: ' + nw_id)\n\n# gateway profile\nresp = requests.post('http://127.0.0.1:8080/api/gateway-profiles',\n headers={'Grpc-Metadata-Authorization': 'Bearer ' + jwt},\n json={\"gatewayProfile\": {\n \"channels\": channels,\n \"name\": 
\"local-gateway-profile\",\n \"networkServerID\": \"\"+nw_id\n }}\n )\ngwp_id = None\nif resp.status_code < 200 or resp.status_code >= 300:\n print(\"POST Gateway Profile Failure - StatusCode: \", resp.status_code)\nelse:\n gwp_id = resp.json()['id']\n print('Gateway Profile ID: ' + gwp_id)\n\n\n# service profile\nresp = requests.post('http://127.0.0.1:8080/api/service-profiles',\n headers={'Grpc-Metadata-Authorization': 'Bearer ' + jwt},\n json={\"serviceProfile\": {\n \"addGWMetaData\": True,\n \"name\": \"local-service-profile-default\",\n \"networkServerID\": \"\"+nw_id,\n \"nwkGeoLoc\": False,\n \"reportDevStatusBattery\": False,\n \"reportDevStatusMargin\": False,\n \"organizationID\": \"\"+org_id\n }}\n )\nif resp.status_code < 200 or resp.status_code >= 300:\n print(\"POST Service Profile Failure - StatusCode: \", resp.status_code)\n exit(1)\nsp_id = resp.json()['id']\nprint('Service Profile ID: ' + sp_id)\n\n\n# gateway\nif gwp_id != None and gw_eui != None:\n resp = requests.post('http://127.0.0.1:8080/api/gateways',\n headers={\n 'Grpc-Metadata-Authorization': 'Bearer ' + jwt},\n json={\"gateway\": {\n \"description\": \"rak2247 USB PCIe w/ rak packet forwarder\",\n \"discoveryEnabled\": False,\n \"gatewayProfileID\": \"\"+gwp_id,\n \"name\": \"local-rak-gateway\",\n \"id\": gw_eui,\n \"networkServerID\": \"\"+nw_id,\n \"organizationID\": \"\"+org_id,\n \"location\": {\n \"accuracy\": 0,\n \"altitude\": 0,\n \"latitude\": 0,\n \"longitude\": 0,\n \"source\": \"UNKNOWN\"\n }\n }}\n )\n if resp.status_code < 200 or resp.status_code >= 300:\n print(\"POST Gateway Failure - StatusCode: \", resp.status_code)\n print('Local Gateway EUI: ' + gw_eui)\nelse:\n print('No local gateway added')\n\n# application\nresp = requests.post('http://127.0.0.1:8080/api/applications',\n headers={'Grpc-Metadata-Authorization': 'Bearer ' + jwt},\n json={\"application\": {\n \"description\": \"default-app\",\n \"name\": \"default-app\",\n \"organizationID\": \"\"+org_id,\n \"serviceProfileID\": \"\"+sp_id\n }}\n )\nif resp.status_code < 200 or resp.status_code >= 300:\n print(\"POST Application Failure - StatusCode: \", resp.status_code)\n exit(1)\napp_id = resp.json()['id']\nprint('App ID: ' + app_id)\n\n\n# device data\ndevice_profiles = {}\ntry:\n path = 'init_data/resources/device_profiles'\n _, _, filenames = next(walk(path), (None, None, []))\n for file in filenames:\n try:\n with open(join(path, file)) as json_file:\n d = json.load(json_file)\n d['deviceProfile']['networkServerID'] = '' + nw_id\n d['deviceProfile']['organizationID'] = '' + org_id\n\n resp = requests.post('http://127.0.0.1:8080/api/device-profiles',\n headers={\n 'Grpc-Metadata-Authorization': 'Bearer ' + jwt},\n json=d\n )\n if resp.status_code < 200 or resp.status_code >= 300:\n print(\"POST Device Profile Failure - StatusCode: \",\n resp.status_code)\n print(resp.json())\n else:\n print(\"Added device profile \" + d['deviceProfile']['name'])\n device_profiles[d['deviceProfile']['name']] = resp.json()[\n 'id']\n except:\n print('Error adding device profile', file)\n\nexcept IOError:\n print(\"No init data provided with init_data.json\")\n","sub_path":"chirpstack-app-init.py","file_name":"chirpstack-app-init.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"240219879","text":"# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: 
BSD-3-Clause\n# <>\n\nimport numpy\n\nfrom xData import xDataArray as arrayModule\n\nfrom fudge.covariances import enums as covarianceEnumsModule\nfrom fudge.covariances import covarianceMatrix as covarianceMatrixModule\nfrom fudge.core.math import linearAlgebra as linearAlgebraModule\n\nfrom .. import endfFormats as endfFormatsModule\n\ndef toENDF6(self, flags, targetInfo, inCovarianceGroup=False):\n\n endf = []\n conversionFlags = targetInfo['ENDFconversionFlags'].get(self,\"\")\n rowdat, coldat = targetInfo['dataPointer']\n MF,MT1 = list( map( int, rowdat.ENDF_MFMT.split(',') ) )\n if not inCovarianceGroup:\n # print header for this subsection (contains one NL sub-subsection)\n MAT1 = targetInfo['MAT1']\n XMF1,XLFS1,NC,NI = 0,0,0,1\n if coldat:\n MF1, MT1 = list( map( int, coldat.ENDF_MFMT.split(',') ) )\n if MF in (31,33):\n endf.append( endfFormatsModule.endfHeadLine(XMF1,XLFS1,MAT1,MT1,NC,NI) )\n # header for matrix:\n rows,cols = self.matrix.array.shape\n if isinstance( self.matrix.array, arrayModule.Diagonal ):\n LS = 0; LB = 1; NP = len(self.matrix.axes[2].values); NT = 2*NP\n if self.type == covarianceEnumsModule.Type.absolute:\n LB = 0\n if 'LB' in conversionFlags:\n LB = int( conversionFlags.split('=')[1] )\n matrixData = list( zip( self.matrix.axes[2].values, list(self.matrix.array.values) + [0] ) )\n matrixData = [val for sublist in matrixData for val in sublist] # flatten\n elif isinstance( self.matrix.array, arrayModule.Full ):\n LB = 5\n if 'LB' in conversionFlags:\n LB = int( conversionFlags.split('=')[1].split(',')[0] )\n if LB == 2:\n LS = 0; NP = len(self.matrix.axes[2].values); NT = 2*NP\n fullMatrix = self.matrix.array.constructArray()\n vals = numpy.sqrt( numpy.diagonal( fullMatrix ) )\n firstNonZero = numpy.nonzero(vals)[0][0]\n vals = numpy.copysign(vals, fullMatrix[firstNonZero])\n if 'firstNegative' in conversionFlags:\n negativeIndex = int(conversionFlags.split('firstNegativeIndex=')[-1])\n if vals[negativeIndex] > 0:\n vals *= -1\n matrixData = list( zip( self.matrix.axes[2].values, list(vals) + [0] ) )\n matrixData = [val for sublist in matrixData for val in sublist] # flatten\n elif self.matrix.array.symmetry in (arrayModule.Symmetry.lower, arrayModule.Symmetry.upper):\n LS = 1; NT = (rows+1) + rows*(rows+1)/2; NP = rows+1\n arrayData = list( self.matrix.array.values )\n if self.matrix.array.symmetry == arrayModule.Symmetry.lower:\n arrayData = linearAlgebraModule.switchSymmetry( arrayData, upperToLower=False )\n matrixData = list(self.matrix.axes[2].values) + arrayData\n elif self.matrix.axes[1].isLink():\n LS = 0; NT = (rows+1) + rows*cols; NP = rows+1\n matrixData = list(self.matrix.axes[2].values) + list(self.matrix.array.values)\n else:\n LS = 0; LB = 6; NT = (rows+1) + (cols+1) + rows*cols; NP = rows+1\n matrixData = list(self.matrix.axes[2].values) + list(self.matrix.axes[1].values) + list(\n self.matrix.array.values)\n else:\n raise NotImplemented\n if MF==35: # header for fission spectra is different:\n slice_ = rowdat.slices[0]\n E1,E2 = (slice_.domainMin, slice_.domainMax)\n if LS: LB = 7\n else:\n raise Exception (\"Unknown spectrum (MF35) covariance format\")\n endf.append( endfFormatsModule.endfHeadLine( E1,E2,LS,LB,NT,NP ) )\n else:\n endf.append( endfFormatsModule.endfHeadLine( 0,0,LS,LB,NT,NP ) )\n endf += endfFormatsModule.endfDataList( matrixData )\n return endf\n\ncovarianceMatrixModule.CovarianceMatrix.toENDF6 = 
toENDF6\n","sub_path":"brownies/legacy/toENDF6/covariances/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"157193267","text":"import unittest\nfrom mock import Mock, patch\n\nimport beeline\nimport libhoney\n\nclass TestBeelineSendEvent(unittest.TestCase):\n def setUp(self):\n self.m_client = patch('beeline.g_client').start()\n self.m_state = patch('beeline.g_state').start()\n self.m_tracer = patch('beeline.g_tracer').start()\n\n def tearDown(self):\n self.m_client.stop()\n self.m_state.stop()\n self.m_tracer.stop()\n\n def test_send_event(self):\n ''' test correct behavior for send_event '''\n ev = Mock()\n delattr(ev, 'traced_event')\n self.m_state.pop_event.return_value = ev\n beeline._send_event()\n self.m_state.pop_event.assert_called_once_with()\n ev.send.assert_called_once_with()\n\n def test_send_no_events(self):\n ''' ensure nothing crashes when we try to send with no events in the\n stack '''\n self.m_state.pop_event.return_value = None\n beeline._send_event()\n self.m_state.pop_event.assert_called_once_with()\n\n def send_traced_event(self):\n ''' test send_event behavior when event is traced '''\n ev = Mock()\n ev.traced_event = True\n self.m_state.pop_event.return_value = ev\n beeline._send_event()\n self.m_state.pop_event.assert_called_once_with()\n self.m_tracer.send_traced_event.assert_called_once_with(ev)\n\n def test_send_all(self):\n ''' ensure events are flushed '''\n ev1, ev2, ev3 = Mock(), Mock(), Mock()\n ev3.send.side_effect = libhoney.SendError(\"bad thing!\")\n delattr(ev1, 'traced_event')\n delattr(ev2, 'traced_event')\n delattr(ev3, 'traced_event')\n self.m_state.pop_event.side_effect = [ev1, ev2, ev3, None]\n\n beeline._send_all()\n\n ev1.send.assert_called_once_with()\n ev2.send.assert_called_once_with()\n ev3.send.assert_called_once_with()\n","sub_path":"beeline/test_beeline.py","file_name":"test_beeline.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"136778284","text":"import functools\n\nimport sqlalchemy\nfrom flask import jsonify, request, abort\nfrom flask_login import current_user\nfrom sqlalchemy import or_, cast\n\nfrom museum import Config\n\n\ndef jsbool(val):\n if val == 'true':\n return True\n return False\n\n\ndef create_array(obj):\n res = []\n for key in obj.columns():\n res.append(getattr(obj, key[1]))\n return res\n\n\ndef extract(columns, record):\n res = []\n for column in columns:\n if record[column[2]] is None:\n res.append(None)\n else:\n res.append(getattr(record[column[2]], column[3]))\n return res\n\n\ndef perform_query_2(columns, query):\n amounttotal = query.count()\n params = parse_params()\n filters = []\n for i, column in enumerate(params['columns']):\n col = cast(columns[i][1], sqlalchemy.String)\n if params['search']['value']:\n search = '%{}%'.format(params['search']['value'])\n filters.append(col.ilike(search))\n if column['search']['value']:\n search = '%{}%'.format(column['search']['value'])\n filters.append(col.ilike(search))\n\n if len(filters) > 0:\n f = functools.reduce(lambda a, b: or_(a, b), filters)\n query = query.filter(f)\n\n for i, order in enumerate(params['order']):\n col = columns[order['column']][1]\n if order['dir'] == 'asc':\n query = query.order_by(col.asc())\n elif order['dir'] == 'desc':\n query = query.order_by(col.desc())\n\n amount = query.count()\n query = 
query.offset(params['start'])\n if params['length'] != -1:\n query = query.limit(params['length'])\n data = query.all()\n\n return jsonify({'draw': params['draw'],\n 'recordsTotal': amounttotal,\n 'recordsFiltered': amount,\n 'data': [extract(columns, i) for i in data]})\n\n\ndef perform_query(model):\n params = parse_params()\n query = model.query\n filters = []\n\n for i, column in enumerate(params['columns']):\n col = getattr(model, model.columns()[i][1])\n if params['search']['value']:\n search = '%{}%'.format(params['search']['value'])\n filters.append(cast(col, sqlalchemy.String).ilike(search))\n if column['search']['value']:\n search = '%{}%'.format(column['search']['value'])\n filters.append(cast(col, sqlalchemy.String).ilike(search))\n\n if len(filters) > 0:\n f = functools.reduce(lambda a, b: or_(a, b), filters)\n query = query.filter(f)\n\n for i, order in enumerate(params['order']):\n col = getattr(model, model.columns()[order['column']][1])\n if order['dir'] == 'asc':\n query = query.order_by(col.asc())\n elif order['dir'] == 'desc':\n query = query.order_by(col.desc())\n\n amount = query.count()\n query = query.offset(params['start'])\n if params['length'] != -1:\n query = query.limit(params['length'])\n data = query.all()\n\n return jsonify({'draw': params['draw'],\n 'recordsTotal': model.query.count(),\n 'recordsFiltered': amount,\n 'data': [create_array(i) for i in data]})\n\n\ndef parse_params():\n params = {}\n params['draw'] = request.args.get('draw', 1, int)\n params['start'] = request.args.get('start', 0, int)\n params['length'] = request.args.get('length', Config.PAGE_ITEMS, int)\n params['search'] = {}\n params['search']['value'] = request.args.get('search[value]', None, str)\n params['search']['regex'] = request.args.get('search[regex]', False, jsbool)\n i = 0\n params['order'] = []\n while not request.args.get(f'order[{i}][column]') is None:\n params['order'].append({})\n params['order'][i]['column'] = request.args.get(f'order[{i}][column]', 0, int)\n params['order'][i]['dir'] = request.args.get(f'order[{i}][dir]', 'asc', str)\n i += 1\n i = 0\n params['columns'] = []\n while not request.args.get(f'columns[{i}][data]') is None:\n params['columns'].append({})\n params['columns'][i]['data'] = request.args.get(f'columns[{i}][data]', '', str)\n params['columns'][i]['name'] = request.args.get(f'columns[{i}][name]', '', str)\n params['columns'][i]['searchable'] = request.args.get(f'columns[{i}][searchable]', False, jsbool)\n params['columns'][i]['orderable'] = request.args.get(f'columns[{i}][orderable]', False, jsbool)\n params['columns'][i]['search'] = {}\n params['columns'][i]['search']['value'] = request.args.get(f'columns[{i}][search][value]', None, str)\n params['columns'][i]['search']['regex'] = request.args.get(f'columns[{i}][search][regex]', None, str)\n i += 1\n return params\n\n\ndef required_permission(level=0, logged_in=False):\n def inner(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if ((logged_in or level > 0) and current_user.is_anonymous) or current_user.permission_level < level:\n abort(403)\n return func(*args, **kwargs)\n return wrapper\n return inner\n","sub_path":"bd/museum/museum/api_utils.py","file_name":"api_utils.py","file_ext":"py","file_size_in_byte":5105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"106811765","text":"import itertools as it\n\nimport numpy as np\nimport pandas as pd\n\nfrom utils.statistics import significance_test, fdr_corrected_pvals\n\n\ndef 
initialize_empty_columns_at_index(dataframe, row_index):\n # I have to do this ugly thing here because initializing with 3 nans is somehow not possible because pandas sets\n # the dtype of the cell as float instead ob object\n dataframe.loc[row_index, 'config_has_run'] = False\n dataframe.loc[row_index, 'mse_train'] = False\n dataframe.loc[row_index, 'mse_valid'] = False\n dataframe.loc[row_index, 'mse_test'] = False\n dataframe.loc[row_index, 'corr_train'] = False\n dataframe.loc[row_index, 'corr_valid'] = False\n dataframe.loc[row_index, 'corr_test'] = False\n dataframe.loc[row_index, 'corr_p_train'] = False\n dataframe.loc[row_index, 'corr_p_valid'] = False\n dataframe.loc[row_index, 'corr_p_test'] = False\n dataframe.loc[row_index, 'r^2_train'] = False\n dataframe.loc[row_index, 'r^2_valid'] = False\n dataframe.loc[row_index, 'r^2_test'] = False\n return dataframe\n\n\ndef prefill_columns_configuration_at_index(dataframe, row_index, has_run):\n dataframe.loc[row_index, 'config_has_run'] = has_run\n dataframe.loc[row_index, 'mse_train'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'mse_valid'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'mse_test'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'corr_train'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'corr_valid'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'corr_test'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'corr_p_train'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'corr_p_valid'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'corr_p_test'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'r^2_train'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'r^2_valid'] = [[np.nan], [np.nan], [np.nan]]\n dataframe.loc[row_index, 'r^2_test'] = [[np.nan], [np.nan], [np.nan]]\n return dataframe\n\n\ndef fill_results_in_configurations_at_index(dataframe, row_index, df, i_subject):\n if len(df) == 12:\n dataframe.loc[row_index, 'mse_train'][i_subject] = df.values[0, 1]\n dataframe.loc[row_index, 'mse_valid'][i_subject] = df.values[4, 1]\n dataframe.loc[row_index, 'mse_test'][i_subject] = df.values[8, 1]\n dataframe.loc[row_index, 'corr_train'][i_subject] = df.values[1, 1]\n dataframe.loc[row_index, 'corr_valid'][i_subject] = df.values[5, 1]\n dataframe.loc[row_index, 'corr_test'][i_subject] = df.values[9, 1]\n dataframe.loc[row_index, 'corr_p_train'][i_subject] = df.values[2, 1]\n dataframe.loc[row_index, 'corr_p_valid'][i_subject] = df.values[6, 1]\n dataframe.loc[row_index, 'corr_p_test'][i_subject] = df.values[10, 1]\n dataframe.loc[row_index, 'r^2_train'][i_subject] = df.values[3, 1]\n dataframe.loc[row_index, 'r^2_valid'][i_subject] = df.values[7, 1]\n dataframe.loc[row_index, 'r^2_test'][i_subject] = df.values[11, 1]\n elif len(df) == 9:\n print('WARNING! 
Residual result df missing r^2 (len = 9)!')\n dataframe.loc[row_index, 'mse_train'][i_subject] = df.values[0, 1]\n dataframe.loc[row_index, 'mse_valid'][i_subject] = df.values[3, 1]\n dataframe.loc[row_index, 'mse_test'][i_subject] = df.values[6, 1]\n dataframe.loc[row_index, 'corr_train'][i_subject] = df.values[1, 1]\n dataframe.loc[row_index, 'corr_valid'][i_subject] = df.values[4, 1]\n dataframe.loc[row_index, 'corr_test'][i_subject] = df.values[7, 1]\n dataframe.loc[row_index, 'corr_p_train'][i_subject] = df.values[2, 1]\n dataframe.loc[row_index, 'corr_p_valid'][i_subject] = df.values[5, 1]\n dataframe.loc[row_index, 'corr_p_test'][i_subject] = df.values[8, 1]\n else:\n print('Unknown result df format of len: {}'.format(len(df)))\n exit()\n # if configurations.loc[i_config, 'model_name'] in ['deep4', 'eegnet', 'resnet']:\n # for column_name in df.columns:\n # if column_name == 'train_loss':\n # configurations.loc[i_config, 'mse_train'][i_subject] = df.tail(1)[column_name].values[0]#, 1]\n # if column_name == 'valid_loss':\n # configurations.loc[i_config, 'mse_valid'][i_subject] = df.tail(1)[column_name].values[0]#, 2]\n # if column_name == 'test_loss':\n # configurations.loc[i_config, 'mse_test'][i_subject] = df.tail(1)[column_name].values[0]#, 3]\n # if column_name == 'train_corr':\n # configurations.loc[i_config, 'corr_train'][i_subject] = df.tail(1)[column_name].values[0]#, 4]\n # if column_name == 'valid_corr':\n # configurations.loc[i_config, 'corr_valid'][i_subject] = df.tail(1)[column_name].values[0]#, 5]\n # if column_name == 'test_corr':\n # configurations.loc[i_config, 'corr_test'][i_subject] = df.tail(1)[column_name].values[0]#, 6]\n # elif configurations.loc[i_config, 'model_name'] in ['lin_reg', 'lin_svr', 'rbf_svr', 'rf_reg']:\n # dataframe.loc[row_index, 'mse_train'][i_subject] = df.values[0, 1]\n # dataframe.loc[row_index, 'mse_valid'][i_subject] = df.values[4, 1]\n # dataframe.loc[row_index, 'mse_test'][i_subject] = df.values[8, 1]\n # dataframe.loc[row_index, 'corr_train'][i_subject] = df.values[1, 1]\n # dataframe.loc[row_index, 'corr_valid'][i_subject] = df.values[5, 1]\n # dataframe.loc[row_index, 'corr_test'][i_subject] = df.values[9, 1]\n # dataframe.loc[row_index, 'corr_p_train'][i_subject] = df.values[2, 1]\n # dataframe.loc[row_index, 'corr_p_valid'][i_subject] = df.values[6, 1]\n # dataframe.loc[row_index, 'corr_p_test'][i_subject] = df.values[10, 1]\n # else:\n # print('Unknown model name: {}'.format(configurations.loc[i_config, 'model_name']))\n # break\n return dataframe\n\n\ndef max_of_each_entry(df, label_key, value_key):\n max_value = []\n for unique_label in np.unique(df[label_key]):\n max_value.append(np.max(df[df[label_key] == unique_label][value_key]))\n\n return max_value\n\n\ndef get_dataframe_values_matching_two_criteria_in_two_columns(dataframe, condition_col, condition_name,\n metric_name_col, metric_name, metric_value_col):\n return dataframe[(dataframe[condition_col] == condition_name)\n & (dataframe[metric_name_col] == metric_name)][metric_value_col].values\n\n\ndef dataframe_significance_test(dataframe, condition_col, condition_name_a, condition_name_b, metric_name_col,\n metric_name, metric_value_col):\n a = get_dataframe_values_matching_two_criteria_in_two_columns(dataframe, condition_col, condition_name_a,\n metric_name_col, metric_name, metric_value_col)\n b = get_dataframe_values_matching_two_criteria_in_two_columns(dataframe, condition_col, condition_name_b,\n metric_name_col, metric_name, metric_value_col)\n\n return 
significance_test(a, b, alpha=0.5, alternative='two-sided', use_continuity=True)\n\n\ndef single_value_row_configurations(configurations, subject_values):\n # Reformat configuration dataframe to have one row per metric value\n single_value_configurations = pd.DataFrame()\n for i_config in range(len(configurations)):\n for metric in ['mse', 'corr', 'corr_p', 'r^2']:\n for data_split in ['train', 'valid', 'test']:\n for i_subject in range(len(subject_values)):\n single_value_configurations = single_value_configurations.append(\n pd.DataFrame({'subject_train': subject_values[i_subject],\n 'subject_apply': subject_values[i_subject],\n 'data_split': data_split,\n 'metric_value': configurations[metric + '_' + data_split][i_config][i_subject],\n 'metric_name': metric,\n 'data': configurations.loc[i_config, 'data'],\n 'electrodes': configurations.loc[i_config, 'electrodes'],\n 'band_pass': configurations.loc[i_config, 'band_pass'],\n 'model_name': configurations.loc[i_config, 'model_name'],\n 'unique_id': configurations.loc[i_config, 'unique_id'],\n 'data_folder': configurations.loc[i_config, 'data_folder'],\n 'batch_size': configurations.loc[i_config, 'batch_size'],\n 'max_epochs': configurations.loc[i_config, 'max_epochs'],\n 'cuda': configurations.loc[i_config, 'cuda'],\n 'result_folder': configurations.loc[i_config, 'result_folder'],\n 'init_lr': configurations.loc[i_config, 'init_lr'],\n 'weight_decay': configurations.loc[i_config, 'weight_decay'],\n 'sampling_rate': configurations.loc[i_config, 'sampling_rate'],\n 'n_seconds_valid_set': configurations.loc[i_config, 'n_seconds_valid_set'],\n 'n_seconds_test_set': configurations.loc[i_config, 'n_seconds_test_set'],\n 'config_has_run': configurations.loc[i_config, 'config_has_run']}, index=[0]),\n ignore_index=True)\n return single_value_configurations\n\n\ndef remove_columns_with_same_value(df, exclude=('train',)):\n cols_multiple_vals = []\n for col in df.columns:\n try:\n values_set = set(df[col])\n has_multiple_vals = len(values_set) > 1\n if has_multiple_vals:\n all_nans = np.all(np.isnan(values_set))\n except TypeError:\n all_nans = False\n # transform to string in case there are lists\n # since lists not hashable to set\n has_multiple_vals = len(set([str(val) for val in df[col]])) > 1\n cols_multiple_vals.append((has_multiple_vals and (not all_nans)))\n cols_multiple_vals = np.array(cols_multiple_vals)\n excluded_cols = np.array([c in exclude for c in df.columns])\n df = df.iloc[:, (cols_multiple_vals | excluded_cols)]\n return df\n\n\ndef print_performance_matrix(dataframe,\n x_col,\n y_col,\n metric_name_col,\n metric_name,\n metric_value_col,\n averaging_func=np.median,\n transformation_func=lambda x: x,\n fmt=':3f'):\n performance_matrix = compute_performance_matrix(dataframe,\n x_col,\n y_col,\n metric_name_col,\n metric_name,\n metric_value_col,\n averaging_func=averaging_func,\n transformation_func=transformation_func)[0]\n # format(performance_matrix, fmt)\n # latex_tabular = \" \\\\\\\\\\n\".join([\" & \".join(map(str, line)) for line in performance_matrix])\n print(\" \\\\\\\\\\n\".join([\" & \".join(map('{0:.3f}'.format, line)) for line in performance_matrix]))\n # print(latex_tabular.)\n\n\ndef compute_performance_matrix(dataframe,\n x_col,\n y_col,\n metric_name_col,\n metric_name,\n metric_value_col,\n averaging_func=np.median,\n transformation_func=lambda x: x):\n x_values, x_indices = np.unique(dataframe[x_col], return_index=True)\n x_values = x_values[np.argsort(x_indices)]\n x_indices = range(len(x_values))\n y_values, 
y_indices = np.unique(dataframe[y_col], return_index=True)\n y_values = y_values[np.argsort(y_indices)]\n y_indices = range(len(y_values))\n performance_matrix = np.ndarray((len(x_indices), len(y_indices)))\n for x, y in it.product(range(len(x_indices)), range(len(y_indices))):\n a = transformation_func(dataframe[(dataframe[x_col] == x_values[x]) &\n (dataframe[y_col] == y_values[y]) &\n (dataframe[metric_name_col] == metric_name)][metric_value_col].values)\n if np.size(a) > 1:\n performance_matrix[x, y] = averaging_func(a)\n else:\n performance_matrix[x, y] = a\n return performance_matrix, x_values, y_values\n\n\ndef compute_diff_matrix(dataframe, condition_col, metric_name_col='metric_name', metric_name='rmse',\n metric_value_col='metric_value', averaging_func=np.median):\n condition_values, condition_indices = np.unique(dataframe[condition_col], return_index=True)\n condition_values = condition_values[np.argsort(condition_indices)]\n condition_indices = range(len(condition_values))\n diff_matrix = np.ndarray((len(condition_indices), len(condition_indices)))\n significance_matrix = np.ndarray(diff_matrix.shape)\n for x, y in it.product(range(len(condition_indices)), range(len(condition_indices))):\n a = get_dataframe_values_matching_two_criteria_in_two_columns(\n dataframe,\n condition_col,\n condition_values[x],\n metric_name_col,\n metric_name,\n metric_value_col)\n b = get_dataframe_values_matching_two_criteria_in_two_columns(\n dataframe,\n condition_col,\n condition_values[y],\n metric_name_col,\n metric_name,\n metric_value_col)\n if np.size(a) >= 10:\n diff_matrix[x, y] = averaging_func(a) - averaging_func(b)\n else:\n diff_matrix[x, y] = averaging_func(a - b)\n significance_matrix[x, y] = significance_test(a, b, alpha=0.5, alternative='two-sided', use_continuity=True)\n tril_indices = np.tril_indices_from(significance_matrix, k=-1)\n significance_matrix[tril_indices] = fdr_corrected_pvals(significance_matrix[tril_indices])\n return diff_matrix, significance_matrix, condition_values\n\n\ndef expand_row_with_multiple_values(df, column):\n new_df = df\n for row in df.index:\n for column in df.iloc[row]:\n tmp = np.size(column)\n if tmp > 1:\n new_df\n","sub_path":"utils/df_utils.py","file_name":"df_utils.py","file_ext":"py","file_size_in_byte":15070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"452259503","text":"#! /usr/bin/env python\n\n# ###################################################################\n#\n# This utility builds an executable using a source file, compiler and\n# build flags passed to it as command line arguments. 
It also will\n# connect with db2 to get an executable id and record the parameters\n# of the build.\n#\n# build2db2 sourceFile buildOpts \n#\n# ###################################################################\n\nfrom __future__ import print_function\nfrom DB2 import *\nimport mx.DateTime\nimport time\nimport sys\nimport bgutils\nimport re\nimport os.path\nimport pwd\n\nif len(sys.argv) < 3:\n print(sys.argv[0], \"sourceFile buildOpts \")\n sys.exit(-1)\n\nsourceFile = sys.argv[1]\nbuildOpts = sys.argv[2]\nprofileBuild = 0\ntargetPlatform = 0\ninstallImagePath = bgutils.installbase() + 'usr/opt/bluegene'\n\ncompilerFamily = \"xlC_r\"\nexeExtension = \"smp.aix.exe\"\nobjExtension = \"o\"\n\nprint(\"buildOpts:\\n\", buildOpts)\n\nif (len(sys.argv) > 3):\n profileBuild = int(sys.argv[3])\nif (len(sys.argv) > 4):\n targetPlatform = sys.argv[4]\nif (len(sys.argv) > 5):\n installImagePath = sys.argv[5]\nif (len(sys.argv) > 6):\n compilerFamily = sys.argv[6]\nif (len(sys.argv) > 7):\n exeExtension = sys.argv[7]\n\nrootfilepat = re.compile(r'(.+)\\.cpp$')\nfmatch = re.match(rootfilepat, sourceFile)\nif fmatch == None:\n print(\"Error: Invalid file name\", sourceFile)\n sys.exit(-1)\n\nrootFileName = fmatch.expand(r'\\1')\nprint(\"rootFileName =\", rootFileName)\n\n# database name\ndbpat = re.compile(r'^//%database_name:\\s*(\\w+)')\nsourcepat = re.compile(r'^//%source_id:\\s*(\\d+)')\nsyspat = re.compile(r'^//%system_id:\\s*(\\d+)')\nctppat = re.compile(r'^//%ctp_id:\\s*(\\d+)')\npsversionpat = re.compile(r'^//%probspec_version:\\s*([\\w\\.]+)')\npstagpat = re.compile(r'^//%probspec_tag:([\\w ]+)')\nmagicnopat = re.compile(r'^//%magic_number:([\\w]+)')\n\ninFile = open(sourceFile)\n\nmatchTotal = 7\nmatchCount = 0\nfor line in inFile.xreadlines():\n ma = re.match(dbpat, line)\n mb = re.match(sourcepat, line)\n mc = re.match(syspat, line)\n md = re.match(ctppat, line)\n me = re.match(psversionpat, line)\n mf = re.match(pstagpat, line)\n mg = re.match(magicnopat, line)\n\n if ma != None:\n matchCount=matchCount+1\n dbName = ma.expand(r'\\1')\n print(\"database name =\", ma.expand(r'\\1'))\n elif mb != None:\n matchCount=matchCount+1\n sourceId = mb.expand(r'\\1')\n print(\"source_id =\", mb.expand(r'\\1'))\n elif mc != None:\n matchCount=matchCount+1\n sysId = mc.expand(r'\\1')\n print(\"sys_id =\", mc.expand(r'\\1'))\n elif md != None:\n matchCount=matchCount+1\n ctpId = md.expand(r'\\1')\n print(\"ctp_id =\", md.expand(r'\\1'))\n elif me != None:\n matchCount=matchCount+1\n probspecVersion = me.expand(r'\\1')\n print(\"probspec_version =\", me.expand(r'\\1'))\n elif mf != None:\n matchCount=matchCount+1\n probspecTag = mf.expand(r'\\1')\n print(\"probspec_tag =\", mf.expand(r'\\1'))\n elif mg != None:\n matchCount=matchCount+1\n magicNumber = mg.expand(r'\\1')\n print(\"magic number =\", mg.expand(r'\\1'))\n if (matchCount >= matchTotal):\n break\ninFile.close()\n\nconn = Connection(dbName)\ncursor = conn.cursor()\n\n# get the probspec version information based on the probspecTag\ncmd = \"select probspec_version from experiment.probspec where tag = \\'\" \\\n + probspecTag + \"\\'\"\ncursor.execute(cmd)\nfoo = cursor.fetchone()\nif foo:\n probspecVersion = foo[0]\nelse:\n print(\"ERROR: Unable to find probspec version corresponding to tag\", probspecTag)\n sys.exit(-2)\n\n\n# fetch and print platform information based on target platform id\ncmd = \"select platform_id, hardware, os_family, os_version from experiment.platform where platform_id = \" + 
str(targetPlatform)\nprint(cmd)\ncursor.execute(cmd)\nres = cursor.fetchone()\nprint(\"Platform:\", res[0], res[1], res[2], res[3])\n\n# we're going to check the version files under both BlueMatter and bgfe and\n# verify that they contain the same version (if any).\nblueMatterVersion = None\nbgfeVersion = None\nversionpat = re.compile(r'^\\$Name:\\s*(\\w+)')\ninFile = open(installImagePath + \"/BlueMatter.version\")\nfor line in inFile.xreadlines():\n mVer = re.match(versionpat, line)\n if mVer != None:\n blueMatterVersion = mVer.expand(r'\\1')\n print(\"blueMatterVersion =\", blueMatterVersion)\n break;\ninFile.close()\n\ninFile = open(installImagePath + \"/bgfe.version\")\nfor line in inFile.xreadlines():\n mVer = re.match(versionpat, line)\n if mVer != None:\n bgfeVersion = mVer.expand(r'\\1')\n print(\"bgfeVersion =\", bgfeVersion)\n break;\ninFile.close()\n\ntagVersion = bgfeVersion\nif bgfeVersion == None or blueMatterVersion == None or bgfeVersion != blueMatterVersion:\n tagVersion = None\n\nif tagVersion == None:\n print(\"tagVersion: NONE\")\nelse:\n print(\"tagVersion:\", tagVersion)\n\n# now get the relevant information about compiler levels and such.\n# The following is AIX-specific:\n\ncompilerInfo = os.popen('lslpp -L -c vacpp.cmp.C vacpp.cmp.batch vacpp.cmp.core vacpp.cmp.include memdbg.adt bos.adt.include', 'r')\n\ncompilerVersion = ''\nfor line in compilerInfo.xreadlines():\n compilerVersion = compilerVersion + line\nprint(compilerVersion, end=' ')\n\ncmd = \"select count(*) from experiment.compiler where family = \" + \"\\'\" + str(compilerFamily) + \"\\' and version = \\'\" + str(compilerVersion) + \"\\'\"\nprint(cmd)\ncursor.execute(cmd)\n\nfoo = cursor.fetchone();\nprint(foo[0],\"rows returned\")\n\nif foo[0] > 1:\n print(\"WARNING: multiple entries in compiler table for \" + compilerFamily)\n print(\"version\\n\" + compilerVersion)\nelif int(foo[0]) == 0:\n cmd = \"insert into experiment.compiler (compiler_id, family, version) values (DEFAULT, \" \\\n + \"\\'\" + str(compilerFamily) + \"\\', \" \\\n + \"\\'\" + str(compilerVersion) + \"\\')\"\n print(cmd)\n cursor.execute(cmd)\ncmd = \"select compiler_id from experiment.compiler where family = \" \\\n+ \"\\'\" + str(compilerFamily) + \"\\' and version = \\'\" + str(compilerVersion) + \"\\'\"\nprint(cmd)\ncursor.execute(cmd)\nfoo = cursor.fetchone()\nif foo:\n compilerId = foo[0]\n print(\"compilerId:\", compilerId)\nelse:\n print(\"ERROR: Could not fetch compiler_id from db2\")\n sys.exit(-2)\n\n\n# machine identifier\ncompileHost = os.uname()[4];\n\nprint(compileHost)\n\n# Because we are having db2 generate the executable_id which we must retrieve\n# prior to the build, we need to have a way to uniquely identify the row that\n# we are adding. Since we will be updateing the build_begin and build_end\n# columns after the build completes we can use them to ensure that the row we\n# add can be specified uniquely without knowing the executable_id. 
We'll do\n# this by setting build_begin to\n# mx.DateTime.TimestampFromTicks(time.time())\n# and setting build_end to\n# mx.DateTime.TimestampFromTicks(os.getpid())\n\nbuildBegin = mx.DateTime.TimestampFromTicks(time.time())\nbuildEnd = mx.DateTime.TimestampFromTicks(os.getpid())\nbuilder = pwd.getpwuid(os.getuid())[0]\n\n# initial insertion\ncmd = \"insert into experiment.executable (executable_id, build_begin, build_end, build_machine, build_opts, builder, compiler_id, install_image_path, install_image_tag, platform_id, probspec_version, source_id) \" \\\n+ \"values (DEFAULT, \" \\\n+ \"\\'\" + str(buildBegin) + \"\\', \" \\\n+ \"\\'\" + str(buildEnd) + \"\\', \" \\\n+ \"\\'\" + str(compileHost) + \"\\', \" \\\n+ \"\\'\" + \"buildOpts\" + \"\\', \" \\\n+ \"\\'\" + str(pwd.getpwuid(os.getuid())[0]) + \"\\', \" \\\n+ str(compilerId) + \", \" \\\n+ \"\\'\" + str(installImagePath) + \"\\', \" \\\n+ \"\\'\" + str(tagVersion) + \"\\', \" \\\n+ str(targetPlatform) + \", \" \\\n+ str(probspecVersion) + \", \" \\\n+ str(sourceId) + \")\"\n\nprint(cmd)\ncursor.execute(cmd)\n\ncmd = \\\n \"select executable_id from experiment.executable where \" +\\\n \"build_begin = \\'\" + str(buildBegin) + \"\\' and \" +\\\n \"build_end = \\'\" + str(buildEnd) + \"\\' and \" +\\\n \"build_machine = \\'\" + str(compileHost) + \"\\' and \" +\\\n \"build_opts = \\'\" + \"buildOpts\" + \"\\' and \" +\\\n \"builder = \\'\" + str(pwd.getpwuid(os.getuid())[0]) + \"\\' and \" +\\\n \"compiler_id = \" + str(compilerId) + \" and \" +\\\n \"install_image_path = \\'\" + str(installImagePath) + \"\\' and \" +\\\n \"install_image_tag = \\'\" + str(tagVersion) + \"\\' and \" +\\\n \"platform_id = \" + str(targetPlatform) + \" and \" +\\\n \"probspec_version = \" + str(probspecVersion) + \" and \" +\\\n \"source_id = \" + str(sourceId)\n\nprint(cmd)\ncursor.execute(cmd)\nfoo = cursor.fetchone()\n\nexecutableId = -1\n\nif foo:\n executableId = foo[0]\nelse:\n print(\"ERROR: Could not retrieve executable id from table\")\n sys.exit(-2)\n\nprint(\"executableId =\", executableId)\n\nallOpts = buildOpts + \" -DEXECUTABLEID=\" + str(executableId)\n#buildCmd = str(compilerFamily) + \" \" + str(allOpts) + \" \" +\\\n# str(sourceFile) + \" -o \" + str(rootFileName) +\\\n# \".\" + str(exeExtension)\nif profileBuild: # Build with profiling\n buildCmd = \\\n str(compilerFamily) + \" \" + str(allOpts) + \" -P \" +\\\n str(sourceFile) + \"&&\" +\\\n str(compilerFamily) + \" -+ \" + str(allOpts) + \" -qlinedebug -c \" +\\\n str(rootFileName) + \".i &&\" +\\\n str(compilerFamily) + \" -pg \" + str(allOpts) +\\\n \" -bloadmap:\" + str(rootFileName) + \".map\" +\\\n \" -o \" + str(rootFileName) +\\\n \".\" + str(exeExtension) +\\\n \" \" + str(rootFileName) + \".\" + str(objExtension)\nelse:\n buildCmd = str(compilerFamily) + \" \" + str(allOpts) + \" -c \" +\\\n str(sourceFile) + \";\" +\\\n str(compilerFamily) + \" \" + str(allOpts) +\\\n \" \" + str(rootFileName) + \".\" + str(objExtension) +\\\n \" -o \" + str(rootFileName) +\\\n \".\" + str(exeExtension)\nprint(buildCmd)\nbuildBegin = mx.DateTime.TimestampFromTicks(time.time())\nret = os.system(buildCmd)\nbuildEnd = mx.DateTime.TimestampFromTicks(time.time())\n\nif (ret == 0):\n cmd = \"update experiment.executable set (build_begin, build_end) = (\\'\" +\\\n str(buildBegin) + \"\\', \\'\" + str(buildEnd) +\\\n \"\\') where executable_id=\" + str(executableId)\n\n print(cmd)\n cursor.execute(cmd)\nelse:\n print(\"ERROR: Compile failed with return code =\", str(ret))\n 
sys.exit(-4)\n\nconn.close()\n","sub_path":"svntrunk/src/BlueMatter/db2/src/build2db2.py","file_name":"build2db2.py","file_ext":"py","file_size_in_byte":10502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"490008851","text":"import xarray as xr\n\n\ndef join_new(dsets, dim_name, coord_value, varname, options={}):\n if isinstance(varname, str):\n varname = [varname]\n concat_dim = xr.DataArray(coord_value, dims=(dim_name), name=dim_name)\n return xr.concat(dsets, dim=concat_dim, data_vars=varname, **options)\n\n\ndef join_existing(dsets, options={}):\n return xr.concat(dsets, **options)\n\n\ndef union(dsets, options={}):\n return xr.merge(dsets, **options)\n\n\ndef _to_nested_dict(df):\n \"\"\"Converts a multiindex series to nested dict\"\"\"\n if hasattr(df.index, 'levels') and len(df.index.levels) > 1:\n ret = {}\n for k, v in df.groupby(level=0):\n ret[k] = _to_nested_dict(v.droplevel(0))\n return ret\n else:\n return df.to_dict()\n\n\ndef _create_asset_info_lookup(\n df, path_column_name, variable_column_name=None, data_format=None, format_column_name=None\n):\n\n if data_format:\n data_format_list = [data_format] * len(df)\n elif format_column_name is not None:\n data_format_list = df[format_column_name]\n\n if variable_column_name is None:\n varname_list = [None] * len(df)\n else:\n varname_list = df[variable_column_name]\n\n return dict(zip(df[path_column_name], tuple(zip(varname_list, data_format_list))))\n\n\ndef _aggregate(\n aggregation_dict,\n agg_columns,\n n_agg,\n v,\n lookup,\n mapper_dict,\n zarr_kwargs,\n cdf_kwargs,\n preprocess,\n):\n def apply_aggregation(v, agg_column=None, key=None, level=0):\n \"\"\"Recursively descend into nested dictionary and aggregate items.\n level tells how deep we are.\"\"\"\n\n assert level <= n_agg\n\n if level == n_agg:\n # bottom of the hierarchy - should be an actual path at this point\n # return open_dataset(v)\n data_format = lookup[v][1]\n # Get varname in order to specify data_vars=[varname] during concatenation\n # See https://github.com/NCAR/intake-esm/issues/172#issuecomment-549001751\n varname = lookup[v][0]\n ds = _open_asset(\n mapper_dict[v],\n data_format=data_format,\n zarr_kwargs=zarr_kwargs,\n cdf_kwargs=cdf_kwargs,\n preprocess=preprocess,\n )\n ds.attrs['intake_esm_varname'] = varname\n return ds\n\n else:\n agg_column = agg_columns[level]\n\n agg_info = aggregation_dict[agg_column]\n agg_type = agg_info['type']\n\n if 'options' in agg_info:\n agg_options = agg_info['options']\n else:\n agg_options = {}\n\n dsets = [\n apply_aggregation(value, agg_column, key=key, level=level + 1)\n for key, value in v.items()\n ]\n keys = list(v.keys())\n\n attrs = dict_union(*[ds.attrs for ds in dsets])\n\n # copy encoding for each variable from first encounter\n variables = set([v for ds in dsets for v in ds.variables])\n\n encoding = {}\n for ds in dsets:\n for v in variables:\n if v in ds.variables and v not in encoding:\n if ds[v].encoding:\n encoding[v] = ds[v].encoding\n # get rid of the misleading file-specific attributes\n # github.com/pydata/xarray/issues/2550\n for enc_attrs in ['source', 'original_shape']:\n if enc_attrs in encoding[v]:\n del encoding[v][enc_attrs]\n\n if agg_type == 'join_new':\n varname = dsets[0].attrs['intake_esm_varname']\n ds = join_new(\n dsets,\n dim_name=agg_column,\n coord_value=keys,\n varname=varname,\n options=agg_options,\n )\n\n elif agg_type == 'join_existing':\n ds = join_existing(dsets, options=agg_options)\n\n elif agg_type == 
'union':\n ds = union(dsets, options=agg_options)\n\n ds.attrs = attrs\n for v in ds.variables:\n if v in encoding and not ds[v].encoding:\n ds[v].encoding = encoding[v]\n\n return ds\n\n return apply_aggregation(v)\n\n\ndef _open_asset(path, data_format, zarr_kwargs, cdf_kwargs, preprocess):\n\n if data_format == 'zarr':\n ds = xr.open_zarr(path, **zarr_kwargs)\n\n else:\n ds = xr.open_dataset(path, **cdf_kwargs)\n\n if preprocess is None:\n return ds\n else:\n return preprocess(ds)\n\n\ndef dict_union(*dicts, merge_keys=['history', 'tracking_id'], drop_keys=[]):\n \"\"\"Return the union of two or more dictionaries.\"\"\"\n from functools import reduce\n\n if len(dicts) > 2:\n return reduce(dict_union, dicts)\n elif len(dicts) == 2:\n d1, d2 = dicts\n d = type(d1)()\n # union\n all_keys = set(d1) | set(d2)\n for k in all_keys:\n v1 = d1.get(k)\n v2 = d2.get(k)\n if (v1 is None and v2 is None) or k in drop_keys:\n pass\n elif v1 is None:\n d[k] = v2\n elif v2 is None:\n d[k] = v1\n elif v1 == v2:\n d[k] = v1\n elif k in merge_keys:\n d[k] = '\\n'.join([v1, v2])\n return d\n elif len(dicts) == 1:\n return dicts[0]\n","sub_path":"intake_esm/merge_util.py","file_name":"merge_util.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"412338273","text":"#Deleting duplicate elements from user values\r\na=[]\r\nn= int(input(\"Enter the number of elements :\"))\r\nfor i in range(0,n):\r\n element=int(input())\r\n a.append(element)\r\nb = set()\r\nunique = []\r\nfor i in a:\r\n if i not in b:\r\n unique.append(i)\r\n b.add(i)\r\nprint(\"original elements:\")\r\nprint(unique)\r\n","sub_path":"L3-08-09-ASS-delete duplicate elemnt.py","file_name":"L3-08-09-ASS-delete duplicate elemnt.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"168186403","text":"import time\n\n# Comparação de eficiência entre algoritmos de ordem cúbica e quadratica\n# de permutação de lista.\n\n# Autor: Danilo de Oliveira;\n# Data: 27 de julho de 2015\n\n# versão quadratica\ndef permute(param_list):\n\tif len(param_list) == 2:\n\t\t# Retorna a permutação do caso base. 
Ex: [['b','c'],['c','b']]\n\t\treturn [param_list[:],[param_list[1],param_list[0]]]\n\n\telse:\n\t\tlist_temp = [] # Guardará o retorno de cada permutação\n\n\t\tfor sub_list in permute(param_list[1:]): # sub_list receberá a lista de permutação abaixo:\n\t\t\t\n\t\t\tfor j in range(len(sub_list)+1): # Para cada lista permutada faça conforme o exemplo: \n\t\t\t\t\t\t\t\t\t\t\t #[[a,b,c],[b,a,c],[b,c,a]]\n\t\t\t\t\t\t\t\t\t\t\t #[[a,c,b],[c,a,b],[c,b,a]]\n\t\t\t\tlist_temp += [sub_list[:j]+[param_list[0]]+sub_list[j:]]\n\t\t\n\t\treturn list_temp # Será retornado do exemplo acima: [[a,b,c],[b,a,c],[b,c,a], [a,c,b],[c,a,b],[c,b,a]]\n\t\t\t\t\t\t # para cada sublista da lista retornada, repita o passo.\n# Fim versão quadrática\n\n\n# Permutação versão cubica\ndef permutacao_list(param_list):\n if len(param_list) == 2:\n return [[param_list[0],param_list[1]],[param_list[1],param_list[0]]]\n \n else:\n for k in range(len(param_list)):\n temp = []\n new_list = permutacao_list(param_list[:k]+param_list[k+1:])\n for pequena_permutacao in new_list:\n for i in range(len(pequena_permutacao)+1):\n temp+= [pequena_permutacao[:i]+[param_list[k]]+pequena_permutacao[i:]]\n return temp\n# Fim permutação versão cubica\n\n\n\n# Aqui é para verificar se existem 2 permutações iguais na lista de permutações.\ndef verifica_igual(param_list):\n\tvar_temp = []\n\n\tfor i in range(len(param_list)):\n\t\tvar_temp = param_list[i]\n\t\tfor j in range(i+1,len(param_list)):\n\t\t\tif var_temp == param_list[j]:\n\n\t\t\t\treturn True # Se retorna-se verdadeiro, o algoritmo não funcionaria.\n\treturn False\n\n\n\ndef calcula_segundos(inicio, fim):\n segundos_inicio = int(inicio[:2])*60 + int(inicio[3:])\n segundos_fim = int(fim[:2])*60 + int(fim[3:])\n\n return segundos_fim - segundos_inicio\n\n\n\ndef imprime(funcao_name, param_tempo):\n\t\n\tprint(\"Funcao: %s.\" % funcao_name)\n\tprint(\"Tempo gasto: %d segundos\\n\\n\" % param_tempo)\n\t\n\treturn 0\n\n\n\n\ndef main():\n\t\n\tlista = ['a','b','c','d','e','f','g','h','i']\n\ttime_start = time.asctime()[14:19]\n\tpermute(lista)\n\ttime_finish = time.asctime()[14:19]\n\timprime('Versão Quadrática', calcula_segundos(time_start, time_finish))\n\t\n\t\n\t\n\t\n\ttime_start = time.asctime()[14:19]\n\tpermutacao_list(lista)\n\ttime_finish = time.asctime()[14:19]\n\timprime('Versão Cúbica', calcula_segundos(time_start, time_finish))\n\treturn 0\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Algoritmos/comparation_of_algorithims.py","file_name":"comparation_of_algorithims.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"474817600","text":"from aion.websocket_server import BaseServerClass\n\nfrom src import log\nfrom src.errors import TemplateMatchingSummaryServerError\nfrom src.summary.vehicle import VehicleSummary\n\n\nclass VehicleServer(BaseServerClass):\n summary = VehicleSummary()\n\n @log.client.function_log\n async def template_matching_by_opencv(self, sid, data):\n log.print(data, debug=True)\n dicts = data.get('templateMatchingByOpenCV')\n if not dicts:\n msg = 'Request body is not found.'\n raise TemplateMatchingSummaryServerError(msg)\n\n self.summary.set(dicts)\n vehicle_dict = self.summary.get_vehicle()\n end_dict = self.summary.get_end()\n res = self.summary.get_metadata()\n self.summary.stack()\n summary_dict = self.summary.get_all_vehicles()\n res['vehicle'] = vehicle_dict\n res['end'] = end_dict\n res['summary'] = summary_dict\n\n if end_dict['status']:\n 
self.summary.reset()\n return res\n\n @log.client.function_log\n async def reset(self, sid, data):\n self.summary.reset()\n return\n","sub_path":"src/servers/vehicle.py","file_name":"vehicle.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"574517934","text":"import os\nimport sys\nimport logging\nimport asyncio\n\nfrom aiohttp import web\nfrom . import get_app\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', dest='port', type=int)\n parser.add_argument('--host', dest='host', type=str, default='127.0.0.1')\n parser.add_argument('--test', action='store_true')\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n if sys.platform == 'win32':\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n # package.json location for the update server\n update_package = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), '..', 'package.json')\n\n # package.json location for the API server installed in the update server's\n # environment (e.g.: which version is available by import). This is one way\n # of finding this info, but it could also be determined by making an HTTP\n # request to the API server and selecting this info. In the future, this\n # server should check the health of the API server process and possibly get\n # the version that way instead.\n try:\n import opentrons\n api_package = os.path.join(\n os.path.abspath(os.path.dirname(opentrons.__file__)),\n 'package.json')\n opentrons.robot.connect()\n smoothie_version = opentrons.robot.fw_version\n except Exception:\n print(\"Module `opentrons` import failed\")\n api_package = None\n smoothie_version = 'not available'\n\n fmt = '%(asctime)s %(name)s %(levelname)s [Line %(lineno)s] %(message)s'\n config_dict = {\n 'format': fmt,\n 'level': 'DEBUG' if args.debug else 'INFO'\n }\n logging.basicConfig(**config_dict)\n log = logging.getLogger(__name__)\n\n log.info('Starting update server on http://{}:{}'.format(\n args.host, args.port))\n app = get_app(\n api_package=api_package,\n update_package=update_package,\n smoothie_version=smoothie_version,\n test=args.test,\n with_migration=True)\n web.run_app(app, host=args.host, port=args.port)\n","sub_path":"update-server/otupdate/balena/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"182944387","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import savefig\n\nwith open('num_pairs.dat') as f0:\n tmp = f0.readlines()\nnPairs = [float(x.rstrip()) for x in tmp]\n\nwith open('num_odd.dat') as f1:\n tmp = f1.readlines()\nnOdd = [float(x.rstrip()) for x in tmp]\n\nwith open('prop_pairs.dat') as f2:\n tmp = f2.readlines()\npropPairs = [float(x.rstrip()) for x in tmp]\n\nwith open('num_socks.dat') as f3:\n tmp = f3.readlines()\nnSocks = [float(x.rstrip()) for x in tmp]\n\n# Start histograms\n\nplt.subplots_adjust(wspace=.5, hspace=.5)\n\n\nplt.subplot(221)\nplt.xlabel('Number of socks in pairs')\nplt.ylabel('Probability')\nplt.title('Posterior on Pairs of Socks')\nplt.grid(True)\nplt.hist(np.array(nPairs), 20, normed=1, facecolor='g', alpha=0.75)\n\nplt.subplot(222)\nplt.xlabel('Number of odd socks')\nplt.ylabel('Probability')\nplt.title('Posterior on Odd Socks')\nplt.grid(True)\nplt.hist(np.array(nOdd), 20, normed=1, facecolor='y', alpha 
= 0.75)\n\nplt.subplot(223)\nplt.xlabel('Total number of socks')\nplt.ylabel('Probability')\nplt.title('Posterior on Total Socks')\nplt.grid(True)\nplt.hist(np.array(nSocks), 20, normed=1, facecolor='r', alpha = 0.75)\n\nplt.subplot(224)\nplt.xlabel('Proportion of socks in pairs')\nplt.ylabel('Probability')\nplt.title('Posterior on Proportion Paired')\nplt.grid(True)\nplt.hist(np.array(propPairs), 20, normed=1, facecolor='b', alpha = 0.75)\n\nplt.savefig(\"sock_posteriors.png\", dpi = 120)\n\n","sub_path":"demo/hist_plots.py","file_name":"hist_plots.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"312184941","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom fosae.get_view_data import MAX_N\n\ndef dis(preds, preds_next):\n for a,b in zip(preds, preds_next):\n print(a-b)\n print(\"-\"*10)\n\ndata = np.load(\"fosae/block_data/block_data.npy\")\npreds = np.load(\"fosae/block_data/block_preds.npy\")\n\ndata_next = np.load(\"fosae/block_data/block_data_next.npy\")\npreds_next = np.load(\"fosae/block_data/block_preds_next.npy\")\n\naction = np.load(\"fosae/block_data/change.npy\")\n\nfig, axs = plt.subplots(5, MAX_N, figsize=(8, 6))\nfor _, ax in np.ndenumerate(axs):\n ax.set_xticks([])\n ax.set_yticks([])\nplt.gca()\n\ndef show_img(ax, arr):\n ax.imshow(np.transpose(arr, (1,2,0)))\n\nwhile True:\n for one_data, one_data_next, one_p, one_p_nt, one_a in zip(\n data, data_next, preds, preds_next, action\n ):\n for i, (d, d_nt) in enumerate(zip(one_data, one_data_next)):\n show_img(axs[0,i], d)\n show_img(axs[1,i], d_nt)\n\n\n axs[2,0].imshow(one_p, cmap='gray')\n axs[3,0].imshow(one_p_nt, cmap='gray')\n axs[4,0].imshow(one_a, cmap='gray')\n print(np.abs(0.5-one_p) > 0.49)\n print(\"-\"*20)\n print(np.abs(0.5-one_p_nt) > 0.49)\n print(\"-\"*20)\n print(one_a)\n print(\"-\"*20)\n\n plt.pause(0.2)\n # a = 1\n\n\n\n","sub_path":"fosae/view_plot.py","file_name":"view_plot.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"26881047","text":"import sys\nfrom random import randint\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidgetItem, QTableWidget, \\\n QHBoxLayout, QVBoxLayout, QWidget, QLabel, QLineEdit, QPushButton\nfrom PyQt5.QtGui import QColor\nimport csv\n\n\nclass MyWidget(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setGeometry(300, 300, 500, 400)\n self.tableWidget = QTableWidget(self)\n self.loadTable('price.csv')\n\n def loadTable(self, table_name):\n self.w = QWidget()\n self.setCentralWidget(self.w)\n self.vbox = QVBoxLayout(self.w)\n with open(table_name, encoding=\"utf8\") as csvfile:\n reader = csv.reader(csvfile, delimiter=';', quotechar='\"')\n title = next(reader)\n self.tableWidget.setColumnCount(3)\n self.tableWidget.setHorizontalHeaderLabels(title + ['Количество'])\n self.tableWidget.setRowCount(0)\n for i, row in enumerate(reader):\n self.tableWidget.setRowCount(self.tableWidget.rowCount() + 1)\n for j, elem in enumerate(row):\n self.tableWidget.setItem(i, j, QTableWidgetItem(elem))\n for i in range(self.tableWidget.rowCount()):\n self.tableWidget.setItem(i, 2, QTableWidgetItem('0'))\n self.tableWidget.itemSelectionChanged.connect(self.recount)\n self.vbox.addWidget(self.tableWidget)\n self.hbox = QHBoxLayout(self)\n self.vbox.addLayout(self.hbox)\n self.label = QLabel('итого', self)\n self.line = QLineEdit(self)\n 
self.line.setDisabled(True)\n self.hbox.addWidget(self.label)\n self.hbox.addWidget(self.line)\n self.btn = QPushButton('обновить', self)\n self.hbox.addWidget(self.btn)\n self.btn.clicked.connect(self.updt)\n\n def recount(self):\n s = 0\n for i in range(self.tableWidget.rowCount()):\n s += int(self.tableWidget.item(i, 1).text()) * int(self.tableWidget.item(i, 2).text())\n self.line.setText(str(s))\n\n def updt(self):\n # rows = sorted([self.tableWidget.row(i) for i in range(self.tableWidget.rowCount())], key=lambda x: int(x[1]))\n rows = []\n for i in range(self.tableWidget.rowCount()):\n rows.append([self.tableWidget.item(i, j).text() for j in range(self.tableWidget.columnCount())])\n rows.sort(key=lambda x: int(x[1]), reverse=True)\n for i, row in enumerate(rows):\n for j, elem in enumerate(row):\n self.tableWidget.setItem(i, j, QTableWidgetItem(elem))\n if len(rows) >= 5:\n for i in range(5):\n self.rand_color_row(i)\n else:\n for i in range(len(rows)):\n self.rand_color_row(i)\n\n def rand_color_row(self, row):\n color = QColor(randint(0, 255), randint(0, 255), randint(0, 255))\n for i in range(self.tableWidget.columnCount()):\n self.tableWidget.item(row, i).setBackground(color)\n\n\n\napp = QApplication(sys.argv)\nex = MyWidget()\nex.show()\nsys.exit(app.exec_())\n","sub_path":"csv_4.py","file_name":"csv_4.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"436948765","text":"def number():\r\n return int(input(\"Give a number:\"))\r\ndef prime(num):\r\n if num==1:\r\n print(\"It is not a prime number\")\r\n elif num==2:\r\n print(\"It is a prime number\")\r\n elif num!= 1 or 2:\r\n p=True\r\n for i in range(2,num):\r\n if num%i==0:\r\n p=False\r\n break\r\n return p\r\ndef print_p(num):\r\n num=prime(num)\r\n if num:\r\n print(\"prime\")\r\n else:\r\n print(\"not prime\")\r\nwhile 1==1:\r\n print_p(number())\r\n","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"565323333","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n@ author: Hubery\n@ create on: 2018/11/8 14:03\n@ file: protocol.py\n@ site: \n@ purpose: \n\"\"\"\nfrom RunFast.Common.pack import *\n\n\nclass Encrypt2ParseData:\n def __init__(self, update_data, method=None):\n self.update_data = update_data\n self.method = method\n\n # Update protocol data and perform different processing of data according to requirements.\n def get_update_data(self):\n if self.method == \"pack\":\n return self.pack_data()\n elif self.method == \"unpack\":\n return self.unpack_data()\n else:\n return None\n\n # pack data\n def pack_data(self):\n package = net_package(self.update_data[\"protocol_num\"][1])\n\n if len(self.update_data) > 0:\n for k, v in self.update_data.items():\n if k != \"protocol_num\":\n if v[0] == \"INT32\":\n package.write_int32(v[1])\n elif v[0] == \"INT64\":\n package.write_int32(v[1])\n elif v[0] == \"INT16\":\n package.write_int16(v[1])\n elif v[0] == \"STRING\":\n if self.update_data[\"protocol_num\"][1] == 2523:\n if type(v[1]) == list:\n for i in v[1]:\n if '\\x00' in i:\n package.write_string(i[: len(i) - 1])\n else:\n package.write_string(i)\n else:\n # print(u\"单张出牌操作数据: %s\" % str(v[1]))\n package.write_string(str(v[1]))\n else:\n package.write_string(str(v[1]))\n # print(\"package.encode: %s\" % package.encode())\n return package.encode()\n\n # unpack data\n def 
unpack_data(self):\n pass\n\n# Update protocol data.\nclass CommonUtils2UpdateData:\n def __init__(self, original_data, original_data_keys_list, update_data, method):\n self.original_data = original_data\n self.original_data_keys_list = original_data_keys_list\n self.data = update_data\n self.method = method\n\n def update_data(self):\n if len(self.data) > 0:\n for k, v in self.data.items():\n if k in self.original_data_keys_list:\n self.original_data[k][1] = v\n\n # print(\"data: %s\" % self.original_data)\n ud = Encrypt2ParseData(self.original_data, self.method)\n return ud.get_update_data()\n\n\n# common api to call update data\nclass CallUpdateApi:\n def __init__(self, original_data, original_data_keys_list, update_data, method):\n self.original_data = original_data\n self.original_data_keys_list = original_data_keys_list\n self.update_data = update_data\n self.method = method\n self.real_data = None\n self.get_real_data()\n\n def get_real_data(self):\n cu = CommonUtils2UpdateData(self.original_data, self.original_data_keys_list, self.update_data, self.method)\n self.real_data = cu.update_data()\n\n######################\n### 登录\n######################\n\n# login entity --> CS\nclass CSLogin:\n def __init__(self, data={}):\n self.cs_login_entity = {\"protocol_num\": [\"INT32\", 1000], \"mid\": [\"INT32\", 0], \"sesskey\": [\"STRING\", \"\"],\n \"gp\": [\"INT32\", 0], \"sid\": [\"INT32\", 1500]}\n self.cs_keys_list = self.cs_login_entity.keys()\n self.update_data = data\n self.method = \"pack\"\n self.real_data = CallUpdateApi(self.cs_login_entity, self.cs_keys_list, self.update_data, self.method).real_data\n\n\n# login entity --> SC\nclass SCLogin:\n def __init__(self, data={}):\n self.sc_entity_data = {\"ErrorCode\": [\"INT32\", 0], \"UnUsed1\": [\"INT32\", 100], \"UnUsed2\": [\"INT32\", 100]}\n self.sc_login_keys_list = self.sc_entity_data.keys()\n self.update_data = data\n self.method = \"unpack\"\n\nclass SCReconnect:\n def __init__(self):\n self.sc_entity_data = {\"playType\": [\"INT32\", 0],\"RoomID\": [\"INT32\", 0], \"SID\": [\"INT32\", -100]}\nclass SCOffline:\n def __init__(self):\n self.sc_entity_data = {\"SeatID\": [\"INT32\", 0]}\n\n######################\n### 房间内容\n######################\n\n# 创建房间 --1010\nclass CSCreateRoom():\n def __init__(self, data={}):\n self.entity = {\n \"protocol_num\": [\"INT32\", 1010],\n \"GameType\": [\"STRING\", \"81\"], # 游戏类型\n \"GameInnings\": [\"INT32\", 0], # 局数\n \"GamePaiNum\": [\"INT32\", 0], # 牌的张数\n \"GameFirstRule\": [\"INT32\", 0], # 首局规则\n \"GameShouJu3cFirst\": [\"INT32\", 0], # 黑桃3首出规则 -- 0无要求 1 黑桃3先出\n \"GameShowRest\": [\"INT32\", 0], # 显示剩余牌数\n \"GameSelectWanFa1\": [\"INT32\", 0], # 多选1\n \"GameSelectWanFa2\": [\"INT32\", 0], # 多选2\n \"GamePlayers\": [\"INT32\", 3], # 人数 -- 几人(2,3,4)\n \"GamePiao\": [\"INT32\", 0], # 飘\n \"GameJoinGame\": [\"INT32\", 0], # 0普通创房 1代理开房 3茶馆\n \"GameClunId\": [\"INT32\", 0], #\n \"GamePaytype\": [\"INT32\", 0], #是否AA制开房,0否,1是\n \"GameCoverCard\": [\"INT32\", 0], #是否首轮盖牌,0否,1是\n \"GameClubName\": [\"STRING\", \"\"], #俱乐部名字,代理商名字\n \"GameIsCreateEmptyRoom\": [\"INT32\", 0], # 俱乐部名字,代理商名字\n \"GamePassWord\": [\"INT32\", 0], # 密码\n \"GameFZB\": [\"INT32\", 0], # 防作弊\n \"GameAgentDRoom\": [\"INT32\", 0], # 所有人同意解散\n \"GameWanFaIndex\": [\"INT32\", 0], # 玩法下标\n \"GameKoFangKa\": [\"INT32\", 0], # 是否使用自己房卡开房(0用自己的)\n \"GameBaodi\": [\"INT32\", 0], # 报单是否保底 0保底 1不保底\n \"GameIsGuanPai\": [\"INT32\", 0], # 关牌\n }\n self.cs_keys_list = self.entity.keys()\n self.update_data = data\n self.method = 
\"pack\"\n self.real_data = CallUpdateApi(self.entity, self.cs_keys_list, self.update_data, self.method).real_data\n\n\n# 创建房间回包 --1010\nclass SCCreateRoom():\n def __init__(self):\n self.sc_entity_data = {\n \"Type\": [\"INT32\", 0],\n \"ErrorCode\": [\"INT32\", 0],\n \"RoomID\": [\"INT32\", 0],\n \"RoomType\": [\"STRING\", \"\"],\n \"GameInnings\": [\"INT32\", 0],\n \"GamePlayers\": [\"INT32\", 3],\n \"GamePlayOptions\": [\"INT32\", 0],\n \"GameBaseScore\": [\"INT32\", 0],\n \"GameDouble\": [\"INT32\", 0], # 加倍\n \"GameDoubleCondition\": [\"INT32\", 0],\n \"GameClubID\": [\"INT32\", 0],\n \"GameClubName\": [\"STRING\", \"\"],\n \"GameFixedPlay\": [\"INT32\", 0],\n \"GameFixedIndex\": [\"INT32\", -1],\n \"GameLimitIP\": [\"INT32\", 0],\n }\n\n\n# 申请加入房间 --1001\nclass CSApplyToEnterRoom:\n def __init__(self, data):\n self.entity = {\n \"protocol_num\": [\"INT32\", 1001],\n \"RoomID\": [\"INT32\", 0], # 房间ID\n \"Version\": [\"INT32\", 0], # 版本\n \"NetWork\": [\"INT32\", 0], # 网络\n \"PassWord\": [\"INT32\", 0], # 密码\n \"Source\": [\"INT32\", 0], # 0:来自app,1:来自h5\n \"ClubPay\": [\"INT32\", 0] # 0 普通 1代开 3俱乐部\n }\n self.cs_keys_list = self.entity.keys()\n self.update_data = data\n self.method = \"pack\"\n self.real_data = CallUpdateApi(self.entity, self.cs_keys_list, self.update_data,\n self.method).real_data\n\n# 申请加入房间回包 --1001\nclass SCApplyToEnterRoom:\n def __init__(self):\n self.sc_entity_data = {\n \"playType\":[\"INT32\", 0],\n \"SeatID\": [\"INT32\", 0],\n \"RoomID\": [\"INT32\", 0]\n #\"RoomType\": [\"STRING\", \"\"],\n #\"RoomID\": [\"INT32\", 0],\n #\"GameFixedIndex\": [\"INT32\", 0],\n #\"GameClubID\": [\"INT32\", 0],\n #\"RoomState\": [\"INT32\", 0] # 房间状态(0:进入房间(等待开始) 1:游戏中 2:空闲状态)\n }\n\n# room snapshot 1002 快照 -->SC\nclass SCRoomSnapshot:\n def __init__(self):\n self.sc_entity_data = {\"room_owner\": ['INT32', 0], \"room_state\": [\"INT32\",0],\"total_ju\": [\"INT32\",0],\"players_num\": [\"INT32\",0],\n \"_player_info\": [\"INT32\",{\"seat_id\": ['INT32', 0],\"ip\": [\"STRING\", \"\"], \"mid\": ['INT32', 0],\"gp\": ['INT32', 0],\n \"sex\": ['INT32', 0], \"name\": [\"STRING\", \"\"],\n \"icon\": [\"STRING\", \"\"], \"city\": ['STRING', 0],\n \"json_str\": [\"STRING\", 0]}],\n \"banker_seatno\": [\"INT32\", 0], \"seats_size\": [\"INT32\", 0], \"banker_seatno\": [\"STRING\", 0]}\n# desktop snapshot --> SC\nclass SCDesktopSnapshot:\n def __init__(self):\n self.sc_entity_data = {\"room_num\": [\"INT32\", 0], \"room_type\": [\"STRING\", \"\"], \"total_times\": [\"INT32\", 0],\n \"banker_seat_id\": [\"INT32\", 0], \"current_player_seat_id\": [\"INT32\", 0],\n \"current_card\": [\"STRING\", \"\"], \"current_send_card_seat_id\": [\"INT32\", 0],\n \"hupai_type\": [\"INT32\", 0], \"player_num\": [\"INT32\", 0], \"player_info\": [\"INT32\", {\n \"seat_id\": [\"INT32\", 0], \"mid\": [\"INT32\", 0], \"zanli\": [\"INT32\", 0], \"isready\": [\"INT32\", 0],\n \"huxi\": [\"INT32\", 0], \"send_card_num\": [\"INT32\", 0], \"_card_list\": [\"STRING\", \"\"],\n \"molding_card\": [\"INT32\", 0], \"_card_type_list\": [\"INT32\", {\"shoupai_type\": [\"INT32\", 0],\n \"card_num\": [\"INT32\", 0],\n \"_card\": [\"STRING\", \"\"], }]}],\n \"homer_seat_id\": [\"INT32\", 0], \"game_start_type\": [\"INT32\", 0],\n \"player_number\": [\"INT32\", 0],\n \"_player\": [\"INT32\", {\"seat_id\": [\"INT32\", 0], \"jiatuo_daniao\": [\"STRING\", \"\"]}]}\n\n# 解散房间 --1008\nclass CSDissolveRoom:\n def __init__(self):\n self.entity = {\"protocol_num\": [\"INT32\", 1008]}\n self.cs_keys_list = self.entity.keys()\n 
self.update_data = {}\n self.method = \"pack\"\n self.real_data = CallUpdateApi(self.entity, self.cs_keys_list, self.update_data,\n self.method).real_data\n\n# 解散房间回包 --1008\nclass SCDissolveRoom:\n def __init__(self):\n self.sc_entity_data = {\n \"ErrorCode\": [\"INT32\", -100],\n \"DismissSeatID\": [\"INT32\", 0], # 申请解散座位ID\n \"AutoAgreeDissolveTime\": [\"INT32\", 0], # 剩余自动解散时间\n \"RoomPlayers\": [\"INT32\", -100],\n \"RoomDissolveInfo\": [\n \"INT32\",\n {\n \"SeatID\": [\"INT32\", -100],\n \"IsAgree\": [\"INT32\", -100]\n }\n ]\n }\n\n# 同意解散 --1012\nclass CSAgreeDissolve:\n def __init__(self):\n self.cs_club_game_list_data = {\n \"protocol_num\": [\"INT32\", 1012],\n \"Agree\": [\"INT32\", 1]\n }\n self.cs_keys_list = self.cs_club_game_list_data.keys()\n self.update_data = {}\n self.method = \"pack\"\n self.real_data = CallUpdateApi(self.cs_club_game_list_data, self.cs_keys_list, self.update_data,\n self.method).real_data\n\n# 同意解散回包 --1012\nclass SCAgreeDissolve:\n def __init__(self):\n self.sc_entity_data = {\"ErrorCode\": [\"INT32\", -100]}\n\n# 解散房间原因 --5013\nclass SCDissolveReason:\n def __init__(self):\n self.sc_entity_data = {\"RoomId\": [\"INT32\", -100], \"RoomType\": [\"STRING\", \"\"], \"DissolveType\": [\"INT32\", -100]}\n\n# 申请离开房间 --5009\nclass CSLeaveRoom:\n def __init__(self):\n self.entity = {\n \"protocol_num\": [\"INT32\", 4009]\n }\n self.cs_keys_list = self.entity.keys()\n self.update_data = {}\n self.method = \"pack\"\n self.real_data = CallUpdateApi(self.entity, self.cs_keys_list, self.update_data,\n self.method).real_data\n\n# 申请离开房间 --5009\nclass SCLeaveRoom:\n def __init__(self):\n self.sc_entity_data = {\"Mid\": [\"INT32\", 00], \"SeatID\": [\"INT32\", 99]}\n#写牌 --65534\nclass CSMakeCardsType:\n def __init__(self, data):\n #self.cs_make_cards_type_data = {\"protocol_num\": [\"INT32\", 32766], \"mid\": [\"INT32\", 0], \"cards_list\": [\"STRING\", \"\"]}\n self.entity = {\"protocol_num\": [\"INT32\", 65534],\"cards_list\": [\"STRING\", \"\"]}\n self.cs_keys_list = self.entity.keys()\n self.update_data = data\n self.method = \"pack\"\n self.real_data = CallUpdateApi(self.entity, self.cs_keys_list, self.update_data,\n self.method).real_data\n\n##########################\n### 开始牌局\n##########################\n\n# 准备 --1005\nclass CSReadyForGame:\n def __init__(self, data={}):\n self.entity = {\"protocol_num\": [\"INT32\", 1005]}\n self.cs_keys_list = self.entity.keys()\n self.update_data = data\n self.method = \"pack\"\n self.real_data = CallUpdateApi(self.entity, self.cs_keys_list, self.update_data,\n self.method).real_data\n\n# 准备回包 --1005\nclass SCReadyForGame:\n def __init__(self):\n self.sc_entity_data = {\"SeatID\": [\"INT32\", 0]}\n\n\n# 游戏开始回包 --1007\nclass SCGameStart:\n def __init__(self):\n self.sc_entity_data = {\"ErrorCode\": [\"INT32\", -100], \"CurrentInnings\":[\"INT32\", 0]}\n\n\n# 发牌回包 --2520\nclass SCReceiveCards:\n def __init__(self):\n self.sc_entity_data = {\n \"CardsNum\": [\"INT32\", 0],\n \"CardsInfo\": [\"INT32\", {\"Card\":[\"STRING\", \"\"]}]\n }\n\n# 通知玩家出牌 --1021\nclass SCInformPlayerToDo:\n def __init__(self):\n self.sc_entity_data = {\n \"SeatID\": [\"INT32\", 0],\n \"OpSize\": [\"INT32\", 0],\n \"op\": [\"INT32\", 0]\n }\n\n\n# 通知用户做相应的操作回包 --1022\nclass SCPlayerCanDo:\n def __init__(self):\n self.sc_entity_data = {\n \"SeatID\": [\"INT32\", 0],\n \"OperateNum\": [\"INT32\", 0],\n \"OperateInfo\": [\"INT32\", -100], # 1 无效 89:过牌 100:出牌 104:取消\n \"OperateSequence\": [\"STRING\", \"\"],\n \"NotByYao\": [\"INT32\", 0]\n }\n\n\n# 
用户请求出牌 --2523\nclass CSPlayerOperate:\n def __init__(self, data={}):\n self.entity = {\"protocol_num\": [\"INT32\", 2523], \"OperateSequence\": [\"STRING\", \"\"], \"CardNum\": [\"INT32\", 0], \"Cards\": [\"STRING\", \"\"]}\n self.cs_keys_list = self.entity.keys()\n self.update_data = data\n self.method = \"pack\"\n self.real_data = CallUpdateApi(self.entity, self.cs_keys_list, self.update_data,\n self.method).real_data\n\n# 用户请求回包 --2523\nclass SCPlayerOperate:\n def __init__(self):\n self.sc_entity_data = {\n # \"ErrorCode\": [\"INT32\", -100],\n \"SeatID\": [\"INT32\", -100],\n \"HandCardNum\": [\"INT32\", -100], # 手牌数\n \"CardNum\": [\"INT32\", -100],\n \"Cards\": [\"STRING\", \"\"],\n #\"IsAutoSendCard\": [\"INT32\", -100],\n }\n\n\n\n# 小局结算 --2531\nclass SCSettlement:\n def __init__(self):\n self.sc_entity_data = {\n \"Type\": [\"INT32\", -100],\n \"BankerSeatID\": [\"INT32\", -100],\n \"RemainInnings\": [\"INT32\", -100],\n \"GamePlayers\": [\"INT32\", -100],\n \"PlayerInfo\":[\"INT32\",\n {\n \"SeatID\": [\"INT32\", -100],\n \"Mid\": [\"INT32\", -100],\n \"Name\": [\"STRING\", \"\"],\n \"Icon\": [\"STRING\", \"\"],\n \"BombCount\": [\"INT32\", -100], # 炸弹次数\n \"CurrentScore\": [\"INT32\", -100], # 当局得分\n #\"SeatScore\": [\"INT64\", -100], # 座位分\n \"PiaoFenScore\": [\"INT32\", 0], #飘分得分\n \"PiaoFen\": [\"INT32\", 0], #是否飘分\n \"SeatScore\": [\"INT64\", -100], # 总分\n \"HandCardNum\": [\"INT32\", -100], # 手牌牌数\n \"Cards\": [\"STRING\", \"\"], # 牌\n }\n ],\n \"RoomInfo\": [\"STRING\", \"\"], # 房间信息\n \"ZhongBirdSeatID\": [\"INT32\", -100], # 中鸟座位号\n \"ReservedParam\": [\"STRING\", \"\"], # 预留字段\n }\n\n# 总结算 --2535\nclass SCTotalSettlement:\n def __init__(self):\n self.sc_entity_data = {\n \"type\": [\"INT32\", -100],\n \"RoomOwner\": [\"INT32\", -100],\n \"GamePlayers\": [\"INT32\", 0],\n \"PlayerInfo\": [\"INT32\",\n {\n \"SeatID\": [\"INT32\", -100],\n \"Mid\": [\"INT32\", -100],\n \"Name\": [\"STRING\", \"\"],\n \"Icon\": [\"STRING\", \"\"],\n \"SeatScore\": [\"INT64\", -100], # 座位分\n \"BombCount\": [\"INT32\", -100], # 炸弹次数\n \"WinCount\": [\"INT32\", -100], # 赢得局数\n \"LoseCount\": [\"INT32\", -100], # 输的局数\n \"OneGameMaxScore\": [\"INT32\", -100], # 当局最高得分\n \"NSpringCount\": [\"INT32\", -100] #被春天数\n\n }\n ],\n \"RoomInfo\": [\"STRING\", \"\"]\n }","sub_path":"qs-RegressTest-yg/RunFast/Common/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":18185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"344541241","text":"def wordBreak(self, s, wordDict):\n hMap = set(wordDict)\n dp = [False for i in range(len(s) + 1)]\n dp[0] = True\n \n for i in range(1, len(s) + 1):\n for j in range(i - 1, -1, -1):\n if dp[j] and s[j:i] in hMap:\n dp[i] = True\n break\n return dp[-1]","sub_path":"LC/139_Word_Break.py","file_name":"139_Word_Break.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"329123635","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom . 
import views\n\nurlpatterns = [\n path('',views.index,name=\"index\"),\n path('add/',views.add,name=\"add\"),\n path('view/',views.view,name=\"view\"),\n path('delete/',views.delete,name=\"delete\"),\n path('update/',views.update,name=\"update\"),\n]","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"302368453","text":"import os\nfrom azureml.pipeline.steps import PythonScriptStep\nfrom azureml.core.runconfig import RunConfiguration\nfrom azureml.core.conda_dependencies import CondaDependencies\nfrom azureml.pipeline.core import PipelineData\nfrom azureml.pipeline.core import PipelineParameter\nfrom azureml.pipeline.steps import EstimatorStep\nfrom azureml.train.dnn import PyTorch\n\ndef evaluate_step(model_dir, test_dir, compute_target):\n '''\n This step evaluates the trained model on the testing data and outputs the accuracy.\n\n :param model_dir: The reference to the directory containing the trained model\n :type model_dir: DataReference\n :param test_dir: The reference to the directory containing the testing data\n :type test_dir: DataReference\n :param compute_target: The compute target to run the step on\n :type compute_target: ComputeTarget\n \n :return: The evaluate step, step outputs dictionary (keys: accuracy_file)\n :rtype: EstimatorStep, dict\n '''\n\n accuracy_file = PipelineData(\n name='accuracy_file', \n pipeline_output_name='accuracy_file',\n datastore=test_dir.datastore,\n output_mode='mount',\n is_directory=False)\n\n outputs = [accuracy_file]\n outputs_map = { 'accuracy_file': accuracy_file }\n \n estimator = PyTorch(\n source_directory=os.path.dirname(os.path.abspath(__file__)),\n entry_script='evaluate.py',\n framework_version='1.3',\n compute_target=compute_target,\n use_gpu=True)\n\n step = EstimatorStep(\n name=\"Evaluate Model\",\n estimator=estimator,\n estimator_entry_script_arguments=[\n '--test_dir', test_dir, \n '--model_dir', model_dir, \n '--accuracy_file', accuracy_file\n ],\n inputs=[model_dir, test_dir],\n outputs=outputs,\n compute_target=compute_target,\n allow_reuse=True)\n\n return step, outputs_map\n","sub_path":"modules/evaluate/evaluate_step.py","file_name":"evaluate_step.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"104208985","text":"import csv, random, nltk\n\n\n\nids =[]\nsentiments =[]\n\nids1 = []\ntexts = []\ndef read_csv_file(filename, delimiter = ','):\n ids = []\n values =[]\n with open(filename, 'rb') as csvfile:\n reader = csv.reader(csvfile,delimiter=delimiter)\n for row in reader:\n ids.append(row[0])\n values.append(row[1])\n csvfile.close()\n return (ids, values)\n\n\nids, sentiments = read_csv_file('./data/sentiment_labels.txt', '|')\n\n\nids1, texts = read_csv_file('./data/sentlex_exp12.txt')\n\nprint(len(sentiments))\nprint(len(texts))\n\ndata = []\nfor i in range(len(texts)):\n data.append((texts[i], 'neg' if float(sentiments[i]) <= 0.4\n else ('pos' if float(sentiments[i]) > 0.6 else 'nut') ))\n\ntweets =[]\n\nfor(words, sentiment) in data:\n words_filtered = [e.lower() for e in words.split() if len(e) >= 3]\n tweets.append((words_filtered, 
sentiment))\n\nrandom.shuffle(tweets)\n\nprint(tweets[:1])","sub_path":"election2016/nltktest/moviereview1.py","file_name":"moviereview1.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"511827187","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 07 19:26:23 2017\n\n@author: Noah\n\"\"\"\n\nfrom PyQt4 import QtCore, QtGui\nfrom CustomMViewTiles.tetrix import TetrixWindow as tetrix\nclass tetris(QtGui.QFrame):\n def __init__(self):\n super(tetris, self).__init__(None)\n self.setObjectName(\"myParentWidget\");\n self.setStyleSheet(\"QFrame#myParentWidget{background: rgb(52, 73, 94);\"\n \"margin:0px; border:2px solid rgb(0, 0, 0);}\"\n \"QPushButton{color:rgb(189,195, 199); background: rgb(70,80,88)}\"\n \"QLabel{color:rgb(189,195, 199); background: rgb(52, 73, 94)}\"\n )\n self.hidden = True\n self.font = QtGui.QFont()\n self.font.setBold(False)\n self.font.setWeight(50)\n self.font.setKerning(True)\n self.font.setPointSize(20)\n self.label = QtGui.QLabel(\"Tetris\")\n self.label.setStyleSheet(\"QLabel{color:rgb(189,195, 199); background : rgb(52, 73, 94)}\")\n self.label.setFont(self.font)\n self.button = QtGui.QPushButton(\"Tetris\")\n \n self.button.clicked.connect(self.toggleTetris)\n self.layout = QtGui.QHBoxLayout()\n self.vlayout = QtGui.QVBoxLayout()\n self.setLayout(self.vlayout)\n self.vlayout.addWidget(self.label)\n self.buttonLayout = QtGui.QHBoxLayout()\n \n self.buttonLayout.addWidget(self.button)\n self.buttonLayout.addStretch()\n self.vlayout.addLayout(self.buttonLayout)\n self.vlayout.addLayout(self.layout)\n self.tet = tetrix()\n self.tet.hide()\n self.layout.addWidget(self.tet)\n self.layout.addStretch()\n \n def toggleTetris(self):\n if self.hidden:\n self.tet.show()\n self.hidden = False\n self.button.setText(\"Hide Tetris\")\n else:\n self.tet.hide()\n self.hidden = True\n self.button.setText(\"Show Tetris\")\n","sub_path":"GUI/LeidenVitalsGUI/CustomMViewTiles/tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"360982112","text":"import numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\n\n\nloansData = pd.read_csv('loansData.csv')\nintrate = loansData['Interest.Rate']\nloanamt = loansData['Amount.Requested']\nfico = loansData['FICO.Score']\n\n# The dependent variable\ny = np.matrix(intrate).transpose()\n# The independent variables shaped as columns\nx1 = np.matrix(fico).transpose()\nx2 = np.matrix(loanamt).transpose()\nx = np.column_stack([x1,x2])\n\nX = sm.add_constant(x)\nmodel = sm.OLS(y,X)\nf = model.fit()","sub_path":"linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"222153008","text":"def bfs(rows, cols, startrow, startcol, endrow, endcol):\n grid = [[-1 for j in range(cols)] for i in range(rows)]\n xmoves = [2, 2, 1, 1, -2, -2, -1, -1]\n\n ymoves = [1, -1, 2, -2, 1, -1, 2, -2]\n\n def getneighbors(x, y):\n neighbors = []\n\n for i in range(len(xmoves)):\n if x + xmoves[i] < rows and x + xmoves[i] >= 0 and y + ymoves[i] < cols and y + ymoves[i] >= 0 and grid[ x + xmoves[i]][y + ymoves[i]]==-1:\n neighbors.append((x + xmoves[i], y + ymoves[i]))\n print(x,y,neighbors)\n return neighbors\n\n queue = []\n count = 0\n\n if startrow == endrow and startcol == endcol:\n return 
count\n else:\n queue.append((startrow, startcol))\n\n while queue and count < rows * cols:\n currrow,currcol = queue.pop(0)\n\n count+=1\n if currrow == endrow and currcol == endcol:\n return grid[currrow][currcol]+1\n else:\n for nextrow, nextcol in getneighbors(currrow, currcol):\n grid[nextrow][nextcol]=grid[currrow][currcol]+1\n queue.append((nextrow, nextcol))\n\n\n return -1\n\nrows = 5\ncols = 5\nstart_row = 0\nstart_col = 0\nend_row = 4\nend_col = 1\n#print(get_neighbours((0,0),grid))\n'''\nrows = 2\ncols = 50000\nstart_row = 1\nstart_col = 997\nend_row = 0\nend_col = 49997\n'''\n\n\nrows = 3\ncols = 8\nstart_row = 0\nstart_col = 6\nend_row = 2\nend_col = 0\n\n#print(find_minimum_number_of_moves(rows, cols, start_row, start_col, end_row, end_col))\n#print(find_minimum_number_of_moves_bfs(rows, cols, start_row, start_col, end_row, end_col))\nprint(bfs(rows,cols,start_row,start_col,end_row,end_col))","sub_path":"Python/Practice/Graphs/knights.py","file_name":"knights.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"353300075","text":"#hmm_CAD\r\n#Author: Sarah A. Mounho\r\n#Created: 11/18/2019\r\n#Last edited: 12/9/2019\r\n#Description:\r\n#Script to create a bar chart to compare sampled sequences to predicted sequences\r\n#for validation of model\r\n#input:\r\n#transition state matrix where one variable is manipulated to test response to treatment\r\n\r\n#Package dependencies: numpy (1.16.4), matplotlib (3.1.1), hmmlearn (0.2.2)\r\n#download instructions for hmmlearn: https://pypi.org/project/hmmlearn/\r\n#Python 3.7.0\r\n\r\n#load packages\r\nimport numpy as np\r\nfrom hmmlearn import hmm\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n \r\n#set up transition matrixes\r\n#transition matrix\r\n#states:\r\n#healthy_start(0), high bp(1), high ldl(2), smoker(3), heart attack(4), stable/no stent(5), stent(6)\r\ntransmat_nobp = np.array([[0.162944787, 0.335688062, 0.329781527, 0.15553875, 0.001300225, 0.014746649, 0],#0\r\n [1.0, 0, 0, 0, 0, 0, 0],#1\r\n [0.354175611, 0, 0.60305577, 0, 0.003464699, 0.03930392, 0], #2\r\n [0.292883373, 0, 0, 0.683394538, 0.001921803, 0.021800286, 0],#3\r\n [0, 0, 0, 0, 0.003619055, 0.149457142, 0.846923803],#4\r\n [0.612, 0, 0, 0, 0.081, 0, 0.307],#5\r\n [0.697, 0, 0, 0, 0.089, 0, 0.214]])#6\r\ntransmat_noldl = np.array([[0.162944787, 0.335688062, 0.329781527, 0.15553875, 0.001300225, 0.014746649, 0],#0\r\n [0.634635493, 0.298651997, 0, 0, 0.005404855, 0.061307655, 0],#1\r\n [1.0, 0, 0, 0, 0, 0, 0], #2\r\n [0.292883373, 0, 0, 0.683394538, 0.001921803, 0.021800286, 0],#3\r\n [0, 0, 0, 0, 0.003619055, 0.149457142, 0.846923803],#4\r\n [0.612, 0, 0, 0, 0.081, 0, 0.307],#5\r\n [0.697, 0, 0, 0, 0.089, 0, 0.214]])#6\r\ntransmat_nosmoker = np.array([[0.162944787, 0.335688062, 0.329781527, 0.15553875, 0.001300225, 0.014746649, 0],#0\r\n [0.634635493, 0.298651997, 0, 0, 0.005404855, 0.061307655, 0],#1\r\n [0.354175611, 0, 0.60305577, 0, 0.003464699, 0.03930392, 0], #2\r\n [1.0, 0, 0, 0, 0, 0, 0],#3\r\n [0, 0, 0, 0, 0.003619055, 0.149457142, 0.846923803],#4\r\n [0.612, 0, 0, 0, 0.081, 0, 0.307],#5\r\n [0.697, 0, 0, 0, 0.089, 0, 0.214]])#6\r\n\r\ndef hmm_CAD_inverse(transmat, name):\r\n\r\n #settings for plots\r\n plt.rcParams.update({'font.size': 25})\r\n plt.rc('xtick', labelsize=30)\r\n plt.rc('ytick', labelsize=30)\r\n\r\n #set seed\r\n np.random.seed(1998)\r\n\r\n #always start healthy\r\n #when setting random seed, observed that starting state\r\n 
#did not have an impact on samples/predictions\r\n startprob = np.array([1.0, 0, 0, 0, 0, 0, 0])\r\n\r\n #emission states healthy_end(1), hospitilization(2), death(3)\r\n emission_prob = np.array([[0, 0, 0], #1\r\n [0, 0, 0], #2\r\n [0, 0, 0], #3\r\n [0, 0, 0], #4\r\n [0, 0, 0.14], #5\r\n [0.279, 0.325, 0.091], #6\r\n [0.35, 0.357, 0.089]]) #7\r\n #create an instance of the model \r\n model = hmm.MultinomialHMM(n_components=7)\r\n model.startprob_ = startprob\r\n model.transmat_ = transmat\r\n model.emissionprob_ = emission_prob\r\n #draw samples given the startprob, transmat and emissionprob\r\n X, Z = model.sample(100)\r\n X_array = np.array(X)\r\n\r\n #plt the emmission data\r\n plt.plot(X, '.-', label='observations', ms=6,\r\n mfc='orange', alpha=0.7)\r\n #indicate emission\r\n emit = ['Healthy', 'Hospitalized', 'Dead']\r\n plt.xlabel('Iteration')\r\n ticks = [0, 1, 2]\r\n plt.yticks(ticks, emit, rotation = 60)\r\n plt.legend(loc='best')\r\n plt.title('Observation Sampled Sequence')\r\n plt.show()\r\n\r\n #train the model\r\n #training HMM parameters and inferring the hidden state sequence\r\n #based on the sampled outcome sequence\r\n remodel = hmm.MultinomialHMM(n_components=7, n_iter=100)\r\n remodel.fit(X)\r\n\r\n #predict hidden states\r\n prob, sequence = remodel.decode(X)\r\n #log probabilitity of the produced state sequence (only meaningful in comparison of models)\r\n #may use to discern differences in different random seeds on model accuracy \r\n ##print('prob:\\n',prob) #uncomment if testing different random seeds.\r\n\r\n #visualize sampled hidden state sequence and predicted hidden state sequence\r\n #plot the sampled data (transition-hidden states)\r\n plt.plot(Z, '.-', label='sampled', ms=6,\r\n mfc = 'orange', alpha=0.7)\r\n plt.plot(sequence, '.-', label='predicted', ms=6,\r\n mfc='black', alpha=0.7)\r\n #indicate component numbers\r\n states = ['Healthy', 'High BP', 'High LDL', 'Smoker', 'Heart\\nattack', 'Stable\\nno stent', 'Stent'] \r\n plt.xlabel('Iteration')\r\n ticks = [0, 1, 2, 3, 4, 5, 6]\r\n plt.yticks(ticks, states)\r\n plt.legend(loc='best')\r\n plt.title('Hidden State Sampled and Predicted Sequence')\r\n plt.show()\r\n\r\n #validate the results\r\n print()\r\n sample_outcome_count = pd.value_counts(X_array.flatten())\r\n print('Counts for', name,':')\r\n print('Counts for sampled outcomes:\\n', sample_outcome_count)\r\n print()\r\n sample_state_count = pd.value_counts(Z)\r\n print('Counts for sampled states:\\n', sample_state_count)\r\n print()\r\n predict_state_count = pd.value_counts(sequence)\r\n print('Counts for predicted outcomes:\\n', predict_state_count)\r\nhmm_CAD_inverse(transmat_nobp, 'Managed BP')\r\nhmm_CAD_inverse(transmat_noldl, 'Managed LDL Cholesterol')\r\nhmm_CAD_inverse(transmat_nosmoker, 'Managed Smoking')\r\n\r\n#Function Treatment_Response\r\n#input: counts for [healthy, hospitalized, dead] for\r\n#normal, managed bp, managed ldl, and managed smoking\r\n#outputs from hmm_CAD.py and inverseMC_CAD hmm_CAD_inverse\r\n#output:\r\n#bar chart comparing observation count\r\n\r\n#inputs\r\nall_risk = [93, 6, 1]\r\nno_bp = [95, 4, 1]\r\nno_ldl = [96, 3, 1]\r\nno_smoke = [93, 6, 1]\r\n\r\ndef Treatment_Response(all_risk, no_bp, no_ldl, no_smoke):\r\n #bar charts\r\n import matplotlib\r\n import matplotlib.pyplot as plt\r\n import numpy as np\r\n #settings for plots\r\n plt.rcParams.update({'font.size': 25})\r\n plt.rc('xtick', labelsize=30)\r\n plt.rc('ytick', labelsize=30)\r\n\r\n labels = ['Healthy', 'Hospitalized', 'Dead']\r\n x = 
np.arange(len(labels))\r\n width = 0.1\r\n\r\n fig, ax = plt.subplots()\r\n rects1 = ax.bar(x -0.15, all_risk, width, label='Normal')\r\n rects2 = ax.bar(x-0.05, no_bp, width, label='Managed blood pressure')\r\n rects3 = ax.bar(x+0.05, no_ldl, width, label='Managed LDL cholesterol')\r\n rects4 = ax.bar(x+0.15, no_smoke, width, label='Managed smoking')\r\n\r\n ax.set_ylabel('Count')\r\n ax.set_title('Counts of observations')\r\n ax.set_xticks(x)\r\n ax.set_xticklabels(labels)\r\n ax.legend()\r\n \r\n def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.annotate('{}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 3), # 3 points vertical offset\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom')\r\n autolabel(rects1)\r\n autolabel(rects2)\r\n autolabel(rects3)\r\n autolabel(rects4)\r\n plt.show()\r\n\r\nTreatment_Response(all_risk, no_bp, no_ldl, no_smoke)\r\n","sub_path":"inverseMC_CAD.py","file_name":"inverseMC_CAD.py","file_ext":"py","file_size_in_byte":7639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"242213923","text":"# %%\nimport pyspark\nfrom pyspark import SparkContext\nfrom pyspark.sql import SparkSession, Row\nfrom pyspark.sql.types import StructField, StringType, StructType, LongType\nfrom pyspark.sql.functions import col, column, expr\n\n# %%\ndef get_spark_context():\n sc = SparkContext()\n sc.stop()\n conf = pyspark.SparkConf().setAll(\n [('spark.executor.cores', '4'),\n ('spark.cores.max', '4'),\n ('spark.executor.memory', '16g'),\n ('spark.driver.extraJavaOptions', '-Xms4096m -Xmx4096m -XX:MaxPermSize=4096m -XX:MaxNewSize=4096')])\n spark = SparkSession.builder.config(conf=conf).getOrCreate()\n return spark\n\n\n# %%\nsc = get_spark_context()\n\n# %%\nmy_manual_schema = StructType([\n StructField('DEST_COUNTRY_NAME', StringType(), True),\n StructField('ORIGIN_COUNTRY_NAME', StringType(), True),\n StructField('count', LongType(), False, metadata={'hello': 'world'})\n])\ndf = sc.read.format('json').schema(my_manual_schema).load('data/flight-data/json/2015-summary.json')\n# %%\ndf.printSchema()\nprint(df.schema)\n# %%\ndf.columns\n# %%\ndf.first()\n# %%\nr_1 = Row('Hejka', 12, 1, False, None)\nr_1[0]\nr_1[2]\n# %%\n","sub_path":"my_code/data_wrangling/app_3.py","file_name":"app_3.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"468835080","text":"import numpy as np\nimport matplotlib as mpl\nmpl.use('agg')\nimport sys\nfrom matplotlib import pyplot as plt\nfrom os import listdir, makedirs \nfrom typing import List\nfrom os.path import exists\n\n\nclass Histogram:\n values = None\n n = None\n nbins = None\n data = None\n binwidth = None\n mean = None\n stdev = None\n\n def __init__(self, values:list):\n # entries\n self.values = values\n self.n = len(self.values)\n \n # bin number\n scale = 500\n self.nbins = int(scale * (self.values.max()-self.values.min())) \n\n # compute histogram data\n bins, ranges = np.histogram(self.values, self.nbins)\n\n middles = []\n for lower, upper in zip( ranges[:-1], ranges[1:]):\n middles.append((upper + lower) / 2)\n self.data = list(zip(middles, bins))\n self.binwidth = middles[1] - middles[0]\n\n # calculate mean and standard deviation\n Px = 0\n Px2 = 0\n\n for m, b in self.data:\n Px += b * m\n Px2 += b * (m ** 2)\n self.mean = Px / self.n\n self.stdev = np.sqrt(Px2 / self.n - (self.mean) ** 2)\n\n\n def 
PlotHistogram(self, title:str, xlabel:str, ylabel:str, filename:str):\n # plots histogram of loaded data\n x, y = zip(*self.data)\n plt.bar(x, y, width = self.binwidth, align = 'center', color = '#1f77b4', edgecolor = '#1f77b4')\n \n # formatting\n plt.ylim(0,)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title('%s\\nMean: %.4e | Std: %.4e | Entries: %i | Bins: %i' % (title, self.mean, self.stdev, self.n, self.nbins))\n \n if not exists('plots/histograms/%s' % filename):\n makedirs('plots/histograms/%s' % filename)\n\n plt.savefig('plots/histograms/%s/%s.png' % (filename, title))\n print('Histogram saved at: plots/histograms/%s/%s.png' % (filename, title))\n plt.clf()\n\n\n def SaveResults(self, filename:str, team:str):\n path:str = 'results/summaries/%s/' % filename\n results = [self.mean, self.stdev]\n\n if not exists(path):\n makedirs(path) \n np.savetxt('%s%s.sum' % (path, team), results) \n\n\ndef LoadMatches(filename:str):\n datafiles:List[str] = [file for file in listdir('data_files/csvs/%s' % filename) if file.split('.')[-1] == 'sum']\n num_files:int = len(datafiles)\n print('%s: %i csv files found' % (filename, num_files))\n data = None\n \n for i, datafile in enumerate(datafiles):\n dat = np.loadtxt('data_files/csvs/%s/%s' % (filename, datafile), dtype=float)\n dat = np.reshape(dat, (1,2))\n\n if i == 0:\n data = dat \n else:\n data = np.concatenate((data, dat), 0)\n\n print('%s: %i csv files loaded' % (filename, num_files))\n print(data.shape)\n return(data)\n\n\ndef main(date:str, run_name:str):\n filename:str = '%s.%s' % (date, run_name)\n data:np.ndarray = LoadMatches(filename)\n homedata:List[float] = data[:,0]\n awaydata:List[float] = data[:,1]\n\n # create histograms\n homehist = Histogram(homedata)\n awayhist = Histogram(awaydata)\n\n # save histograms and results\n homehist.PlotHistogram('%s | Home' % filename, 'Team Spatial Control', 'Counts', filename)\n homehist.SaveResults(filename, 'Home')\n\n awayhist.PlotHistogram('%s | Away' % filename, 'Team Spatial Control', 'Counts', filename)\n awayhist.SaveResults(filename, 'Away')\n\n\nif __name__ == '__main__':\n # process sys args\n if len(sys.argv) == 3:\n run_name:str = sys.argv[1]\n date:str = sys.argv[2]\n\n else:\n print('''\n %s - Plots histograms of mean spatial control per match\n Args:\n [1] - Run Name (MatchType:HomeTeam:AwayTeam)\n [2] - Date (DD.MM)\n ''' % sys.argv[0])\n exit()\n\n # args complete, run main function\n main(date, run_name)","sub_path":"scripts/Sem2MatchAnalysis.py","file_name":"Sem2MatchAnalysis.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"257519646","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Philipp Temminghoff\n\"\"\"\n\nfrom prettyqt import gui\n\n\nclass CompositeValidator(gui.Validator):\n\n def __repr__(self):\n return f\"CompositeValidator({self.validators})\"\n\n def __init__(self, validators=None, parent=None):\n super().__init__(parent)\n self.validators = validators if validators is not None else []\n\n def __getstate__(self):\n return dict(validators=self.validators)\n\n def __setstate__(self, state):\n self.__init__()\n self.validators = state.get(\"validators\", [])\n\n def validate(self, text, pos=0):\n vals = [v.validate(text, pos)[0] for v in self.validators]\n return (min(vals), text, pos)\n\n\nif __name__ == \"__main__\":\n from prettyqt import widgets\n from prettyqt import custom_validators\n val1 = custom_validators.NotEmptyValidator()\n val2 = 
custom_validators.PathValidator()\n val = CompositeValidator([val1, val2])\n app = widgets.app()\n widget = widgets.LineEdit(\"This is a test\")\n widget.setValidator(val)\n widget.show()\n app.exec_()\n","sub_path":"prettyqt/custom_validators/compositevalidator.py","file_name":"compositevalidator.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"451753875","text":"import sys\nsys.path.append('../')\n\nfrom schmactions import schmactions\n\nimport astropy.units as u\nimport pickle\n\nimport matplotlib.pyplot as plt\nfrom scipy.stats import sigmaclip\nimport numpy as np\n\nimport matplotlib as mpl\nfrom matplotlib import rc\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern']})\nrc('text', usetex=True)\nmpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath} \\usepackage{bm}']\n\ntextwidth = 7.10000594991\ncolumnwidth = 3.35224200913\n\ntb_c = ['#4e79a7', '#f28e2b', '#e15759', '#76b7b2', '#59a14f',\n '#edc948', '#b07aa1', '#ff9da7', '#9c755f', '#bab0ac']\n\nxmin = 0\nxmax = 1000\n\ny0min = 20\ny0max = 40\ny1min = -1800\ny1max = -1000\ny2min = 10\ny2max = 30\n\nhistmin = 15\nhistmax = 30\n\nhistmin_thin = 0\nhistmax_thin = 5\n\ndef sclip(a, s=4):\n _, low0, high0 = sigmaclip(a[:,0], low=s, high=s)\n _, low1, high1 = sigmaclip(a[:,1], low=s, high=s)\n _, low2, high2 = sigmaclip(a[:,2], low=s, high=s)\n a0bool = np.logical_and(a[:,0] > low0, a[:,0] < high0)\n a1bool = np.logical_and(a[:,1] > low1, a[:,1] < high1)\n a2bool = np.logical_and(a[:,2] > low2, a[:,2] < high2)\n k0 = np.where(a0bool)[0]\n k1 = np.where(a1bool)[0]\n k2 = np.where(a2bool)[0]\n return k0, k1, k2\n\nzout = pickle.load(open('zout.p', 'rb'))\nxout = pickle.load(open('xout.p', 'rb'))\nres = pickle.load(open('true_res.p', 'rb'))\nJ0, J1, J2 = res['actions'].to_value(u.kpc*u.km/u.s)\n\nzout_thin = pickle.load(open('zout_thin.p', 'rb'))\nxout_thin = pickle.load(open('xout_thin.p', 'rb'))\nres_thin = pickle.load(open('true_res_thin.p', 'rb'))\nJ0_thin, J1_thin, J2_thin = res_thin['actions'].to_value(u.kpc*u.km/u.s)\n\ninit_pos = [8, 0, 0] * u.kpc\ninit_vel = [0, -190, 50] * u.km/u.s\ninit_vel_thin = [0, -190, 10] * u.km/u.s\n\ns = schmactions(init_pos, init_vel)\nsthin = schmactions(init_pos, init_vel_thin)\n\nzact = s.extract_actions(zout)\nxact = s.extract_actions(xout)\n\nztime = s.extract_time(zout)\nxtime = s.extract_time(xout)\n\nzact_thin = sthin.extract_actions(zout_thin)\nztime_thin = sthin.extract_time(zout_thin)\n\nfig, ax = plt.subplots(2, 3, figsize=(textwidth, 4), sharex=True)\n\nfor x,t,a in zip(ax, (ztime, xtime), (zact, xact)):\n # get keys corresponding to 4 sigmaclip\n k0, k1, k2 = sclip(a)\n x[0].plot(t[k0], a[:,0][k0], c=tb_c[0])\n x[1].plot(t[k1], a[:,1][k1], c=tb_c[0])\n x[2].plot(t[k2], a[:,2][k2], c=tb_c[0])\n\n x[0].plot(t, np.full(len(t), J0), c=tb_c[0], ls='dashed')\n x[1].plot(t, np.full(len(t), J1), c=tb_c[0], ls='dashed')\n x[2].plot(t, np.full(len(t), J2), c=tb_c[0], ls='dashed')\n\n # set limits on plots\n x[0].set_ylim(y0min, y0max)\n x[1].set_ylim(y1min, y1max)\n x[2].set_ylim(y2min, y2max)\n for xx in x:\n xx.set_xlim(xmin, xmax)\n\n\nfor x in ax[1]:\n x.set_xlabel(r'$t\\,[\\,\\text{Myr}\\,]$')\n x.set_xticks(np.arange(0,1000,100), minor=True)\n\nfor x in ax[:,0]:\n x.set_ylabel(r'$J_{R,\\text{obs}}\\,[\\,\\text{kpc}\\,\\text{km}/\\text{s}\\,]$')\nfor x in ax[:,1]:\n x.set_ylabel(r'$J_{\\phi,\\text{obs}}\\,[\\,\\text{kpc}\\,\\text{km}/\\text{s}\\,]$')\nfor x in ax[:,2]:\n 
x.set_ylabel(r'$J_{z,\\text{obs}}\\,[\\,\\text{kpc}\\,\\text{km}/\\text{s}\\,]$')\n\nax[0][1].set_title(r'$z\\,\\text{offset}=100\\,\\text{pc}$')\nax[1][1].set_title(r'$x\\,\\text{offset}=100\\,\\text{pc}$')\n\nfig.tight_layout()\nplt.savefig('schmactions_one_orbit.pdf')\nplt.close()\n\nfig, ax = plt.subplots(2,1, figsize=(columnwidth,6.5))\n\nk0, k1, k2 = sclip(zact)\ndJzJz = 0.5*(np.percentile(zact[:,2][k2], 95) - np.percentile(zact[:,2][k2], 5))\n\nax[0].hist(zact[:,2][k2], bins=np.linspace(histmin, histmax, 60),\n edgecolor='k', fc='none', histtype='stepfilled')\nax[0].arrow(J2, 60, dJzJz, 0, head_width=1, head_length=0.4, length_includes_head=True, color='k')\nax[0].text(J2+dJzJz/3.5, 61, r'$\\Delta J_z$', color='k')\n\nax[0].axvline(x=J2, color='k', ls='dashed', lw=1)\nax[0].set_ylabel(r'$\\text{count}$')\nax[0].text(14.5, 75, r'\\text{thick-disk orbit}', color='k')\n\nk0t, k1t, k2t = sclip(zact_thin)\ndJzJz_thin = 0.5*(np.percentile(zact_thin[:,2][k2t], 95) - np.percentile(zact_thin[:,2][k2t], 5))\n\nax[1].hist(zact_thin[:,2][k2t], bins=np.linspace(histmin_thin, histmax_thin, 60),\n edgecolor='k', fc='none', histtype='stepfilled')\nax[1].arrow(J2_thin, 60, dJzJz_thin, 0, head_width=1, head_length=0.15, length_includes_head=True, color='k')\nax[1].text(J2_thin+dJzJz_thin/3.5, 61.5, r'$\\Delta J_z$', color='k')\n\nax[1].axvline(x=J2_thin, color='k', ls='dashed', lw=1)\nax[1].set_ylabel(r'$\\text{count}$')\nax[1].text(1.3, 100, r'\\text{thin-disk orbit}', color='k')\n\nax[1].set_xlabel(r'$J_{z,\\text{obs}}\\,[\\,\\text{kpc}\\,\\text{km}/\\text{s}\\,]$')\n\n\nfig.tight_layout()\nplt.savefig('schmactions_Jz_hist.pdf')\n\n","sub_path":"schmactions/one_orbit/plot_one_orbit.py","file_name":"plot_one_orbit.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"577902996","text":"\"\"\"\nThis program only supports python 3.\n\"\"\"\n\nfrom sys import argv\nfrom os import path\nfrom search_strategies import strategy_by_name\nimport time\n\ndef parse_args():\n\tassert len(argv) == 3, \"Must be called in form: search filename method\"\n\tfilename, method = argv[1], argv[2]\n\treturn filename, method\n\ndef puzzle_from_file(filename):\n\t\"\"\"\n\tConverts a puzzle file into type ([][], [][]), where the first element of the tuple is the \n\t initial_state and the second is the desired_state.\n\tExample file:\n\t2x3 | width x height\n\t1 2 3 4 0 5 | initial_state\n\t3 1 2 4 5 0 | desired_state\n\t\"\"\"\n\tdef state_from_line(line):\n\t\t\"\"\"\n\t\tConverts a single line of a puzzle file that represents a state into a 2d array.\n\t\tExample: \"1 2 3 4 0 5\" is converted into [[1,2],[3,4],[0,5]] if puzzle dimensions are 2x3\n\t\t\"\"\"\n\t\tstate = [[] for x in range(width)]\n\t\tfor x in range(width):\n\t\t\tfor y in range(height):\n\t\t\t\tstate[x].append(line[x + y*width])\n\t\treturn state\n\n\tassert path.isfile(filename), \"File '\" + filename + \"' does not exist!\"\n\twith open(filename) as file:\n\t\tcontents = file.read().split('\\n')\n\t\tdimensions = contents[0].split('x')\n\t\twidth, height = int(dimensions[0]), int(dimensions[1])\n\t\tinitial_state = state_from_line(contents[1].split(' '))\n\t\tdesired_state = state_from_line(contents[2].split(' '))\n\t\treturn initial_state, desired_state\n\ndef _main():\n\tfilename, method = parse_args()\n\tinitial_state, desired_state = puzzle_from_file(filename)\n\t\n\tstart_time = time.process_time() # does not include time process was swapped 
out\n\tnumber_of_nodes, solution = strategy_by_name(method)(initial_state, desired_state)\n\tprint(\"Time taken: \" + str(time.process_time() - start_time) + \" secs\")\n\t\n\tprint(filename, method, number_of_nodes)\n\tif solution != None:\n\t\tfor action in solution: \n\t\t\tprint(action, end='')\n\telse:\n\t\tprint(\"No solution found.\")\n\nif __name__ == \"__main__\":\n\t_main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"156657842","text":"\nfrom django.db import models\nfrom api.managers import InvitationManager\nfrom django.contrib.auth.models import User\nfrom api.models import Curso\n\nclass Invitation(models.Model):\n \n\n code = models.CharField(max_length=50, unique=True)\n\n issued_by = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='issued_by'\n )\n used_by = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n null=True,\n )\n\n curso = models.ForeignKey(Curso, on_delete=models.CASCADE)\n\n used = models.BooleanField(default=False)\n used_at = models.DateTimeField(blank=True, null=True)\n\n # Manager\n objects = InvitationManager()\n\n def __str__(self):\n \n return '#{}: {}'.format(self.curso.curso, self.code)\n","sub_path":"api/models/invitations.py","file_name":"invitations.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"572165838","text":"# programme permettant de créer, écouter et jouer à des histoires intéractives\r\n\r\n# fait par Didier Mathias\r\n\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\nfrom modules import module_lecture_fichier as read\r\n\"\"\"\r\najouter un menu d'aide et d'option\r\n\"\"\"\r\n\r\ndef choix(dossier, root = None):\r\n \"\"\"\r\n fonction qui affiche la liste des choix possibles dans un dossier\r\n parametres:\r\n dossier, une chaine de caracteres avec le nom du dossier\r\n root, optionnel, à mentionner si on ne souhaite pas changer de fenetre\r\n renvoie une fenetre tkinter avec la liste des choix possibles\r\n \"\"\"\r\n liste = read.lister_fichier(dossier)\r\n\r\n if root == None:\r\n root = tk.Tk()\r\n else:\r\n # on nettoie la fenetre\r\n for c in root.winfo_children():\r\n c.destroy()\r\n\r\n root.title(\"Listes des Histoires\")\r\n root.config(bg = \"#87CEEB\")\r\n\r\n zoneMenu = tk.Frame(root, borderwidth=3, bg='darkblue')\r\n zoneMenu.grid(row = 0, column = 0, columnspan = len(liste), sticky=\"NSEW\")\r\n\r\n voix = tk.BooleanVar()\r\n ChoixVoix = tk.Checkbutton(zoneMenu, text='Voix', width='10', borderwidth=2, bg='gray', activebackground='darkorange', relief = \"raised\",variable = voix, onvalue = True, offvalue = False)\r\n ChoixVoix.grid(row=0, column=0, sticky=\"NSEW\")\r\n\r\n Ajout = tk.Button(zoneMenu, text = 'Ajout', width='10', borderwidth=2, bg='gray', relief = \"raised\", command = lambda x=[root, dossier] : ajout(x[0], x[1]))\r\n Ajout.grid(row=0, column=1, sticky=\"NSEW\")\r\n\r\n \"\"\"\r\n ajouter un menu pour changer le font des textes : voir font dans histoire\r\n changer grid(zoneMenu)\r\n \"\"\"\r\n\r\n for i_titre in range(len(liste)):\r\n # on ouvre un pack.png avec une image a afficher en row 1 column i_titre\r\n tk.Button(root, height = 5, width = 20, text = liste[i_titre], command = lambda x=[liste[i_titre], voix] : histoire(root, dossier + \"/\" + x[0], x[1].get())).grid(row = 2, column = i_titre, sticky=\"NSEW\")\r\n\r\n 
grid(root, 3, len(liste))\r\n grid(zoneMenu, 1, 2)\r\n\r\n root.mainloop()\r\n\r\ndef histoire(root, dossier, voix = False):\r\n \"\"\"\r\n fonction qui permet de raconter une histoire au format tkinter\r\n parametres:\r\n root, le fenetre tkinter dans laquelle se déroule l'histoire\r\n dossier, une chaine de caracteres avec le nom du dossier\r\n voix, optionnel, permet de d'activer la voix synthétique, par défaut False\r\n \"\"\"\r\n root.title(dossier)\r\n\r\n # on nettoie la fenetre\r\n for c in root.winfo_children():\r\n c.destroy()\r\n\r\n narration = read.lire_fichier(dossier + \"/narration.md\")\r\n\r\n def detail_etapes(narration):\r\n etapes = {}\r\n i_ligne = 0\r\n while i_ligne < len(narration):\r\n\r\n if narration[i_ligne][0] == \"#\":\r\n numero = narration[i_ligne].split(\"\\r\")[0]\r\n\r\n etapes[numero] = []\r\n i_ligne += 1\r\n if \":\" in narration[i_ligne]:\r\n image = narration[i_ligne].split(\" : \")[1]\r\n else:\r\n image = \"\"\r\n etapes[numero] += [image]\r\n i_ligne += 1\r\n\r\n texte = \"\"\r\n while narration[i_ligne][0] not in [\"/\", \"!\"]:\r\n texte += narration[i_ligne]\r\n i_ligne += 1\r\n\r\n etapes[numero] += [texte]\r\n\r\n choix = []\r\n while narration[i_ligne][0] != \"!\":\r\n separation = narration[i_ligne][1:].split(\"/\")\r\n texte = separation[0]\r\n\r\n nbr = separation[1].split(\"\\r\")[0]\r\n choix += [[texte, nbr]]\r\n i_ligne += 1\r\n\r\n etapes[numero] += [choix]\r\n\r\n i_ligne += 1\r\n return etapes\r\n\r\n etapes = detail_etapes(narration)\r\n\r\n i = tk.StringVar()\r\n i.set(\"#0\")\r\n forme = [(\"Comic Sans MS\", 15, \"bold\"), (\"Comic Sans MS\", 10, \"bold\")]\r\n\r\n while True:\r\n nbr = i.get()\r\n for c in root.winfo_children():\r\n c.destroy()\r\n\r\n etape = etapes[nbr]\r\n nbr_column = len(etape[2])\r\n\r\n \"\"\"\r\n image en 0-nbr_column 0\r\n \"\"\"\r\n titre = tk.Label(root, text = etape[1], font = forme[0], bg = \"#87CEEB\")\r\n\r\n if voix:\r\n parole = \"\"\r\n for j in etape[1].split(\"\\r\"):\r\n parole += j + \" \"\r\n parler(parole)\r\n\r\n parole_fin = {}\r\n for j in etape[2]:\r\n parole_fin[j[1]] = j[0]\r\n\r\n\r\n if nbr_column > 0:\r\n titre.grid(row = 1, column = 0, columnspan = nbr_column, sticky=\"NSEW\")\r\n else:\r\n titre.grid(row = 1, column = 0, sticky=\"NSEW\")\r\n\r\n if nbr_column == 0:\r\n break\r\n\r\n else:\r\n taille = \"%dp\" % (titre.winfo_reqwidth() // nbr_column)\r\n for i_choix in range(nbr_column):\r\n tk.Button(root,text = etape[2][i_choix][0], wraplength = taille, font = forme[1], command = lambda x=etape[2][i_choix][1] : i.set(x), bg = \"#87CEEB\", activebackground = \"#87CEEB\").grid(row = 2, column = i_choix, sticky=\"NSEW\")\r\n\r\n \"\"\"\r\n grid() ne fonctionne pas :\r\n peut-être taille fenetre : modifier la taille de la fenetre à chaque fois\r\n grille déjà grande avant : créer une frame le contenu de la fenetre et on le supprime à la place d'enlever chaque élément / changer les zones de création et les suppressions et les grid(root)\r\n \"\"\"\r\n grid(root, 1, nbr_column)\r\n\r\n root.wait_variable(i)\r\n if voix:\r\n parole = parole_fin[i.get()]\r\n parler(parole)\r\n\r\n # FIN\r\n if voix:\r\n parler(\" FIN\")\r\n\r\n tk.Button(root, text = \"Fin\", font = forme[1], command = lambda x=[dossier, root] : choix(x[0].split(\"/\")[0], x[1]), bg = \"#87CEEB\", activebackground = \"#87CEEB\").grid(row = 2, column = 0, sticky=\"NSEW\")\r\n\r\ndef ajout(root, dossier):\r\n \"\"\"\r\n fonction permettant d'ajouter une histoire\r\n parametres:\r\n root, une fenetre tkinter\r\n 
\"\"\"\r\n def plus():\r\n \"\"\"\r\n sous-fonction qui permet d'ajouter un morceau à l'histoire\r\n \"\"\"\r\n lettres = caracteres(texte.get(\"1.0\", \"end\"))\r\n\r\n dif_choix = [i.split(\"/\") for i in choice.get(\"1.0\", \"end\").split(\"\\n\")][:-1]\r\n faire = []\r\n good = True\r\n if not fin.get():\r\n for i in dif_choix:\r\n if len(i) != 2: # trouver pour éviter qu'il n'y ait pas de reference\r\n good = False\r\n\r\n if numero.get() == \"\":\r\n messagebox.showerror(\"Erreur numéro\", \"Mauvais numéro\")\r\n \"\"\"\r\n si checkbox image, vérifier qu'une image a été mise\r\n \"\"\"\r\n elif len(lettres) < 2 and (\" \" in lettres or \"\\n\" in lettres):\r\n messagebox.showerror(\"Erreur texte\", \"Il faut un texte\")\r\n elif not good:\r\n messagebox.showerror(\"Erreur choix\", 'Un choix par ligne avec le choix et le numero séparés par \"/\"')\r\n elif numero.get() in etapes.keys():\r\n if messagebox.askyesno(\"Changement\", \"Voulez-vous modifier ce numéro ?\"):\r\n etapes[numero.get()] = [texte.get(\"1.0\", \"end\"), choice.get(\"1.0\", \"end\")]\r\n else:\r\n tk.Button(schema, text=numero.get(), command = lambda x=\"%s\" % numero.get() : restaurer(x, etapes[x][0], etapes[x][1])).grid(row=ligne.get(), column=0, sticky=\"NSEW\")\r\n grid(schema,ligne.get() ,0)\r\n ligne.set(ligne.get() + 1)\r\n if fin.get():\r\n etapes[numero.get()] = [texte.get(\"1.0\", \"end\"), False]\r\n else:\r\n etapes[numero.get()] = [texte.get(\"1.0\", \"end\"), choice.get(\"1.0\", \"end\")]\r\n \"\"\"\r\n si image, la mettre en 3\r\n \"\"\"\r\n\r\n def restaurer(N, T, C):\r\n \"\"\"\r\n sous-fonction qui permet d'afficher un morceau dans la fenetre\r\n parametres:\r\n N, la référence\r\n T, le texte\r\n C, les choix\r\n \"\"\"\r\n numero.set(N)\r\n texte.delete(\"1.0\", \"end\")\r\n if C == False:\r\n choice.configure(state = \"normal\")\r\n choice.delete(\"1.0\", \"end\")\r\n choice.configure(state = \"disabled\")\r\n fin.set(True)\r\n f.select()\r\n else:\r\n fin.set(False)\r\n f.deselect()\r\n choice.configure(state = \"normal\")\r\n choice.delete(\"1.0\", \"end\")\r\n choice.insert(\"1.0\", C)\r\n\r\n texte.insert(\"1.0\", T)\r\n \"\"\"\r\n restaurez image\r\n \"\"\"\r\n\r\n def valider():\r\n \"\"\"\r\n sous-fonction qui permet de construire le dossier qui contiendrat l'histoire\r\n \"\"\"\r\n if not \"0\" in etapes.keys():\r\n messagebox.showerror(\"Pas de début\", \"Il manque le début d'indice 0\")\r\n \"\"\"\r\n vérifier si tous peut fonctionner :\r\n les references dites existe dans etapes sinon erreur\r\n les images sont ouvrables\r\n # il y a une fin\r\n \"\"\"\r\n else:\r\n titre = str(input(\"Quel est le titre de votre histoire ?\"))\r\n while read.fichier_existe(dossier + \"/\" + titre):\r\n titre = str(input(\"Ce titre existe déjà, choissisez-en un autre.\"))\r\n \"\"\"\r\n demander image de l'histoire\r\n \"\"\"\r\n chemin = dossier + \"/\" + titre\r\n read.add_repertoire(dossier, titre)\r\n\r\n read.add_repertoire(chemin, \"images\")\r\n\r\n \"\"\"\r\n mettres les images enregistrées\r\n \"\"\"\r\n\r\n # changer etapes en 1 fichier\r\n narration = \"\"\r\n\r\n for reference, contenu in etapes.items():\r\n narration += \"#\" + reference + \"\\r\"\r\n narration += \"False\"\r\n \"\"\"\r\n remplacer \"False\" par \"image\" si checkbox image\r\n \"\"\"\r\n for L in contenu[0].split(\"\\n\"):\r\n if L != \"\":\r\n narration += \"\\r\" + L\r\n\r\n if contenu[1] != False:\r\n for C in [i.split(\"/\") for i in contenu[1].split(\"\\n\")][:-1]:\r\n if C != ['']:\r\n narration += \"\\r/ \" + C[0] + 
\" /#\" + C[1]\r\n\r\n narration += \"\\r!\\r\\r\"\r\n\r\n read.add_fichier(chemin, \"narration.md\", narration)\r\n choix(dossier.split(\"/\")[0], root)\r\n\r\n def vue():\r\n if fin.get():\r\n choice.configure(state = \"normal\")\r\n fin.set(False)\r\n else:\r\n choice.configure(state = \"disabled\")\r\n fin.set(True)\r\n\r\n\r\n etapes = {}\r\n root.title(\"Ajout\")\r\n\r\n # on nettoie la fenetre\r\n for c in root.winfo_children():\r\n c.destroy()\r\n\r\n \"\"\"\r\n schema visuel a implementer\r\n schema sous forme de Canvas\r\n \"\"\"\r\n schema = tk.Frame(root, bg = \"#87CEEB\", borderwidth=3)\r\n ligne = tk.IntVar()\r\n ligne.set(0)\r\n\r\n schema.grid(row = 0, rowspan = 6, column = 1, sticky='NSEW')\r\n\r\n # numero\r\n tk.Label(root, text = \"Numéro :\", bg = \"#87CEEB\").grid(row = 0, column = 0, sticky=\"w\")\r\n numero = tk.StringVar()\r\n tk.Entry(root, textvariable = numero, width = 40).grid(row = 1, column = 0, sticky='NSEW')\r\n\r\n \"\"\"\r\n mettre checkbox image\r\n si cocher, il faut ouvrir une image\r\n sinon case désactiver\r\n\r\n on ouvre l'image avec un ouvrir puis on parcours les fichiers\r\n \"\"\"\r\n\r\n # texte\r\n label_texte = tk.Label(root, text = \"Texte :\", bg = \"#87CEEB\").grid(row = 2, column = 0, sticky=\"w\")\r\n texte = tk.Text(root, height = 10, width = 30)\r\n texte.grid(row = 3, column = 0, sticky='NSEW')\r\n\r\n # choix\r\n fin = tk.BooleanVar()\r\n fin.set(False)\r\n f = tk.Checkbutton(root, text='Choix possible :', width='10', borderwidth=2, bg='#87CEEB', command = lambda x=fin : vue())\r\n f.grid(row=4, column=0, sticky=\"w\")\r\n\r\n tk.Label(root, text=\"texte_choix / reference\", bg = \"#87CEEB\").grid(row = 4, column = 0, sticky=\"e\")\r\n\r\n choice = tk.Text(root, height = 10, width = 30)\r\n choice.grid(row = 5, column = 0, sticky='NSEW')\r\n\r\n # plus\r\n tk.Button(root, text=\"+\", bg = \"#87CEEB\", command=plus).grid(row=6, column=0, sticky=\"NSEW\")\r\n\r\n # validation\r\n tk.Button(root, text=\"Valider\", bg = \"#87CEEB\", command=valider).grid(row=6, column=1, sticky=\"NSEW\")\r\n\r\n grid(root, 6, 2)\r\n root.minsize(width=700, height=600)\r\n\r\ndef grid(root, R, C):\r\n for i in range(R):\r\n root.grid_rowconfigure(i, weight=1)\r\n for i in range(C):\r\n root.grid_columnconfigure(i, weight=1)\r\n\r\ndef parler(texte):\r\n \"\"\"\r\n fonction permettant de dire un texte --> Text-to-Speech\r\n parametres:\r\n texte, une chaine de caracteres à dire\r\n \"\"\"\r\n import pyttsx3\r\n\r\n engine = pyttsx3.init()\r\n engine.setProperty('rate', 150) # mettre rate à 150 permet d'avoir un débit raisonnable\r\n\r\n engine.say(texte)\r\n engine.runAndWait()\r\n\r\ndef caracteres(texte):\r\n \"\"\"\r\n fonction permettant d'obtenir une liste de tous les caracteres distinct présent dans une chaine de caracteres\r\n parametres:\r\n texte, une chaine de caracteres\r\n renvoie une liste de caracteres\r\n \"\"\"\r\n lettres = []\r\n\r\n for caractere in texte:\r\n if caractere not in lettres:\r\n lettres += [caractere]\r\n\r\n return lettres\r\n\r\n\r\nif __name__ == '__main__':\r\n choix(\"histoires\")","sub_path":"histoire_interactive_V1.0.py","file_name":"histoire_interactive_V1.0.py","file_ext":"py","file_size_in_byte":13493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"185268966","text":"import hashlib, random, time, json\n\ndef convert_base(num, to_base=10, from_base=10):\n # first convert to decimal number\n if isinstance(num, str):\n n = int(num, from_base)\n else:\n n = 
int(num)\n # now convert decimal to 'to_base' base\n alphabet = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n if n < to_base:\n return alphabet[n]\n else:\n return convert_base(n // to_base, to_base) + alphabet[n % to_base]\n\nblock_time = 5 * 60\nstart = time.time()\nfinish = start + block_time\n\nprint(start)\nprint(finish)\n\n#exit()\n\nhash_arr = []\nobj = json.loads('{\"miner_adress\": \"XIhhtD5BprFlsnObmDyxL5xc5QUCX4uKn6839Y4DBpKsnZRMtmhdeMDfVtkKRIZn\", \"hash_last_block\": \"hashaaaaaaaaa\", \"timestamp\": 1513948552.0508235, \"difficulty\": 50, \"bust\": 87700948419, \"transaction\": {\"from\": \"XIhhtD5BprFlsnObmDyxL5xc5QUCX4uKn6839Y4DBpKsnZRMtmhdeMDfVtkKRIZn\", \"to\": \"yMqqoiz28LIvvyvJ2VDFOHbd2ggASSTiEBssUSkC9QRZ7Fy3p6LCySh/ok4yWqH0\", \"count\": 10, \"fee\": 0, \"sign\": \"wnERxJEqC5OWxOGBwelJx0CgDSDpv3T6iFmsK2AOiMG8mSisXKbHieDvVJHwxwyQ\"}}')\n\nprint(obj)\n\na = -1\n\nwhile True:\n a += 1\n\n obj['bust'] = str(a)\n msg = json.dumps(obj)\n\n m = hashlib.sha1()\n m.update(msg.encode('utf-8'))\n hash = m.hexdigest()\n\n digit = convert_base(hash, 10, 16)\n print(digit)\n hash_arr.append(int(digit))\n\n if (time.time() > finish):\n break\n\nprint(len(hash_arr))\nprint(a)\nhash_arr.sort()\nprint(hash_arr[0])","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"49393627","text":"import numpy as np\n\nfrom sort.sort import *\n\nfrom pylot.perception.detection.utils import BoundingBox2D, DetectedObstacle\nfrom pylot.perception.tracking.multi_object_tracker import MultiObjectTracker\n\n\nclass MultiObjectSORTTracker(MultiObjectTracker):\n def __init__(self, flags):\n self.tracker = Sort()\n\n def reinitialize(self, frame, obstacles):\n \"\"\" Reinitializes a multiple obstacle tracker.\n\n Args:\n frame (:py:class:`~pylot.perception.camera_frame.CameraFrame`):\n Frame to reinitialize with.\n obstacles : List of perception.detection.utils.DetectedObstacle.\n \"\"\"\n detections = self.convert_detections_for_sort_alg(obstacles)\n self.tracker.update(detections)\n\n def track(self, frame):\n \"\"\" Tracks obstacles in a frame.\n\n Args:\n frame (:py:class:`~pylot.perception.camera_frame.CameraFrame`):\n Frame to track in.\n \"\"\"\n # each track in tracks has format ([xmin, ymin, xmax, ymax], id)\n obstacles = []\n for track in self.tracker.trackers:\n coords = track.predict()[0].tolist()\n # changing to xmin, xmax, ymin, ymax format\n bbox = BoundingBox2D(int(coords[0]), int(coords[2]),\n int(coords[1]), int(coords[3]))\n obstacles.append(DetectedObstacle(bbox, 0, \"\", track.id))\n return True, obstacles\n\n def convert_detections_for_sort_alg(self, obstacles):\n converted_detections = []\n for obstacle in obstacles:\n bbox = [\n obstacle.bounding_box.x_min, obstacle.bounding_box.y_min,\n obstacle.bounding_box.x_max, obstacle.bounding_box.y_max,\n obstacle.confidence\n ]\n converted_detections.append(bbox)\n return np.array(converted_detections)\n","sub_path":"pylot/perception/tracking/sort_tracker.py","file_name":"sort_tracker.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"615141134","text":"import unittest\nfrom datetime import datetime\n\nfrom dateutil.tz import tzutc, tzlocal\n\nfrom t77_date.tz import to_utc\n\n\nclass ToUtcTestCase(unittest.TestCase):\n def test_datetime_in_utc(self):\n value_in = datetime.now(tz=tzutc())\n value_out = 
to_utc(value_in)\n self.assertIsInstance(value_out, datetime)\n self.assertEqual(value_out.tzinfo, tzutc())\n self.assertEqual(value_out, value_in)\n\n def test_datetime_in_local(self):\n value_in = datetime.now(tz=tzlocal())\n value_out = to_utc(value_in)\n self.assertIsInstance(value_out, datetime)\n self.assertEqual(value_out.tzinfo, tzutc())\n\n def test_datetime_without_tzinfo(self):\n value_in = datetime.now()\n value_out = to_utc(value_in)\n self.assertIsInstance(value_out, datetime)\n self.assertEqual(value_out.tzinfo, tzutc())\n\n def test_invalid_input(self):\n now = datetime.now()\n with self.assertRaises(ValueError):\n to_utc(None)\n with self.assertRaises(ValueError):\n to_utc(now.toordinal())\n with self.assertRaises(ValueError):\n to_utc(now.isoformat())\n","sub_path":"tests/to_utc_test.py","file_name":"to_utc_test.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"16874132","text":"# -*- coding: utf-8 -*-\n\nfrom io import open\nfrom re import sub\nfrom os import listdir\nfrom collections import Counter\n\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk import sent_tokenize, word_tokenize, ngrams\n\nfrom utils import constants\n\nstemmer = SnowballStemmer(\"english\")\n\n\ndef filter_stop_words(words):\n return filter(lambda w: w not in constants.stopWords, words)\n\n\ndef stem_all_words(words):\n stems = [stemmer.stem(w) for w in words]\n return stems\n\n\ndef get_all_words(all_docs):\n all_words = set()\n for doc in all_docs:\n [all_words.add(k) for k in doc.keys()]\n\n return all_words\n\n\ndef read_all_documents():\n all_docs = []\n # i = 0\n for file in listdir(\"training_data\"):\n # if i == 2: break\n\n with open(\"training_data/\"+file, 'r', encoding='utf8') as f:\n doc = f.read().lower()\n f.close()\n\n if file == \"englishText_0_10000\":\n doc = doc.split(\"endofarticle.\")\n all_docs += doc\n else:\n all_docs.append(doc)\n\n return all_docs\n\n\ndef tokenize_document(document):\n sentences = sent_tokenize(document)\n tokens = []\n for sentence in sentences:\n sentence = sub(r'[^\\w\\s]', ' ', sentence)\n\n tokens += [w for w in filter_stop_words(word_tokenize(sentence))]\n\n stemmed_tokens = stem_all_words(tokens)\n\n return tokens, stemmed_tokens\n\n\ndef get_tokenized_document(filename):\n \"\"\"\n Tokenizer for test data\n :param filename: Name of file presesnt in test_data directory\n :return: returns tokenized data\n \"\"\"\n\n with open(filename, 'r', encoding='utf8') as f:\n doc = f.read().lower()\n f.close()\n\n # doc, stemmed = tokenize_document(doc)\n doc, stemmed = tokenize_document_v2(doc)\n doc = Counter(stemmed)\n\n # print doc\n\n return doc\n\n\ndef joiner(words):\n tokens = []\n for w in words:\n x = filter_stop_words(w)\n\n if len(x) != 2:\n continue\n\n x = ' '.join(x)\n tokens.append(x)\n\n tokens = filter(lambda x1: x1 != '', tokens)\n return tokens\n\n\ndef get_bigrams(sentence):\n words = word_tokenize(sentence)\n bigrams = list(ngrams(words, 2))\n bigrams = joiner(bigrams)\n\n return bigrams\n\n\ndef tokenize_document_v2(document):\n sentences = sent_tokenize(document)\n tokens = []\n for sentence in sentences:\n sentence = sub(r'[^\\w\\s]', ' ', sentence)\n bigrams = get_bigrams(sentence)\n tokens += [w for w in filter_stop_words(word_tokenize(sentence))]\n tokens += bigrams\n\n stemmed_tokens = stem_all_words(tokens)\n return tokens, 
stemmed_tokens\n\n","sub_path":"utils/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"140615384","text":"# Object Oriented Programming\n\n\"\"\"Class is the blueprint of how object will be created\n Object is the real life entity for the given class\"\"\"\n\n'''There are 2 types of attributes associated to python classes\n 1. class attributes\n 2. instance attributes'''\n\n\nclass Employee:\n company = \"Google\"\n\n def __init__(self, name):\n print(\"\\nConstructor run -> Employee created\\n\")\n self.name = name\n\n def getSalary(self, signature):\n print(f\"\\nSalary is ${self.salary} for leo working at company {self.company}, not in {Employee.company}\"\n f\"\\n{signature}\")\n # Here self.salary is not defined yet so there might be a runtime error if not defined later\n # In any function, 'self' can be changed to any other valid identifier and will work fine, bu writing self is\n # a best practice\n\n @staticmethod\n def greet():\n print(\"Good morning from leo\")\n # Here self is not required as @staticmethod makes this method static, hence not related to an object but the class\n # @staticmethod is a decorator which modifies a function\n\n\nleo = Employee(\"leo\")\nleo.company = \"Facebook\"\nleo.salary = 5000\n\nprint(\"Name is: \", leo.name)\nprint(\"Company is: \", leo.company) # This is an instance attribute as it has preference over class attribute\nprint(\"Salary is: \", leo.salary) # This is an instance attribute\nprint(\"Employee company is: \", Employee.company) # This is a class attribute\n'''Whenever object.attribute is called, the attribute is searched as an instance attribute, if it is not present\n then it is searched as a class attribute. 
Instance attribute takes preference over class attribute during \n assignment and retrieval'''\n\n\n# 'self' parameter\n'''self refers to the instance of the class which is automatically passed with a function call from an object\n\n Internally, leo.getSalary() is converted to Employee.getSalary(leo)\n Hence the self parameter is always passed and it will receive the object through which it is called'''\n\nleo.getSalary(\"Cheers!\")\nleo.greet()\n\n\n# Constructor (__init__())\n\n'''__init__() is the first method which runs as soon as an object is created.\n It takes self as an argument and can also take other arguments'''\n","sub_path":"chapter10.py","file_name":"chapter10.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"31457727","text":"import math\r\nimport time\r\nfrom PIL import ImageGrab\r\nimport pytesseract\r\nimport winsound\r\nimport os\r\n\r\n#---------------USER-CUSTOMIZABLE-SETTINGS---------------#\r\npytesseract.pytesseract.tesseract_cmd = r'C:\\Users\\Evan\\AppData\\Local\\Tesseract-OCR\\tesseract.exe'\r\n #path of tesseract.exe (should just have to change USERSNAME if installed in default location\r\n\r\nExecutableCodeFileLocation = r'C:\\Users\\Evan\\Desktop\\Fortnite Build Warning Bot'\r\n #Location of 'Executable Code'\r\n #Can be found by right click on 'Executable Code' file > Properties > Then highlight and copy Location\r\n #YOU MUST LEAVE 'Executable Code' file in the folder with the 3 sound files\r\n #You can rename the folder if desired\r\n\r\nRES = (2560, 1440) #must be 16:9\r\n #resolution of your monitor\r\n#--------------------------------------------------------#\r\n\r\nrunning = ''\r\nwinsound.Beep(300, 300)\r\n\r\nCommonZero = ['QO', 'Q0', 'Oo', '0o', 'Q', 'O', 'o', '0']\r\nWood, Brick, Metal, Builds = 0, 0, 0, 0\r\nReady20, Ready10, Ready5 = False, False, False\r\n\r\nHorizontalConst = ((2310/2560), (2880/2880)) #factors found in photoshopr\r\nVerticalConst = ((1020/1440), (1060/1440))\r\n\r\nX1 = math.floor(RES[0] * HorizontalConst[0]) #round down to make sure nothing is cut off\r\nX2 = math.floor(RES[0] * HorizontalConst[1]) #round down to make sure not outside of the screen\r\nY1 = math.floor(RES[1] * VerticalConst[0])\r\nY2 = math.floor(RES[1] * VerticalConst[1])\r\n\r\nwhile True:\r\n time.sleep(0.5)\r\n \r\n screenshot = ImageGrab.grab(bbox=(X1, Y1, X2, Y2))\r\n screenshot = screenshot.point(lambda x: 255 if x<255 else 0)\r\n #screenshot.show()\r\n\r\n Materials = pytesseract.image_to_string(screenshot, lang='eng', config='--psm 7')\r\n\r\n if len(Materials.split()) == 3: #just ignore if we do not find 3 strings\r\n Materials = Materials.split()\r\n\r\n if Materials[0].isdigit() == True:\r\n Wood = int(Materials[0])\r\n elif Materials[0] in CommonZero:\r\n Wood = 0\r\n \r\n if Materials[1].isdigit() == True:\r\n Brick = int(Materials[1])\r\n elif Materials[1] in CommonZero:\r\n Brick = 0\r\n\r\n if Materials[2].isdigit() == True:\r\n Metal = int(Materials[2])\r\n elif Materials[2] in CommonZero:\r\n Metal = 0\r\n\r\n Builds = math.floor(Wood/10) + math.floor(Brick/10) + math.floor(Metal/10)\r\n\r\n if Builds < 20 and Ready20 == True:\r\n winsound.PlaySound(ExecutableCodeFileLocation + r'\\20builds.wav', winsound.SND_ASYNC)\r\n Ready20 = False\r\n elif Builds < 10 and Ready10 == True:\r\n winsound.PlaySound(ExecutableCodeFileLocation + r'\\10builds.wav', winsound.SND_ASYNC)\r\n Ready10 = False\r\n elif Builds < 5 and Ready5 == True:\r\n 
winsound.PlaySound(ExecutableCodeFileLocation + r'\\5builds.wav', winsound.SND_ASYNC)\r\n Ready5 = False\r\n \r\n if Builds > 30:\r\n Ready20, Ready10, Ready5 = True, True, True\r\n\r\n if len(running) == 3:\r\n running = \".\"\r\n else:\r\n running = running + '.'\r\n os.system('cls')\r\n print(\"Bot is running\" + running)\r\n print(\"Current Builds: \" + str(Builds))\r\n \r\n","sub_path":"Fortnite Build Warning Bot/Executable Code.py","file_name":"Executable Code.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"68076525","text":"import sys\nimport os,datetime,time,fileinput\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom ConfRead import Conf\nfrom PyQt5.QtGui import *\n#from Program import SecondWindow\n\nclass UI_FifthTab(object):\n def setupUi(self, Form):\n self.LabelSrc=QLabel('请选择源路径',self)\n self.LineSrc =QLineEdit(Conf().GetLastFile_LabelSrc)\n self.BtnSrc = QPushButton('Src', self)\n #self.BtnSrc.clicked.connect(self.ButtonSrcShowDialog)\n\n\n self.LabelDst=QLabel('请选择目标路径',self)\n self.LineDst =QLineEdit(Conf().GetLastFile_LabelDst,self)\n self.BtnDst = QPushButton('Dst', self)\n #self.BtnDst.clicked.connect(self.ButtonDstShowDialog)\n\n self.CheckBoxKeyFile=QCheckBox('仅提取指定文件(文件名请用逗号隔开)',self)\n self.LineKeyFile=QLineEdit('',self)\n\n upgrid=QGridLayout()\n #grid.setSpacing(10)\n upgrid.addWidget(self.LabelSrc,1,0)\n upgrid.addWidget(self.LineSrc,1,1)\n upgrid.addWidget(self.BtnSrc,1,2)\n upgrid.addWidget(self.LabelDst,2,0)\n upgrid.addWidget(self.LineDst,2,1)\n upgrid.addWidget(self.BtnDst,2,2)\n upgrid.addWidget(self.CheckBoxKeyFile,3,0,Qt.AlignRight)\n upgrid.addWidget(self.LineKeyFile,3,1,)\n upgrid.setColumnStretch(0,1)\n upgrid.setColumnStretch(1,3)\n\n self.LabelTime=QLabel('请输入时间差,单位:小时',self)\n self.SpinBoxTime=QDoubleSpinBox()\n self.SpinBoxTime.setValue(1.00)\n self.SpinBoxTime.setMaximum(720.00)\n self.BtnGetLastFile=QPushButton('GetLastFile',self)\n #self.BtnGetLastFile.clicked.connect(self.FirstSinalEmit)\n\n h0layout=QHBoxLayout()\n h0layout.addWidget(self.LabelTime)\n h0layout.addWidget(self.SpinBoxTime)\n h0layout.addWidget(self.BtnGetLastFile)\n\n\n\n\n\n\n vlayout=QVBoxLayout()\n vlayout.addLayout(h0layout)\n\n MainGrid=QGridLayout()\n MainGrid.addLayout(upgrid,0,0)\n MainGrid.addLayout(vlayout,1,0)\n self.setLayout(MainGrid)\n\n\n #self.setWindowIcon(QIcon('image/icon.png'))\n self.setWindowTitle('DFRZ DeployTool')","sub_path":"UI_FifthTab.py","file_name":"UI_FifthTab.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"1104513","text":"import math\r\na0=-1\r\nt=-1\r\nw10=float(input(\"Enter weight first network\"))\r\nb10=float(input(\"Enter base first network:\"))\r\nw20=float(input(\"Enter weight second network:\"))\r\nb20=float(input(\"Enter base second network:\"))\r\nc=float(input(\"Enter learning coefficient:\"))\r\nn1=float(w10*c+b10)\r\na1=math.tanh(n1)\r\nn2=float(w20*a1+b20)\r\na2=math.tanh(float(n2))\r\ne=t-a2\r\ns2=-2*(1-a2*a2)*e\r\ns1=(1-a1*a1)*w20*s2 \r\n \r\nw21=w20-(c*s2*a1)\r\nw11=w10-(c*s1*a0)\r\nb21=b20-(c*s2)\r\nb11=b10-(c*s1)\r\nprint(\"The updated weight of first n/w w11=\",w11)\r\nprint(\"The uploaded weight of second n/w w21= \",w21)\r\nprint(\"The updated base of first n/w b10=\",b10)\r\nprint(\"The updated base of second n/w b20= 
\",b20)\r\n","sub_path":"4B_ERRORBACK.py","file_name":"4B_ERRORBACK.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"165587561","text":"# -*- coding: utf-8 -*-\n#----------------------------------------------------------------------------\n#\n# Copyright (C) 2017 .\n# Coded by : Mohsen Dhifallah\n#\n#----------------------------------------------------------------------------\nfrom openerp.report import report_sxw\nimport time\nfrom openerp.osv import osv\n\n\nclass report_declaration_employer(report_sxw.rml_parse):\n \n def __init__(self, cr, uid, name, context):\n super(report_declaration_employer, self).__init__(cr, uid, name, context=context)\n self.localcontext.update({\n 'time': time,\n 'get_declaration_employer':self._get_declaration_employer\n # 'get_foprolos':self._get_foprolos,\n })\n\n# def _get_declaration_employer(self,fiscalyear_id) :\n# bulletin_obj = self.pool.get('hr.payroll.bulletin')\n# bulletin_ids = bulletin_obj.search(self.cr, self.uid,\n# [('month_id.period_id.fiscalyear_id','=',fiscalyear_id)],\n# \n# )\n# salaire_brute_imposable=0.0\n# total_salaire_brute_imposable=0.0\n# \n# for line in bulletin_obj.browse(self.cr,self.uid,bulletin_ids) :\n# employee_id = line.employee_id.name\n# salaire_brute_imposable += line.salaire_brute_imposable\n# total_salaire_brute_imposable += line.salaire_brute_imposable\n# \n# return { 'employee_id' : employee_id,\n# 'salaire_brute_imposable' : salaire_brute_imposable,\n# 'total_salaire_brute_imposable' : salaire_brute_imposable,\n# \n# }\n \n def _get_total_salaire_imposable(self,fiscalyear_id,context=None):\n salaire_obj=self.pool.get('hr.payroll.bulletin')\n search_ids=salaire_obj.search(self.cr,self.uid,[('month_id.period_id.fiscalyear_id','=',fiscalyear_id)],context)\n total=0.0\n for line in salaire_obj.browse(self.cr,self.uid,search_ids) :\n total += line.salaire_brute_imposable\n return total \n \n \n def _get_declaration_employer(self,fiscalyear_id,context=None):\n salaire_obj=self.pool.get('hr.payroll.bulletin')\n search_ids=salaire_obj.search(self.cr,self.uid,[('month_id.period_id.fiscalyear_id','=',fiscalyear_id)],context)\n employees={}\n for sal in salaire_obj.browse(self.cr,self.uid,search_ids) :\n if sal.employee_id.id in employees:\n employees[sal.employee_id.id]['salaire_brute_imposable'] += sal.salaire_brute_imposable\n employees[sal.employee_id.id]['irpp'] += sal.igr\n else :\n val={'employee_id':sal.employee_id.name,'salaire_brute_imposable':sal.salaire_brute_imposable,'irpp':sal.igr}\n employees[sal.employee_id.id] = val\n \n result=[]\n for key, val in employees.items():\n result.append(val) \n \n \n return result\n \n# def _get_foprolos(self,month_id,context=None):\n# payement_obj=self.pool.get('hr.payroll.bulletin')\n# search_ids=payement_obj.search(self.cr,self.uid,[('month_id', '=', month_id[0])],context)\n# total=0.0\n# for paymnt in payement_obj.browse(self.cr,self.uid,search_ids) :\n# total += paymnt.salaire_brute/100\n# return total\n\n \n\n \n\nclass declaration_employer_report(osv.AbstractModel):\n _name = 'report.devplus_hr_payroll_tn.declaration_employer_report'\n _inherit = 'report.abstract_report'\n _template = 'devplus_hr_payroll_tn.declaration_employer_report'\n _wrapped_report_class = 
report_declaration_employer\n","sub_path":"dev/dev8files/devplus_hr_payroll_tn/report/report_declaration_employer.py","file_name":"report_declaration_employer.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"436037060","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/rcmp/__init__.py\n# Compiled at: 2013-09-24 13:27:40\n\"\"\"\n##############\n:py:mod:`RCMP`\n##############\n\n:py:mod:`Rcmp` is a more flexible replacement for :py:mod:`filecmp`\nfrom the standard `Python `_ library.\n\nThe basic idea here is that depending on content, files don't always\nhave to be *entirely* bitwise identical in order to be equivalent or\n\"close enough\" for many purposes like comparing the results of two\nbuilds. For example, some (broken) file formats embed a time stamp\nindicating the time when a file was produced even though the file\nsystem already tracks this information. Build the same file twice and\nthe two copies will initially appear to be different due to the\nembedded time stamp. Only when the irrelevant embedded time stamp\ndifferences are ignored do the two files show out to otherwise be the\nsame.\n\n:py:mod:`Rcmp` includes a flexible extension structure to allow for precisely\nthese sorts of living and evolving comparisons.\n\nExtended Path Names\n===================\n\n:py:mod:`Rcmp` is capable of recursively descending into a number\nof different file types including:\n\n* file system directories\n* archival and aggregating types including:\n\n * `ar `_\n * `cpio `_\n * `tar `_\n\n* compressed files including:\n\n * `zip `_\n * `gzip `_\n\nIn order to describe file locations which may extend beyond the\ntraditional file system paths, :py:mod:`rcmp` introduces an extended\npath naming scheme. Traditional paths are described using the\ntraditional slash separated list of names, :file:`/etc/hosts`. And\ncomponents which are included in other files, like a file located\n*within* a `tar `_\narchive, are described using a sequence of brace encapsulated file\nformat separaters. So, for instance, a file named :file:`foo` located\nwithin a gzip compressed, (:file:`.gz`), tar archive named\n:file:`tarchive.tar` would be described as\n:file:`tarchive.tar.gz{{gzip}}tarchive.tar{{tar}}foo`. And these can\nbe combined as with\n:file:`/home/rich/tarchive.tar.gz{{gzip}}tarchive.tar{{tar}}foo`.\n\nScript Usage\n============\n\n:py:mod:`Rcmp` is both a library and a command line script for driving\nthe library.\n\nClass Architecture\n==================\n\n.. autoclass:: Item\n :members:\n\n.. autoclass:: Items\n :members:\n\n.. autoclass:: Same\n :members:\n\n.. autoclass:: Different\n :members:\n\n.. autoclass:: Comparator\n :members:\n\n.. autoclass:: Box\n :members:\n\n.. autoclass:: Comparison\n :members:\n\n.. autoclass:: ComparisonList\n :members:\n\nComparators\n===========\n\n..fixme:: comparators should probably be zero instance strategies.\n\nListed in default order of application:\n\n.. autoclass:: NoSuchFileComparator\n.. autoclass:: InodeComparator\n.. autoclass:: EmptyFileComparator\n.. autoclass:: DirComparator\n.. autoclass:: ArMemberMetadataComparator\n.. autoclass:: BitwiseComparator\n.. autoclass:: SymlinkComparator\n\n.. autoclass:: BuriedPathComparator\n\n.. autoclass:: ElfComparator\n.. autoclass:: ArComparator\n.. 
autoclass:: AMComparator\n.. autoclass:: ConfigLogComparator\n.. autoclass:: KernelConfComparator\n.. autoclass:: ZipComparator\n.. autoclass:: TarComparator\n.. autoclass:: GzipComparator\n.. autoclass:: Bz2Comparator\n.. autoclass:: CpioMemberMetadataComparator\n.. autoclass:: CpioComparator\n.. autoclass:: DateBlotBitwiseComparator\n.. autoclass:: FailComparator\n\nUtilities\n=========\n\n.. autofunction:: date_blot\n.. autofunction:: ignoring\n\nExceptions\n==========\n\n.. autoexception:: RcmpException\n.. autoexception:: IndeterminateResult\n\nLogging strategy:\n=================\n\nRcmp uses the python standard logging facility. The only non-obvious\nbits are that definitive differences are logged at WARNING level.\nDefinitive Sames are logged at WARNING - 1. And indefinite results\nare logged at WARNING - 2. This allows for linearly increasing\nvolumes of logging info starting with the information that is usually\nmore important first.\n\n.. Note:: I keep thinking that it would be better to create an\n IgnoringComparator that simply returned Same. It would make much\n of the code much simpler. However, it would mean that we'd build\n entire trees in some cases and compare them all just to produce\n constants. This way we clip the tree.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n__docformat__ = b'restructuredtext en'\n__all__ = [\n b'Item',\n b'Items',\n b'Same',\n b'Different',\n b'Comparator',\n b'Box',\n b'Comparison',\n b'ComparisonList',\n b'rootItem',\n b'ignoring',\n b'date_blotNoSuchFileComparator',\n b'InodeComparator',\n b'EmptyFileComparator',\n b'DirComparator',\n b'ArMemberMetadataComparator',\n b'BitwiseComparator',\n b'SymlinkComparator',\n b'BuriedPathComparator',\n b'ElfComparator',\n b'ArComparator',\n b'AMComparator',\n b'ConfigLogComparator',\n b'KernelConfComparator',\n b'ZipComparator',\n b'TarComparator',\n b'GzipComparator',\n b'Bz2Comparator',\n b'CpioMemberMetadataComparator',\n b'CpioComparator',\n b'DateBlotBitwiseComparator',\n b'FailComparator']\nlzma = False\nimport abc\nif lzma:\n import backports.lzma as lzma\nimport bz2file as bz2, contextlib, difflib, errno, fnmatch, gzip, io, logging, mmap, operator, os, re, stat, subprocess, sys, tarfile, tempfile, zipfile, elffile, arpy, cpiofile\nDIFFERENCES = logging.WARNING\nSAMES = logging.WARNING - 1\nINDETERMINATES = logging.WARNING - 2\nlogging.addLevelName(DIFFERENCES, b'differences')\nlogging.addLevelName(SAMES, b'sames')\nlogging.addLevelName(INDETERMINATES, b'indeterminates')\nlogger = logging.getLogger(__name__)\nimport pprint\npp = pprint.PrettyPrinter()\n\ndef _loggable(cls):\n cls.logger = logging.getLogger((b'{}.{}').format(__name__, cls.__name__))\n return cls\n\n\n_read_count = 3\n\n@_loggable\nclass Item(object):\n \"\"\"\n Things which can be compared are represented internally by\n instances of class :py:class:`Item`. 
These can be items in the\n file system, like a file or directory, or in an archive, like an\n archive member.\n\n This is used for caching the results from calls like stat and for\n holding content.\n\n :param name: file system name\n :type name: string\n \"\"\"\n\n def __init__(self, name, parent, box=None):\n assert parent\n self._name = name\n self._statbuf = False\n self._fd = False\n self._content = False\n self._link = False\n self._size = None\n self._read_count = 0\n self._native = False\n self.parent = parent\n self._box = box if box else DirComparator\n self.logger.log(logging.DEBUG, b'Item(name = %s, parent = %s, box = %s)', name, parent.name if hasattr(parent, b'name') else b'None', self._box.__name__)\n return\n\n @property\n def box(self):\n return self._box\n\n @box.setter\n def box(self, value):\n \"\"\"setter\"\"\"\n assert value\n self._box = value\n\n @property\n def name(self):\n \"\"\"\n name in the extended file system name space of this :py:class:`Item`.\n\n :rtype: string\n \"\"\"\n return self._name\n\n @property\n def shortname(self):\n return self.box.member_shortname(self)\n\n @property\n def content(self):\n \"\"\"\n The contents of the entire file, in memory.\n\n :rtype: bytearray.\n \"\"\"\n global _read_count\n try:\n if self._content is False:\n self._content = self.parent.box.member_content(self)\n self._read_count += 1\n if self._read_count > _read_count:\n _read_count = self._read_count\n except TypeError:\n self.logger.log(logging.ERROR, b'self = %s, %s', self, self.name)\n self.logger.log(logging.ERROR, b'self.parent = %s, %s', self.parent, self.parent.name)\n self.logger.log(logging.ERROR, b'self.box = %s', self.box)\n self.logger.log(logging.ERROR, b'self.box.member_content = %s', self.box.member_content)\n self.logger.log(logging.ERROR, b'self.parent.box = %s, %s', self.parent.box, self.parent.box.__name__)\n self.logger.log(logging.ERROR, b'self.parent.box.member_content = %s', self.parent.box.member_content)\n raise\n\n return self._content\n\n def reset(self):\n self.logger.log(logging.DEBUG, b'resetting %s', self.name)\n self._content = False\n\n @property\n def stat(self):\n \"\"\"\n If we have a statbuf, return it.\n\n If not, then look one up, cache it, and return it.\n\n :rtype: statbuf\n \"\"\"\n if not self._statbuf:\n self._statbuf = self.parent.box.member_stat(self)\n return self._statbuf\n\n @property\n def exists(self):\n \"\"\"\n Check for existence.\n\n :rtype: boolean\n \"\"\"\n try:\n return self.parent.box.member_exists(self)\n except:\n self.logger.log(logging.DEBUG, b'self = %s, self.box = %s', self, self.box)\n raise\n\n @property\n def inode(self):\n \"\"\"\n Return the inode number from stat.\n\n :rtype: string\n \"\"\"\n return self.box.member_inode(self)\n\n @property\n def device(self):\n \"\"\"\n Return device number from stat.\n\n :rtype: string\n \"\"\"\n return self.box.member_device(self)\n\n @property\n def size(self):\n \"\"\"\n Return our size. 
Look it up in stat, (and cache the result), if\n we don't already know what it is.\n\n :rtype: int\n \"\"\"\n if self._size is None:\n self._size = self.parent.box.member_size(self)\n return self._size\n\n @property\n def isdir(self):\n \"\"\"\n Return True if and only if we are represent a file system\n directory.\n\n :rtype: boolean\n \"\"\"\n try:\n return self.parent.box.member_isdir(self)\n except:\n self.logger.log(logging.DEBUG, b'isdir self = %s, self.box = %s', self.name, self.box)\n raise\n\n @property\n def isreg(self):\n \"\"\"\n Return True if and only if we represent a regular file.\n\n :rtype: boolean\n \"\"\"\n return self.parent.box.member_isreg(self)\n\n @property\n def islnk(self):\n \"\"\"\n Return True if and only if we represent a symbolic link.\n\n :rtype: boolean\n \"\"\"\n return self.parent.box.member_islnk(self)\n\n @property\n def link(self):\n \"\"\"\n Return a string representing the path to which the symbolic link\n points. This presumes that we are a symbolic link.\n\n :rtype: string\n \"\"\"\n if not self._link:\n self._link = self.parent.box.member_link(self)\n return self._link\n\n\nclass Items(object):\n \"\"\"\n There is a global set of all instances of class :py:class:`Item`\n stored in the singular class :py:class:`Items`.\n\n This exists primarily to prevent us from creating a duplicate\n :py:class:`Item` for the same path name.\n\n .. note:: The class is used directly here as a global aggregator,\n a singleton. It is never instantiated but instead the class\n itself is used as a singleton.\n \"\"\"\n _content = {}\n\n @classmethod\n def find_or_create(cls, name, parent, box=None):\n \"\"\"\n Look up an :py:class:`Item` with *name*. If necessary, create it.\n\n :param name: the name of the :py:class`Item` to look up\n :type name: string\n :rtype: :py:class:`Item`\n \"\"\"\n if not box:\n box = DirComparator\n if name in cls._content:\n return cls._content[name]\n else:\n x = Item(name, parent, box)\n cls._content[name] = x\n return x\n\n @classmethod\n def delete(cls, name):\n \"\"\"\n Delete an :py:class:`Item` from the set.\n\n :param name: name of the :py:class:`Item` to be deleted.\n :type name: string\n \"\"\"\n del cls._content[name]\n\n @classmethod\n def reset(cls):\n cls._content = {}\n\n\nclass Same(object):\n \"\"\"\n Returned to indicate an authoritative claim of sufficient\n identicality. No further comparators need be tried.\n\n .. note:: The class itself is used as a constant. It is never\n instantiated.\n \"\"\"\n pass\n\n\nclass Different(object):\n \"\"\"\n Returned to indicate an authoritative claim of difference. No\n further comparators need be tried.\n\n .. note:: The class itself is used as a constant. It is never\n instantiated.\n \"\"\"\n pass\n\n\n@_loggable\nclass Comparator(object):\n \"\"\"\n Represents a single comparison heuristic. This is an abstract\n class. It is intended solely to act as a base class for\n subclasses. It is never instantiated. (lie - fixme).\n\n Subclasses based on :py:class:`Comparator` implement individual\n heuristics for comparing items when applied to a\n :py:class:`Comparison`. There are many :py:class:`Comparator`\n subclasses included.\n\n ..note:: :py:class:`Comparator`s are strategies. 
That is, there are no\n instantiation variables nor properties.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n @staticmethod\n @abc.abstractmethod\n def _applies(thing):\n return False\n\n @classmethod\n def applies(cls, comparison):\n \"\"\"\n Return True if and only if we apply to the given comparison.\n\n :type comparison: :py:class:`Comparison`\n :rtype: boolean\n \"\"\"\n return reduce(operator.iand, [ cls._applies(i) for i in comparison.pair ])\n\n @classmethod\n @abc.abstractmethod\n def cmp(cls, comparison):\n \"\"\"\n Apply ourselves to the given :py:class:`Comparison`.\n\n If can make an authoritative determination about whether the\n :py:class:`Items` are alike then return either\n :py:class:`Same` or :py:class:`Different`. If we can make no\n such determination, then return a non-True value.\n\n :type comparison: :py:class:`Comparison`\n :rtype: :py:class:`Same`, :py:class:`Different`, or a non-True value\n \"\"\"\n cls.logger.error(b\"%s.cmp() isn't overridden.\", cls.__name__)\n raise NotImplementedError\n return False\n\n @classmethod\n def _log_item(cls, item):\n if item.exists and item.islnk:\n return (item.name, item.link)\n else:\n return item.name\n\n @classmethod\n def _log_string(cls, s, comparison):\n return (b'{0} {1} {2}').format(s, cls.__name__, comparison.pair[0].name.partition(os.sep)[2])\n\n @classmethod\n def _log_unidiffs(cls, content, names):\n try:\n cls.logger.log(DIFFERENCES, (b'\\n').join(difflib.unified_diff(content[0].split(b'\\n'), content[1].split(b'\\n'), names[0], names[1], b'', b'', 3, b'')))\n except UnicodeError:\n pass\n\n @classmethod\n def _log_unidiffs_comparison(cls, comparison):\n cls._log_unidiffs([ i.content for i in comparison.pair ], [ i.name for i in comparison.pair ])\n\n @classmethod\n def _log_different(cls, comparison):\n cls.logger.log(DIFFERENCES, cls._log_string(b'Different', comparison))\n\n @classmethod\n def _log_same(cls, comparison):\n cls.logger.log(SAMES, cls._log_string(b'Same', comparison))\n\n @classmethod\n def _log_indeterminate(cls, comparison):\n cls.logger.log(INDETERMINATES, cls._log_string(b'Indeterminate', comparison))\n\n\nclass DatePattern(object):\n\n def __init__(self, pattern, replacement):\n self.pattern = pattern\n self.replacement = replacement\n self.compiled = re.compile(pattern)\n\n\ndow = b'(Sun|Mon|Tue|Wed|Thu|Fri|Sat)'\nmoy = b'(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)'\nlmoy = b'(January|February|March|April|May|June|July|August|September|October|November|December)'\ndate_patterns = [\n DatePattern(dow + b' ' + moy + b' *[0-9]{1,2} [0-9]{2}:[0-9]{2}:[0-9]{2} (PST|PDT) [0-9]{4}', b'Day Mon 00 00:00:00 LOC 2011'),\n DatePattern(dow + b' ' + moy + b' *[0-9]{1,2} [0-9]{2}:[0-9]{2}:[0-9]{2} [0-9]{4}', b'Day Mon 00 00:00:00 2011'),\n DatePattern(b'(?i) *[0-9]{1,2} (JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC) [0-9]{4} [0-9]{2}:[0-9]{2}', b'00 MON 2011 00:00'),\n DatePattern(lmoy + b' *[0-9]{1,2}\\\\\\\\?, [0-9]{4}', b'Month 00, 2011'),\n DatePattern(dow + b' ' + moy + b' *[0-9]{1,2} *[0-9]{4}', b'Day Mon 00 2011'),\n DatePattern(dow + b' *[0-9]{1,2} *' + moy + b' *[0-9]{4}', b'Day 00 Mon 2011'),\n DatePattern(dow + b' *[0-9]{1,2} *' + lmoy + b' *[0-9]{4}', b'Day 00 Month 2011'),\n DatePattern(b'20*[0-9]{2}-*[0-9]{2}-*[0-9]{2}', b'2011-00-00'),\n DatePattern(moy + b' [0-9]{4}', b'Mon 2011'),\n DatePattern(b'[0-9]{2}:[0-9]{2}:[0-9]{2}', b'00:00:00'),\n DatePattern(b'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{6}Z', b'00000000T000000Z')]\n\ndef date_blot(input_string):\n \"\"\"Convert dates embedded in a 
string into innocuous constants of uniform length.\n \n :param input_string: input string\n :rtype: string\n \"\"\"\n retval = input_string\n for pat in date_patterns:\n try:\n retval = pat.compiled.sub(pat.replacement, retval)\n except UnicodeError:\n pass\n\n return retval\n\n\ndef fntore(names):\n \"\"\"\n Convert a list of wildcard style patterns into a list of compiled regexps.\n \"\"\"\n return [ re.compile(fnmatch.translate(n)) for n in names ]\n\n\ndef fntoreconcat(names):\n \"\"\"\n Convert a list of wildcard style patterns into a list of compiled regexps.\n \"\"\"\n return [\n re.compile((b'|').join([ fnmatch.translate(n) for n in names ]))]\n\n\ndef ignoring(ignores, fname):\n \"\"\"\n Given a list of file names to be ignored and a specific file name\n to check, return the first ignore pattern from the list that\n matches the file name.\n\n :param ignores: ignore patterns\n :type ignores: list of strings\n :param fname: file name to check\n :type fname: string\n :rtype: string or False (Can be used as a predicate.)\n \"\"\"\n for ignore in ignores:\n try:\n if ignore.match(fname):\n return ignore\n except AttributeError:\n logger.log(logging.ERROR, b'ignore = %s', ignore)\n raise\n\n return False\n\n\n@_loggable\nclass InodeComparator(Comparator):\n \"\"\"\n Objects with the same inode and device are identical.\n \"\"\"\n\n @classmethod\n def _applies(cls, item):\n return item.box is DirComparator\n\n @classmethod\n def cmp(cls, comparison):\n if reduce(operator.eq, [ i.inode for i in comparison.pair ]) and reduce(operator.eq, [ i.device for i in comparison.pair ]):\n cls._log_same(comparison)\n return Same\n else:\n cls._log_indeterminate(comparison)\n return False\n\n\n@_loggable\nclass EmptyFileComparator(Comparator):\n \"\"\"\n Two files which are each empty are equal. In particular, we don't\n need to open them or read them to make this determination.\n \"\"\"\n\n @classmethod\n def _applies(cls, item):\n return item.isreg\n\n @classmethod\n def cmp(cls, comparison):\n if comparison.pair[0].size == 0 and comparison.pair[1].size == 0:\n cls._log_same(comparison)\n return Same\n else:\n cls._log_indeterminate(comparison)\n return False\n\n\nclass RcmpException(Exception):\n \"\"\"Base class for all :py:mod:`rcmp` exceptions\"\"\"\n pass\n\n\nclass IndeterminateResult(RcmpException):\n \"\"\"\n Raised when we can't make any authoritative determination. At the\n top level, this is an error condition as this case indicates that\n we've failed to accomplish our job. Note that this is\n significantly different from the non-True value returned by\n :py:class:`Comparator` subclasses to indicate that they have no\n authoritative result.\n \"\"\"\n pass\n\n\nclass BadZipfile(RcmpException):\n \"\"\"Raised when we fail to open a zip archive\"\"\"\n pass\n\n\nclass _Packer(object):\n \"\"\"\n just for aggregation, not intended for instantiation.\n \"\"\"\n\n def __init__(self, joiner=b'/'):\n self.joiner = joiner\n\n def join(self, left, right):\n return (b'{0}{1}{2}').format(left, self.joiner, right)\n\n def split(self, path):\n return path.split(self.joiner)\n\n\nclass Box(Comparator):\n \"\"\"\n This is an abstract base class intended for comparators on things\n which are composed of other things. 
So, for instance, a\n directory, or a file archive.\n\n ..note:: subclasses are strategies - they have no properties.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n _packer = None\n\n @classmethod\n def member_shortname(cls, member):\n return cls._packer.split(member.name)[(-1)]\n\n @staticmethod\n @abc.abstractmethod\n def _applies(thing):\n raise NotImplementedError\n\n @classmethod\n @abc.abstractmethod\n def box_keys(cls, item):\n raise NotImplementedError\n\n @classmethod\n def _no_mate(cls, name, logger):\n cls.logger.log(DIFFERENCES, b'Different %s No mate: %s', cls.__name__, name)\n\n @classmethod\n def _expand(cls, ignoring, item):\n for shortname in cls.box_keys(item):\n fullname = cls._packer.join(item.name, shortname)\n ignore = ignoring(fullname)\n if ignore:\n cls.logger.log(SAMES, b'Ignoring %s cause %s', fullname, ignore)\n continue\n newitem = Items.find_or_create(fullname, item, cls)\n cls.logger.log(logging.DEBUG, b'%s expands %s -> %s', cls.__name__, item.name, shortname)\n yield (shortname, newitem)\n\n @staticmethod\n def _mates(item, container):\n return item.shortname in container.box.box_keys(container)\n\n @classmethod\n def _outer_join(cls, comparison, invert=False, spool=True):\n result = False\n if invert:\n rparent, lparent = [ p for p in comparison.pair ]\n else:\n lparent, rparent = [ p for p in comparison.pair ]\n for shortname, litem in cls._expand(comparison.ignoring, lparent):\n rname = cls._packer.join(rparent.name, shortname)\n ignore = comparison.ignoring(litem.name)\n if ignore:\n cls.logger.log(SAMES, b'Ignoring %s cause %s', lname, ignore)\n continue\n ritem = Items.find_or_create(rname, rparent, cls)\n if cls._mates(litem, rparent):\n if spool:\n cls.logger.log(logging.DEBUG, b'spooling %s', litem.name)\n comparison.children.append(Comparison(litem=litem, ritem=ritem, comparators=comparison.comparators, ignores=comparison.ignores, exit_asap=comparison.exit_asap))\n else:\n cls._no_mate(litem.name, logger)\n result = Different\n\n return result\n\n @classmethod\n def _left_outer_join(cls, comparison):\n return cls._outer_join(comparison)\n\n @classmethod\n def _right_outer_join(cls, comparison):\n return cls._outer_join(comparison, invert=True, spool=False)\n\n @classmethod\n def _inner_join(cls, comparison):\n retval = Same\n for c in comparison.children:\n r = c.cmp()\n if not r:\n cls._log_indeterminate(comparison)\n raise IndeterminateResult\n if r == Different:\n cls._log_different(comparison)\n retval = Different\n if comparison.exit_asap:\n return retval\n\n return retval\n\n @classmethod\n def cmp(cls, comparison):\n \"\"\"\n Compare our lists and return the result.\n \"\"\"\n cls.logger.log(logging.DEBUG, b'Box.cmp(%s, ...', cls.__name__)\n retval = Same\n comparison.pair[0].box = comparison.pair[1].box = cls\n if cls._left_outer_join(comparison) == Different or cls._right_outer_join(comparison) == Different:\n retval = Different\n if comparison.exit_asap:\n comparison.reset()\n return retval\n if cls._inner_join(comparison) == Different:\n retval = Different\n if comparison.exit_asap:\n comparison.reset()\n return retval\n if retval == Same:\n cls._log_same(comparison)\n comparison.reset()\n return retval\n\n @staticmethod\n @abc.abstractmethod\n def member_content(member):\n raise NotImplementedError\n\n @staticmethod\n def member_stat(member):\n \"\"\"\n If member has a statbuf, return it.\n\n If not, then look one up, cache it, and return it.\n\n :rtype: statbuf\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def 
member_exists(member):\n \"\"\"\n Check for existence.\n\n :rtype: boolean\n \"\"\"\n Box.logger.log(logging.DEBUG, b'member_exists: member = %s, parent.box(%s) -> %s', member.name, member.parent.name, member.parent.box.box_keys(member.parent))\n return member.shortname in member.parent.box.box_keys(member.parent)\n\n @staticmethod\n def member_inode(member):\n \"\"\"\n Return the inode number from stat.\n\n :rtype: string\n \"\"\"\n Box.logger.log(logging.ERROR, b'member_inode not implemented for %s', member.name)\n raise NotImplementedError\n\n @staticmethod\n def member_device(member):\n \"\"\"\n Return device number from stat.\n\n :rtype: string\n \"\"\"\n Box.logger.log(logging.ERROR, b'member_device not implemented for %s', member.name)\n raise NotImplementedError\n\n @staticmethod\n def member_size(member):\n \"\"\"\n Return our size.\n\n :rtype: int\n \"\"\"\n Box.logger.log(logging.ERROR, b'member_size not implemented for %s', member.name)\n raise NotImplementedError\n\n @staticmethod\n def member_isdir(member):\n \"\"\"\n Return True if and only if we are represent a directory.\n\n So far, none of the archive formats recur. That is, they're\n all flat collections of files rather than being collections of\n collections necessarily. Although such can be created, they\n aren't an inherent part of the file format.\n\n :rtype: boolean\n \"\"\"\n Box.logger.log(logging.ERROR, b'member_isdir not implemented for %s', member.name)\n raise NotImplementedError\n\n @staticmethod\n def member_isreg(member):\n \"\"\"\n Return True if and only if member represents a regular file.\n\n :rtype: boolean\n \"\"\"\n Box.logger.log(logging.ERROR, b'member_isreg not implemented for %s', member.name)\n raise NotImplementedError\n\n @staticmethod\n def member_islnk(member):\n \"\"\"\n Return True if and only if we represent a symbolic link.\n\n :rtype: boolean\n \"\"\"\n Box.logger.log(logging.ERROR, b'member_islnk not implemented for %s', member.name)\n raise NotImplementedError\n\n @staticmethod\n def member_link(member):\n \"\"\"\n Return a string representing the path to which the symbolic link\n points. This presumes that we are a symbolic link.\n\n :rtype: string\n \"\"\"\n Box.logger.log(logging.ERROR, b'member_link not implemented for %s', member.name)\n raise NotImplementedError\n\n\nclass ContentOnlyBox(Box):\n \"\"\"\n Some containers like zip and gzip only have members with actual\n content. That is, no symlinks, no devices, etc.\n \"\"\"\n\n @staticmethod\n def member_isreg(member):\n return True\n\n @staticmethod\n def member_isdir(member):\n return False\n\n @staticmethod\n def member_islnk(member):\n return False\n\n\nclass UnixBox(Box):\n \"\"\"\n Archivers like tar and cpio are capable of tracking hard and soft\n links as well as devices, directories and ownerships, etc.\n \"\"\"\n\n @staticmethod\n def member_isdir(member):\n return False\n\n\n@_loggable\nclass DirComparator(Box):\n \"\"\"\n Objects which are directories are special. They match if their\n contents match.\n\n .. 
fixme: this could be a box too.\n \"\"\"\n _packer = _Packer(b'/')\n\n @staticmethod\n def _applies(item):\n return item.isdir\n\n @classmethod\n def box_keys(cls, item):\n if not hasattr(item, b'dirs'):\n item.dirs = os.listdir(item.name)\n return item.dirs\n\n @staticmethod\n @contextlib.contextmanager\n def member_mmap(member):\n with open(member.name, b'rb') as (fd):\n yield mmap.mmap(fd.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ)\n fd.close()\n\n @staticmethod\n def member_content(member):\n with open(member.name, b'rb') as (fd):\n return fd.read()\n\n @staticmethod\n def member_exists(member):\n \"\"\"\n Check for existence.\n\n :rtype: boolean\n \"\"\"\n return os.path.exists(member.name)\n\n @staticmethod\n def member_stat(member):\n \"\"\"\n :rtype: statbuf\n \"\"\"\n return os.lstat(member.name)\n\n @staticmethod\n def member_inode(member):\n \"\"\"\n Return the inode number from stat.\n\n :rtype: string\n \"\"\"\n return member.stat.st_ino\n\n @staticmethod\n def member_device(member):\n \"\"\"\n Return device number from stat.\n\n :rtype: string\n \"\"\"\n return member.stat.st_dev\n\n @staticmethod\n def member_size(member):\n \"\"\"\n Return our size.\n\n :rtype: int\n \"\"\"\n return member.stat.st_size\n\n @staticmethod\n def member_isdir(member):\n \"\"\"\n Return True if and only if we are represent a file system\n directory.\n\n :rtype: boolean\n \"\"\"\n return stat.S_ISDIR(member.stat.st_mode)\n\n @staticmethod\n def member_isreg(member):\n \"\"\"\n Return True if and only if we represent a regular file.\n\n :rtype: boolean\n \"\"\"\n return stat.S_ISREG(member.stat.st_mode)\n\n @staticmethod\n def member_islnk(member):\n \"\"\"\n Return True if and only if we represent a symbolic link.\n\n :rtype: boolean\n \"\"\"\n return stat.S_ISLNK(member.stat.st_mode)\n\n @staticmethod\n def member_link(member):\n \"\"\"\n Return a string representing the path to which the symbolic link\n points. This presumes that we are a symbolic link.\n\n :rtype: string\n \"\"\"\n return os.readlink(member.name)\n\n\n@_loggable\nclass BitwiseComparator(Comparator):\n \"\"\"\n Objects which are bitwise identical are close enough.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n BitwiseComparator.logger.log(logging.DEBUG, b'testing whether BitwiseComparator applies to %s', item.name)\n return item.isreg\n\n @classmethod\n def cmp(cls, comparison):\n if not reduce(operator.eq, [ i.size for i in comparison.pair ]):\n cls._log_indeterminate(comparison)\n return False\n if reduce(operator.eq, [ bool(i._content) for i in comparison.pair ] + [True]) or comparison.pair[0].parent.box != DirComparator:\n if comparison.pair[0].content == comparison.pair[1].content:\n comparison.reset()\n cls._log_same(comparison)\n return Same\n else:\n cls._log_indeterminate(comparison)\n return False\n\n with contextlib.nested(DirComparator.member_mmap(comparison.pair[0]), DirComparator.member_mmap(comparison.pair[1])) as (m1, m2):\n if m1 == m2:\n cls._log_same(comparison)\n return Same\n comparison.pair[0]._content = m1[:]\n comparison.pair[1]._content = m2[:]\n cls._log_indeterminate(comparison)\n retval = False\n return retval\n\n\n@_loggable\nclass DateBlotBitwiseComparator(Comparator):\n \"\"\"\n Objects which are bitwise identical after date blotting are close\n enough. 
But this should only be tried late.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n return item.isreg\n\n @classmethod\n def cmp(cls, comparison):\n if reduce(operator.eq, [ date_blot(i.content) for i in comparison.pair ]):\n cls._log_same(comparison)\n retval = Same\n else:\n cls._log_indeterminate(comparison)\n retval = False\n return retval\n\n\n@_loggable\nclass NoSuchFileComparator(Comparator):\n \"\"\"\n Objects are different if either one is missing.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n return True\n\n @classmethod\n def cmp(cls, comparison):\n e = [ i.exists for i in comparison.pair ]\n if reduce(operator.ne, e):\n cls._log_different(comparison)\n return Different\n if e[0] is False:\n cls._log_same(comparison)\n return Same\n cls._log_indeterminate(comparison)\n return False\n\n\n@_loggable\nclass ElfComparator(Comparator):\n \"\"\"\n Elf files are different if any of the important sections are\n different.\n \"\"\"\n _magic = b'\\x7fELF'\n\n @staticmethod\n def _applies(item):\n return item.content.find(ElfComparator._magic, 0, len(ElfComparator._magic)) == 0\n\n @classmethod\n def cmp(cls, comparison):\n e = [ i.content.find(cls._magic, 0, len(cls._magic)) == 0 for i in comparison.pair ]\n if not reduce(operator.iand, e):\n cls._log_different(comparison)\n return Different\n else:\n e = [ elffile.open(name=i.name, block=i.content) for i in comparison.pair ]\n if e[0].close_enough(e[1]):\n cls._log_same(comparison)\n return Same\n cls._log_different(comparison)\n with tempfile.NamedTemporaryFile(delete=False) as (left):\n leftname = left.name\n left.write(comparison.pair[0].content)\n lcontent = subprocess.check_output(str((b'objdump -sfh {}').format(leftname)).split())\n os.remove(leftname)\n with tempfile.NamedTemporaryFile(delete=False) as (right):\n rightname = right.name\n right.write(comparison.pair[1].content)\n rcontent = subprocess.check_output(str((b'objdump -sfh {}').format(rightname)).split())\n os.remove(rightname)\n cls._log_unidiffs([lcontent, rcontent], [ i.name for i in comparison.pair ])\n return Different\n\n\n@_loggable\nclass ArMemberMetadataComparator(Comparator):\n \"\"\"\n Verify the metadata of each member of an ar archive.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n return item.parent is not None and item.parent.box is ArComparator\n\n @classmethod\n def cmp(cls, comparison):\n cls.logger.log(logging.DEBUG, b'cmp: pair[0] = %s', comparison.pair[0].name)\n cls.logger.log(logging.DEBUG, b'cmp: parent = %s', comparison.pair[0].parent.name)\n left, right = [ i.parent.ar.archived_files[i.shortname].header for i in comparison.pair ]\n if left.uid == right.uid and left.gid == right.gid and left.mode == right.mode:\n return False\n else:\n cls._log_different(comparison)\n return Different\n\n\n@contextlib.contextmanager\ndef openar(filename, fileobj):\n \"\"\"\n \"\"\"\n ar = arpy.Archive(filename=filename, fileobj=fileobj)\n ar.read_all_headers()\n yield ar\n ar.close()\n\n\n@_loggable\nclass ArComparator(ContentOnlyBox):\n \"\"\"\n Ar archive files are different if any of the important members are\n different.\n\n .. note:: This is a strategy - there are no instance\n properties. 
Rather, the content is stored in the comparison\n pairs.\n \"\"\"\n _magic = b'!\\n'\n _packer = _Packer(b'{ar}')\n\n @staticmethod\n def _applies(item):\n return item.content.find(ArComparator._magic, 0, len(ArComparator._magic)) == 0\n\n @classmethod\n def box_keys(cls, item):\n cls.logger.log(logging.DEBUG, b'%s.box_keys(%s) -> %s', cls.__name__, item.name, item.ar.archived_files.keys())\n return item.ar.archived_files.keys()\n\n @staticmethod\n def member_size(member):\n return member.parent.ar.archived_files[member.shortname].header.size\n\n @staticmethod\n def member_content(member):\n return member.parent.ar.archived_files[member.shortname].read()\n\n @classmethod\n def cmp(cls, comparison):\n with contextlib.nested(openar(comparison.pair[0].name, io.BytesIO(comparison.pair[0].content)), openar(comparison.pair[1].name, io.BytesIO(comparison.pair[1].content))) as (comparison.pair[0].ar, comparison.pair[1].ar):\n return super(cls, cls).cmp(comparison)\n\n\n@_loggable\nclass CpioMemberMetadataComparator(Comparator):\n \"\"\"\n Verify the metadata of each member of a cpio archive.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n return item.parent.box is CpioComparator\n\n @classmethod\n def cmp(cls, comparison):\n left, right = [ i.parent.cpio.get_member(i.shortname) for i in comparison.pair ]\n if left.mode == right.mode and left.uid == right.uid and left.gid == right.gid and left.rdevmajor == right.rdevmajor and left.rdevminor == right.rdevminor and left.filesize == right.filesize:\n if left.filesize == 0:\n return Same\n else:\n return False\n\n else:\n cls._log_different(comparison)\n return Different\n\n\n@contextlib.contextmanager\ndef opencpio(filename, guts):\n \"\"\"\n \"\"\"\n cpio = cpiofile.CpioFile().open(name=filename, block=guts)\n yield cpio\n cpio.close()\n\n\n@_loggable\nclass CpioComparator(UnixBox):\n \"\"\"\n Cpio archive files are different if any of the important members\n are different.\n\n .. note:: This is a strategy - there are no instance\n properties. 
Rather, the content is stored in the comparison\n pairs.\n \"\"\"\n _packer = _Packer(b'{cpio}')\n\n @staticmethod\n def _applies(item):\n return bool(cpiofile.valid_magic(item.content))\n\n @classmethod\n def box_keys(cls, item):\n return item.cpio.names\n\n @staticmethod\n def member_size(member):\n return member.parent.cpio.get_member(member.shortname).filesize\n\n @staticmethod\n def member_content(member):\n return member.parent.cpio.get_member(member.shortname).content\n\n @staticmethod\n def member_isreg(member):\n return stat.S_ISREG(member.parent.cpio.get_member(member.shortname).mode)\n\n @staticmethod\n def member_islnk(member):\n return stat.S_ISLNK(member.parent.cpio.get_member(member.shortname).mode)\n\n @staticmethod\n def member_link(member):\n return member.content\n\n @classmethod\n def cmp(cls, comparison):\n with contextlib.nested(opencpio(comparison.pair[0].name, comparison.pair[0].content), opencpio(comparison.pair[1].name, comparison.pair[0].content)) as (comparison.pair[0].cpio, comparison.pair[1].cpio):\n return super(cls, cls).cmp(comparison)\n\n\n@_loggable\nclass TarMemberMetadataComparator(Comparator):\n \"\"\"\n Verify the metadata of each member of an ar archive.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n return item.parent.box is TarComparator\n\n @classmethod\n def cmp(cls, comparison):\n left, right = [ i.parent.box.getmember(i) for i in comparison.pair ]\n if left.mode == right.mode and left.type == right.type and left.linkname == right.linkname and left.uid == right.uid and left.gid == right.gid and left.uname == right.uname and left.gname == right.gname:\n if left.size == 0:\n return Same\n else:\n return False\n\n else:\n cls._log_different(comparison)\n return Different\n\n\nimport contextlib\n\n@contextlib.contextmanager\ndef opentar(filename, mode, fileobj):\n \"\"\"\n .. todo:: remove opentar once we move to python-2.7\n \"\"\"\n tar = tarfile.open(name=filename, mode=mode, fileobj=fileobj)\n yield tar\n tar.close()\n\n\n@_loggable\nclass TarComparator(UnixBox):\n \"\"\"\n Tar archive files are different if any of the important members\n are different.\n\n .. note:: must be called *after* GzipComparator in order to duck\n the Python tarfile module's pathological performace with compressed\n archives.\n\n .. note:: This is a strategy - there are no instance\n properties. 
Rather, the content is stored in the comparison\n pairs.\n \"\"\"\n _packer = _Packer(b'{tar}')\n\n @staticmethod\n def _applies(item):\n try:\n tarfile.open(fileobj=io.BytesIO(item.content)).close()\n except:\n return False\n\n return True\n\n @staticmethod\n def getmember(item):\n if not hasattr(item, b'member'):\n item.member = item.parent.tar.getmember(item.shortname)\n return item.member\n\n @classmethod\n def box_keys(cls, item):\n if not hasattr(item, b'names'):\n item.names = item.tar.getnames()\n return item.names\n\n @staticmethod\n def member_size(member):\n return member.parent.box.getmember(member).size\n\n @staticmethod\n def member_content(member):\n info = member.parent.box.getmember(member)\n if not info:\n raise AssertionError\n if info.isdir() or info.isdev():\n return b''\n fileobj = member.parent.tar.extractfile(member.shortname)\n fileobj or TarComparator.logger.log(logging.ERROR, b'member_content could not find %s, (%s), in %s', member.shortname, member.name, member.parent.name)\n raise NotImplementedError\n return fileobj.read()\n\n @staticmethod\n def member_isreg(member):\n return member.parent.box.getmember(member).isreg()\n\n @staticmethod\n def member_islnk(member):\n return member.parent.box.getmember(member).issym()\n\n @staticmethod\n def member_link(member):\n return member.parent.box.getmember(member).linkname\n\n @classmethod\n def cmp(cls, comparison):\n with contextlib.nested(opentar(comparison.pair[0].name, b'r', io.BytesIO(comparison.pair[0].content)), opentar(comparison.pair[1].name, b'r', io.BytesIO(comparison.pair[1].content))) as (comparison.pair[0].tar, comparison.pair[1].tar):\n return super(cls, cls).cmp(comparison)\n\n\nimport contextlib\n\n@contextlib.contextmanager\ndef openzip(file, mode):\n \"\"\"\n .. todo:: remove openzip once we move to python-2.7\n \"\"\"\n zip = zipfile.ZipFile(file, mode)\n if zip.testzip():\n raise BadZipfile\n yield zip\n zip.close()\n\n\n@_loggable\nclass ZipComparator(ContentOnlyBox):\n \"\"\"\n Zip archive files are different if any of the members are different.\n\n .. note:: This is a strategy - there are no instance\n properties. Rather, the content is stored in the comparison\n pairs.\n \"\"\"\n _myname = b'zip'\n _packer = _Packer((b'{{}}').format(_myname))\n\n @staticmethod\n def _applies(item):\n \"\"\"\n \"\"\"\n try:\n zipfile.ZipFile(io.BytesIO(item.content), b'r').close()\n except:\n return False\n\n return True\n\n @classmethod\n def box_keys(cls, item):\n return item.zip.namelist()\n\n @staticmethod\n def member_size(member):\n return member.parent.zip.getinfo(member.shortname).file_size\n\n @staticmethod\n def member_content(member):\n return member.parent.zip.read(member.shortname)\n\n @classmethod\n def cmp(cls, comparison):\n with contextlib.nested(openzip(io.BytesIO(comparison.pair[0].content), b'r'), openzip(io.BytesIO(comparison.pair[1].content), b'r')) as (comparison.pair[0].zip, comparison.pair[1].zip):\n if comparison.pair[0].zip.comment != comparison.pair[1].zip.comment:\n cls._log_different(comparison)\n return Different\n else:\n return super(cls, cls).cmp(comparison)\n\n\n@_loggable\nclass AMComparator(Comparator):\n \"\"\"\n Automake generated Makefiles have some nondeterminisms. They're\n the same if they're the same aside from that. 
(May also need to\n make some allowance for different tool sets later.)\n \"\"\"\n\n @staticmethod\n def _applies(item):\n if not item.name.endswith(b'Makefile'):\n return False\n p = -1\n for i in range(5):\n p = item.content.find(b'\\n', p + 1, p + 132)\n if p is -1:\n return False\n\n return item.content.find(b'generated by automake', 0, p) > -1\n\n @classmethod\n def cmp(cls, comparison):\n left, right = [ i.content.decode(b'utf8') for i in comparison.pair ]\n left, right = [ date_blot(i) for i in [left, right] ]\n left, right = [ re.sub(b'(?m)^MODVERSION = .*$', b'MODVERSION = ...', i, 0) for i in [left, right] ]\n left, right = [ re.sub(b'(?m)^BUILDINFO = .*$', b'BUILDINFO = ...', i, 0) for i in [left, right] ]\n if left == right:\n cls._log_same(comparison)\n return Same\n else:\n cls._log_different(comparison)\n cls._log_unidiffs([left, right], [ i.name for i in comparison.pair ])\n return Different\n\n\n@_loggable\nclass ConfigLogComparator(Comparator):\n \"\"\"\n When autoconf tests fail, there's a line written to the config.log\n which exposes the name of the underlying temporary file. Since\n the name of this temporary file changes from build to build, it\n introduces a nondeterminism.\n\n .. note:: I'd ignore config.log files, (and started to do exactly\n that), but it occurs to me that differences in autoconf\n configuration are quite likely to cause build differences. So\n I've been more surgical.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n if item.name.endswith(b'config.log'):\n trigger = b'generated by GNU Autoconf'\n else:\n if item.name.endswith(b'config.status'):\n trigger = b'Generated by configure.'\n elif item.name.endswith(b'config.h'):\n trigger = b'Generated from config.h.in by configure.'\n else:\n return False\n p = -1\n for i in range(8):\n p = item.content.find(b'\\n', p + 1)\n if p is -1:\n return False\n\n return item.content.find(trigger, 0, p) > -1\n\n @classmethod\n def cmp(cls, comparison):\n left, right = [ i.content for i in comparison.pair ]\n left, right = [ re.sub(b'(?m)/cc.{6}\\\\.([os])', b'/cc------.\\x01', i, 0) for i in [\n left, right]\n ]\n left, right = [ re.sub(b'(?m)MODVERSION.*$', b'MODVERSION...', i, 0) for i in [\n left, right]\n ]\n left, right = [ date_blot(i) for i in [left, right] ]\n if left == right:\n cls._log_same(comparison)\n return Same\n else:\n cls._log_different(comparison)\n cls._log_unidiffs([left, right], [ i.name for i in comparison.pair ])\n return Different\n\n\n@_loggable\nclass KernelConfComparator(Comparator):\n \"\"\"\n When \"make config\" is run in the kernel, it generates an auto.conf\n file which includes a time stamp. I think these files are\n important enough to merit more surgical checking. 
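(The fourth header line in these files presumably carries the embedded generation timestamp.) 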
This comparator\n blots out the 4th line.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n if item.name.endswith(b'auto.conf'):\n trigger = b\"Automatically generated make config: don't edit\"\n else:\n if item.name.endswith(b'autoconf.h'):\n trigger = b\"Automatically generated C config: don't edit\"\n else:\n return False\n p = -1\n for i in range(8):\n p = item.content.find(b'\\n', p + 1)\n if p is -1:\n return False\n\n return item.content.find(trigger, 0, p) > -1\n\n @classmethod\n def cmp(cls, comparison):\n left, right = [ i.content.split(b'\\n') for i in comparison.pair ]\n del left[3]\n del right[3]\n if left == right:\n cls._log_same(comparison)\n return Same\n else:\n cls._log_different(comparison)\n cls._log_unidiffs([left, right], [ i.name for i in comparison.pair ])\n return Different\n\n\n@_loggable\nclass ZipMemberMetadataComparator(Comparator):\n \"\"\"\n Verify the metadata of each member of a zipfile.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n return item.box is ZipComparator\n\n @classmethod\n def cmp(cls, comparison):\n left, right = [ i.parent.zip.getinfo(i.shortname) for i in comparison.pair ]\n if left.compress_type == right.compress_type and left.comment == right.comment and left.create_system == right.create_system and left.create_version == right.create_version and left.extract_version == right.extract_version and left.reserved == right.reserved and left.flag_bits == right.flag_bits and left.volume == right.volume and left.internal_attr == right.internal_attr and left.external_attr == right.external_attr:\n return False\n else:\n cls._log_different(comparison)\n return Different\n\n\nclass Encoder(ContentOnlyBox):\n \"\"\"\n Most UN*X compression programs compress a single stream of data.\n Similarly, many encryption programs do the same.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n _content_name = None\n\n @staticmethod\n @abc.abstractmethod\n def open(filename, mode, fileobj):\n raise NotImplementedError\n\n @classmethod\n def box_keys(cls, item):\n return [cls._content_name]\n\n @staticmethod\n def member_size(member):\n return len(member.content)\n\n @classmethod\n def cmp(cls, comparison):\n for p in comparison.pair:\n p.box = cls\n\n return Comparison(litem=Item(cls._packer.join(comparison.pair[0].name, cls._content_name), comparison.pair[0], box=cls), ritem=Item(cls._packer.join(comparison.pair[1].name, cls._content_name), comparison.pair[1], box=cls), comparators=comparison.comparators, ignores=comparison.ignores, exit_asap=comparison.exit_asap).cmp()\n\n\n@_loggable\nclass GzipComparator(Encoder):\n \"\"\"\n Gzip archives only have one member but the archive itself sadly\n includes a timestamp. 
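(Per RFC 1952 the timestamp is the 4-byte MTIME field at offset 4 of the gzip header, so two otherwise identical archives can differ bitwise.) 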
You can see the timestamp using \"gzip -l -v\".\n \"\"\"\n _myname = b'gzip'\n _packer = _Packer((b'{{{}}}').format(_myname))\n _content_name = (b'{{{}content}}').format(_myname)\n\n @staticmethod\n @contextlib.contextmanager\n def open(filename, mode, fileobj):\n gz = gzip.GzipFile(filename, mode, 9, fileobj)\n yield gz\n gz.close()\n\n @staticmethod\n def _applies(item):\n return bytes(item.content[0:2]) == b'\\x1f\\x8b'\n\n @staticmethod\n def member_content(member):\n with GzipComparator.open(member.parent.name, b'rb', io.BytesIO(member.parent.content)) as (gzipobj):\n return gzipobj.read()\n\n\n@_loggable\nclass BZ2Comparator(Encoder):\n \"\"\"\n BZ2 archives only have one member.\n \"\"\"\n _myname = b'bz2'\n _packer = _Packer((b'{{{}}}').format(_myname))\n _content_name = (b'{{{}content}}').format(_myname)\n\n @staticmethod\n @contextlib.contextmanager\n def open(filename, mode, fileobj):\n \"\"\"\n .. todo:: remove openzip once we drop python-2.6\n \"\"\"\n bobj = bz2.BZ2File(fileobj if fileobj else filename, mode, None, 9)\n yield bobj\n bobj.close()\n return\n\n @staticmethod\n def _applies(item):\n return bytes(item.content[0:2]) == b'BZ'\n\n @staticmethod\n def member_content(member):\n with BZ2Comparator.open(member.parent.name, b'rb', io.BytesIO(member.parent.content)) as (bz2obj):\n return bz2obj.read()\n\n\n@_loggable\nclass XZComparator(Encoder):\n \"\"\"\n XZ archives only have one member.\n \"\"\"\n _myname = b'xz'\n _packer = _Packer((b'{{{}}}').format(_myname))\n _content_name = (b'{{{}content}}').format(_myname)\n\n @staticmethod\n @contextlib.contextmanager\n def open(filename, mode, fileobj):\n xzobj = lzma.LZMAFile(fileobj if fileobj else filename, mode)\n yield xzobj\n xzobj.close()\n\n @staticmethod\n def _applies(item):\n \"\"\"\n ..note:: lzma format files have no magic number. So while the lzma\n library can open them, we don't really have a way to recognize\n them easily other than just attempting to open and living with\n failures. But that seems pretty expensive and besides, who\n uses lzma?\n \"\"\"\n return bytes(item.content[0:6]) == b'\\xfd7zXZ\\x00'\n\n @staticmethod\n def member_content(member):\n with XZComparator.open(member.parent.name, b'rb', io.BytesIO(member.parent.content)) as (xzobj):\n return xzobj.read()\n\n\n@_loggable\nclass FailComparator(Comparator):\n \"\"\"\n Used as a catchall - just return Difference\n \"\"\"\n\n @staticmethod\n def _applies(item):\n return True\n\n @classmethod\n def cmp(cls, comparison):\n cls._log_different(comparison)\n cls.logger.log(DIFFERENCES, b'\\n')\n cls._log_unidiffs_comparison(comparison)\n return Different\n\n\ndef _findCommonSuffix(this, that):\n \"\"\"\n find common trailing subpath. 
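(For example, '/a/x/y' and '/b/x/y' should yield ('/a', '/b', 'x/y').) 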
return a 3-tuple consisting of the\n unique part of the arguments followed by the common part.\n \"\"\"\n if not this or not that:\n return (this, that, b'')\n else:\n this_head, this_tail = os.path.split(this)\n that_head, that_tail = os.path.split(that)\n if this_tail == that_tail:\n x, y, z = _findCommonSuffix(this_head, that_head)\n return (\n x, y, os.path.join(z, this_tail))\n return (this, that, b'')\n\n\n@_loggable\nclass BuriedPathComparator(Comparator):\n \"\"\"\n Files which differ only in that they have their paths buried in them aren't really different.\n\n (currently unused).\n \"\"\"\n\n @staticmethod\n def _applies(item):\n return item.isreg\n\n @classmethod\n def cmp(cls, comparison):\n this, that = comparison.pair\n this.head, that.head, tail = _findCommonSuffix(this.name, that.name)\n if this.content.find(bytes(this.head)) >= 0:\n this_content, that_content = [ bytearray(t.content).replace(bytes(t.head), b'@placeholder@') for t in comparison.pair ]\n if this_content == that_content:\n cls._log_same(comparison)\n return Same\n cls._log_indeterminate(comparison)\n return False\n\n\n@_loggable\nclass SymlinkComparator(Comparator):\n \"\"\"\n Symlinks are equal if they point to the same place.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n return item.islnk\n\n @classmethod\n def cmp(cls, comparison):\n this, that = [ p.link for p in comparison.pair ]\n if this == that:\n cls._log_same(comparison)\n return Same\n else:\n cls._log_different(comparison)\n return Different\n\n\n@_loggable\nclass MapComparator(Comparator):\n \"\"\"\n Linker map files include a reference to the output file which is\n typically a generated temp file name.\n \"\"\"\n\n @staticmethod\n def _applies(item):\n try:\n retval = item.content.startswith(b'Archive member included')\n except UnicodeDecodeError:\n retval = False\n\n return retval\n\n _pattern = re.compile(b'tmp-\\\\d*')\n\n @classmethod\n def cmp(cls, comparison):\n munged = [ cls._pattern.sub(b'tmp-0', i.content) for i in comparison.pair ]\n if reduce(operator.eq, munged):\n cls._log_same(comparison)\n return Same\n else:\n cls._log_indeterminate(comparison)\n return False\n\n\n@_loggable\nclass _ComparisonCommon(object):\n \"\"\"\n This is a base class that holds utilities common to both\n :py:class:`Comparison` and :py:class:`ComparisonList`. 
It is not\n intended to be instantiated.\n \n :param comparators: comparators to be applied\n :type comparators: list of :py:class:`Comparator`\n :param ignores: fnmatch style wild card patterns\n :type ignores: list of strings\n :param exit_asap: exit as soon as possible (Indeterminate is always raised asap)\n :type exit_asap: boolean\n \"\"\"\n default_comparators = [\n NoSuchFileComparator,\n InodeComparator,\n EmptyFileComparator,\n DirComparator,\n ArMemberMetadataComparator,\n BitwiseComparator,\n SymlinkComparator,\n ElfComparator,\n ArComparator,\n AMComparator,\n ConfigLogComparator,\n KernelConfComparator,\n BZ2Comparator,\n GzipComparator,\n ZipComparator,\n TarMemberMetadataComparator,\n TarComparator,\n CpioMemberMetadataComparator,\n CpioComparator,\n MapComparator,\n DateBlotBitwiseComparator,\n FailComparator]\n\n def __init__(self, comparators=False, ignores=[], exit_asap=False):\n self.comparators = comparators if comparators is not False else self.default_comparators\n self.ignores = ignores\n self.exit_asap = exit_asap\n\n def ignoring(self, fname):\n return ignoring(self.ignores, fname)\n\n def cmp(self):\n self.logger.log(logging.FATAL, b'%s not implemented', self.__class__.__name__)\n\n\n@_loggable\nclass Comparison(_ComparisonCommon):\n \"\"\"\n Represents a pair of objects to be compared.\n\n An instance of :py:class:`Comparison` comprises a pair of\n :py:class:`Item`, a list of :py:class:`Comparator`, and a method\n for applying the list of :py:class:`Comparator` to the pair of\n :py:class:`Item` and returning an answer.\n\n If exit_asap is true, the first difference will end the\n comparison. If it is not true, the comparison will continue\n despite knowing that our aggregate result is that we are\n :py:class:`Different`. This is useful for getting a complete list\n of all differences.\n\n exit_asap=False is like \"make -k\" in the sense that it reports on\n all differences rather than stopping after the first.\n\n .. todo:: exit_asap is not currently functional.\n\n :param lname: path name of the first thing, (the leftmost one)\n :type lname: string\n :param rname: path name of the second thing, (the rightmost one)\n :type rname: string\n :param comparators: list of comparators to be applied\n :type comparators: list of :py:class:`Comparator`\n :param ignores: wild card patterns of path names to be ignored\n :type ignores: list of strings\n :param exit_asap: exit as soon as possible\n :type exit_asap: boolean\n \"\"\"\n\n @property\n def pair(self):\n \"\"\"\n A 2 item list of the items to be compared\n\n .. 
todo:: this should be a tuple.\n \"\"\"\n return self._pair\n\n @pair.setter\n def pair(self, value):\n \"\"\"setter\"\"\"\n self._pair = value\n\n def reset(self):\n self.logger.log(logging.DEBUG, b'resetting %s', self.pair[0].name)\n for item in self.pair:\n item.reset()\n\n def __init__(self, lname=b'', rname=b'', litem=False, ritem=False, comparators=False, ignores=[], exit_asap=False):\n _ComparisonCommon.__init__(self, comparators=comparators, ignores=ignores, exit_asap=exit_asap)\n if rname and not ritem:\n ritem = Items.find_or_create(rname, root, DirComparator)\n if lname and not litem:\n litem = Items.find_or_create(lname, root, DirComparator)\n self.pair = (litem, ritem)\n self.children = []\n for item in self.pair:\n i = self.ignoring(item.name)\n if i:\n self.logger.log(logging.ERROR, b'Creating comparison using ignored item %s cause %s', item.name, i)\n raise sys.exit(1)\n\n def cmp(self):\n \"\"\"\n Compare our pair of :py:class:`Item`.\n\n Run through our list of :py:class:`Comparator` calling each\n one in turn with our pair of :py:class:`Item`. Each comparator\n is expected to return either:\n\n any non True value, (null, False, etc)\n indicating an indeterminate result, that is, that this particular\n comparator could make no authoritative determinations and that the\n next comparator in the list should be tried\n\n :py:class:`Same`\n an authoritative declaration that the items are\n sufficiently alike and thus no further comparators need be\n tried\n\n :py:class:`Different`\n an authoritative declaration that the items are\n insufficiently alike and thus no further comparators need\n be tried.\n\n If no :py:class:`Comparator` returns non-null, then\n :py:exc:`IndeterminateResult` will be raised.\n\n .. todo:: exit_asap is not currently functional.\n \"\"\"\n for comparator in self.comparators:\n if not comparator.applies(self):\n self.logger.log(logging.DEBUG, b'does not apply - %s %s', comparator, self._pair[0].name)\n continue\n self.logger.log(logging.DEBUG, b'applies - %s %s', comparator, self._pair[0].name)\n result = comparator.cmp(self)\n if result:\n self.logger.log(logging.DEBUG, b'%s %s', result.__name__, self.__class__.__name__)\n self.reset()\n return result\n\n self.logger.log(INDETERMINATES, b'indeterminate result for %s', [ p.name for p in self._pair ])\n raise IndeterminateResult\n\n\n@_loggable\nclass ComparisonList(_ComparisonCommon):\n \"\"\"\n Represents a pair of lists of path names to be compared - one from\n column a, one from column b, etc.\n\n An instance of :py:class:`ComparisonList` is very similar to a\n :py:class:`Comparison` except that instead of a pair of Items, it\n comprises a pair of lists of path names\n\n :param stuff: path names to be compared\n :type stuff: a (2-element) list of lists of string\n\n In all other ways, this class resembles :py:class:`Comparison`.\n \"\"\"\n\n def __init__(self, stuff, comparators=False, ignores=[], exit_asap=False):\n _ComparisonCommon.__init__(self, comparators=comparators, ignores=ignores, exit_asap=exit_asap)\n self.stuff = []\n for lst in stuff:\n new_lst = []\n for fname in lst:\n cause = self.ignoring(fname)\n if cause:\n self.logger.log(SAMES, b\"ignoring '%s' cause '%s' in %s\", fname, cause, self.__class__.__name__)\n else:\n new_lst.append(fname)\n\n self.stuff.append(new_lst)\n\n def cmp(self):\n Comparison.__doc__\n length = [ len(i) for i in self.stuff ]\n result = Same\n if not reduce(operator.eq, length):\n self.logger.log(DIFFERENCES, b'Different %s lists are of different sizes: %s', 
self.__class__.__name__, length)\n retval = Different\n if self.exit_asap:\n return retval\n for i in range(0, max(length)):\n comparison = Comparison(litem=Items.find_or_create(self.stuff[0][i], root), ritem=Items.find_or_create(self.stuff[1][i], root), comparators=self.comparators, ignores=self.ignores, exit_asap=self.exit_asap)\n c = comparison.cmp()\n if not c:\n self.logger.log(INDETERMINATES, b'Indeterminate %s', self.__class__.__name__)\n raise IndeterminateResult\n else:\n comparison.reset()\n if c is Different:\n self.logger.log(logging.DEBUG, b'Different %s', self.__class__.__name__)\n result = Different\n if self.exit_asap:\n return result\n\n if result is Same:\n self.logger.log(SAMES, b'Same %s', self.__class__.__name__)\n return result\n\n\nroot = Item(b'{root}', True)","sub_path":"pycfiles/rcmp-0.8-py2.7/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":64033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"524209215","text":"#!/usr/bin/env python\n\"\"\"Compute DDT PC1 projections.\"\"\"\n\n\nfrom __future__ import division, print_function\n\n\n__all__ = ['DDTManager']\n__author__ = 'Hao Zhang'\n__copyright__ = 'Copyright @2017 LAMDA'\n__date__ = '2017-09-29'\n__email__ = 'zhangh0214@gmail.com'\n__license__ = 'CC BY-SA 3.0'\n__status__ = 'Development'\n__updated__ = '2017-10-03'\n__version__ = '1.20'\n\n\nimport argparse\nimport itertools\nimport os\nimport sys\nif sys.version[0] == '2':\n filter = itertools.ifilter\n input = raw_input\n map = itertools.imap\n range = xrange\n zip = itertools.izip\n\nimport numpy as np\nimport sklearn.decomposition\n\n\nclass DDTManager(object):\n \"\"\"Manager class for split Oxford images, perform PCA on each cluster,\n and compute PC1 projection on Oxford features.\n\n Attributes:\n _paths, dict of (str, str): Data and project paths.\n _pca_model: PCA models for DDT.\n \"\"\"\n def __init__(self, paths):\n self._paths = paths\n self._pca_model = None\n\n def fit(self):\n \"\"\"Load via pool5 features and fit a PCA model.\"\"\"\n print('Load via pool5 features.')\n pool5_list = sorted([\n f for f in os.listdir(self._paths['via_all_pool5'])\n if os.path.isfile(os.path.join(self._paths['via_all_pool5'], f))])\n m = len(pool5_list)\n all_pool5 = []\n for i, f in enumerate(pool5_list):\n if i % 100 == 0:\n print('procesing %d/%d.' % (i, m))\n pool5_i = np.load(os.path.join(self._paths['via_all_pool5'], f))\n D, H, W = pool5_i.shape\n assert D == 512\n pool5_i = np.reshape(pool5_i, (D, H * W)).T\n all_pool5.append(pool5_i)\n all_pool5 = np.vstack(all_pool5)\n assert all_pool5.shape[1] == 512\n\n print('Fit PCA.')\n self._pca_model = sklearn.decomposition.PCA(n_components=1)\n self._pca_model.fit(all_pool5)\n\n def apply(self, pool5_path, pc1_path):\n \"\"\"Project test pool5 features according to the PCA model, for all\n images/cropped query images, respectively.\n\n Args:\n pool5_path, str: Pool5 feature path.\n pc1_path, str: PC1 projection path.\n \"\"\"\n print('Compute PC1 projection for %s.' 
% pool5_path)\n pool5_names = sorted([\n f for f in os.listdir(self._paths[pool5_path])\n if os.path.isfile(os.path.join(self._paths[pool5_path], f))])\n m = len(pool5_names)\n for i, f in enumerate(pool5_names):\n if i % 100 == 0:\n print('Processing %d/%d' % (i, m))\n pool5_i = np.load(os.path.join(self._paths[pool5_path], f))\n D, H, W = pool5_i.shape\n assert D == 512\n pool5_i = np.reshape(pool5_i, (D, H * W)).T\n pc1_i = self._pca_model.transform(pool5_i)\n np.save(os.path.join(self._paths[pc1_path],\n os.path.splitext(os.path.basename(f))[0]),\n pc1_i)\n\n\ndef main():\n \"\"\"Main function of the program.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--test', dest='test', type=str, required=True,\n help='Dataset to evaluate.')\n parser.add_argument('--via', dest='via', type=str, required=True,\n help='Dataset to assistant PCA whitening.')\n args = parser.parse_args()\n if args.test not in ['oxford', 'paris']:\n raise AttributeError('--test parameter must be oxford/paris.')\n if args.via not in ['oxford', 'paris']:\n raise AttributeError('--via parameter must be oxford/paris.')\n\n test_image_root = os.path.join('/data/zhangh/data/', args.test)\n test_data_root = os.path.join('/data/zhangh/project/ddt/data/', args.test)\n via_data_root = os.path.join('/data/zhangh/project/ddt/data/', args.via)\n paths = {\n 'test_all_pool5': os.path.join(test_image_root, 'pool5/all/'),\n 'test_crop_pool5': os.path.join(test_image_root, 'pool5/crop/'),\n 'test_full_pool5': os.path.join(test_image_root, 'pool5/full/'),\n 'via_all_pool5': os.path.join(via_data_root, 'pool5/'),\n 'test_all_pc1': os.path.join(test_data_root, 'pc1/all/'),\n 'test_crop_pc1': os.path.join(test_data_root, 'pc1/crop/'),\n 'test_full_pc1': os.path.join(test_data_root, 'pc1/full/'),\n }\n for k in paths:\n assert os.path.isdir(paths[k])\n\n ddt_manager = DDTManager(paths)\n ddt_manager.fit()\n ddt_manager.apply('test_all_pool5', 'test_all_pc1')\n ddt_manager.apply('test_crop_pool5', 'test_crop_pc1')\n ddt_manager.apply('test_full_pool5', 'test_full_pc1')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"versions/ver1.22/ddt/ddt.py","file_name":"ddt.py","file_ext":"py","file_size_in_byte":4656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"608146644","text":"\nimport logging\nimport time\nfrom multiprocessing import Pool \nimport numpy as np\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"Recommender\", \"Evaluation\"]\n\nclass Recommender(object):\n def __init__(self, matrix):\n super(Recommender, self).__init__()\n self.matrix = matrix\n self.num_users = matrix.shape[0]\n self.num_items = matrix.shape[1]\n\n def train(self, before=None, after=None):\n raise NotImplementedError\n\n def predict(self, user, item):\n raise NotImplementedError\n\n def recommend(self, user, num=5, ruleout=True):\n scores = []\n for poi in xrange(self.num_items):\n scores.append((poi, self.predict(user, poi)))\n scores.sort(key=lambda x: x[1], reverse=True)\n\n if self.matrix is not None and ruleout:\n ruleouts = set(np.nonzero(self.matrix[user])[1])\n else:\n ruleouts = set()\n\n result = []\n for poi, score in scores:\n if poi in ruleouts:\n continue\n result.append(poi)\n if len(result) >= num:\n break\n return result \n\n \ndef _proxy_test(args):\n evaluation, user, full = args\n bingos = evaluation.hits(user)\n n = len(bingos)\n if full and n > 0:\n log.debug(\"user %i hit %s\" % (user, bingos))\n return (user, n)\n\n\nclass Evaluation(object):\n def 
__init__(self, matrix, model, N=5, users=None, _pool_num=6):\n \"\"\"\n Evaluate a model.Report precision and recall.\n matrix: test checkin matrix, `sparse matrix`\n model: model for test, must has `recommend` methid\n N : recommend N pois\n users: users for test, should be iterated\n _pool_num: thread number to test, most cases default is ok.\n if 0, then turn off multiple threads.\n usage:\n >>> from scipy import sparse\n >>> import numpy as np\n >>> matrix = sparse.csr_matrix(np.matrix([[0, 1], [1, 1]]))\n >>> class M(object):\n ... def recommend(self, u, N):\n ... if u == 0:\n ... return [1, -1, -1, -1, -1]\n ... return [-1, -1, -1, -1, -1]\n >>> ev = Evaluation(matrix, model=M(), users=[0, 1], _pool_num=0)\n >>> ev.test()\n (0.5, 0.1)\n\n \"\"\"\n self.matrix = matrix\n self.N = N\n self.model = model\n self._pool_num = _pool_num\n self.num_users = matrix.shape[0]\n self.num_items = matrix.shape[1]\n if users is None:\n self.users = xrange(self.num_users)\n else:\n self.users = users\n\n def hits(self, user):\n pois = set(np.nonzero(self.matrix[user])[1])\n if len(pois) <= 0:\n return []\n result = self.model.recommend(user, self.N)\n return list(set(pois) & set(result))\n\n def test(self, full=False):\n t0 = time.time()\n def prepare():\n for user in self.users:\n yield (self, user, full)\n\n if self._pool_num > 0:\n pool = Pool(self._pool_num)\n matchs = pool.map(_proxy_test, prepare()) \n pool.close()\n pool.join()\n else:\n matchs = []\n for arg in prepare():\n matchs.append(_proxy_test(arg))\n \n nhits = sum([n for u, n in matchs])\n _recall = 0.0\n valid_num = 0\n for user, n in matchs:\n pois = np.nonzero(self.matrix[user])[1]\n if len(pois) > 0:\n valid_num += 1\n _recall += float(n) / len(pois)\n\n if valid_num == 0:\n raise ValueError(\"Checkin matrix should not be empty.\")\n prec = float(nhits) / (valid_num * self.N)\n _recall = float(_recall) / valid_num \n t1 = time.time()\n log.info(\"recall : %.4f\" % _recall)\n log.info(\"precision: %.4f\" % prec)\n log.debug('time %.4f seconds' % (t1 - t0))\n return (_recall, prec)\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"472710049","text":"\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport datetime\nfrom qutip import *\nimport qutip.control.pulseoptim as pulseoptim\n\nfrom .miscellaneous import toLatex\n\ndef plotExpectation(time,observables,result):\n \"\"\"\n observables = [{'Hcavity':Hcavity}]\n \"\"\"\n fig, axs = plt.subplots(len(observables), 1)\n if len(observables)==1: axs = [axs]\n for i, row in enumerate(observables):\n for name, observable in row.items():\n expectation = expect(observable, result.states)\n axs[i].plot(time, expectation, label=name)\n axs[i].legend()\n fig.tight_layout(); plt.show()\n\ndef plotWigners(\n rhoList,radius,\n nCol=6,titleList=None, ptraceIndex=None, \n):\n if(len(rhoList) 1 else axs[col]\n ax.contourf(\n x, x, W, 100, cmap=plt.get_cmap('RdBu'),\n norm=colors.Normalize(-.25,.25), \n )\n if titleList: ax.set_title(titleList[i])\n plt.tight_layout(); plt.show()\n\ndef makeTitle(t):\n return \"t = {:.2f}\".format(t)\n\ndef plotWignersIntermediateStates(\n time,result,number,\n radius, nCol=6,titleList=None, ptraceIndex=None, \n):\n N = len( result.states )\n if number > 1: number = number-1\n step = N // number \n indices = np.arange(0,N,step)\n rhoList = [ket2dm(result.states[i]) for i in indices]\n 
rhoList.append(result.states[-1])\n titleList = [makeTitle(time[i]) for i in indices]\n titleList.append(makeTitle(time[-1]))\n plotWigners(\n rhoList,radius,\n nCol,titleList, ptraceIndex, \n )\n\ndef plotOccupations(\n rhoList,\n nCol=6,titleList=None, ptraceIndex=None, \n):\n if(len(rhoList) 1 else axs[col]\n ax.set_ylim(-0.2, 1.2)\n ax.bar(x, np.abs(rho.diag()))\n if titleList: ax.set_title(titleList[i])\n plt.tight_layout(); plt.show()\n\ndef plotOccupationsIntermediateStates(\n time,result,number,\n nCol=6,titleList=None, ptraceIndex=None, \n):\n N = len( result.states )\n if number > 1: number = number-1\n step = N // number \n indices = np.arange(0,N,step)\n rhoList = [ket2dm(result.states[i]) for i in indices]\n rhoList.append(result.states[-1])\n titleList = [makeTitle(time[i]) for i in indices]\n titleList.append(makeTitle(time[-1]))\n plotOccupations( \n rhoList,\n nCol,titleList, ptraceIndex, \n )\n\ndef plotOptimalControl(result,controlsName,title='title'):\n fig, axs = plt.subplots(2,1)\n initial = result.initial_amps\n final = result.final_amps\n def stack(x,j): return np.hstack((x[:, j], x[-1, j]))\n pulseNames = ['initial','final']\n for i,pulse in enumerate([initial,final]):\n for j in range(pulse.shape[1]):\n axs[i].step(result.time, stack(pulse,j),\n label=toLatex(controlsName[j]))\n axs[i].set_title(pulseNames[i]+\" control\")\n axs[i].legend()\n fig.tight_layout(); plt.show()\n print('*'*16 +' '+ title + \" summary \" + '*'*16)\n print(\"Final fidelity error {}\".format(result.fid_err))\n print(\"Final gradient normal {}\".format(result.grad_norm_final))\n print(\"Terminated due to {}\".format(result.termination_reason))\n print(\"Number of iterations {}\".format(result.num_iter))\n print(\"Completed in {} HH:MM:SS.US\".format(\n datetime.timedelta(seconds=result.wall_time)))","sub_path":"build/lib/yarn/qutipHelpers/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"592016163","text":"# fmt: off\nimport sys # isort: skip\nfrom pathlib import Path # isort: skip\nROOT = Path(__file__).resolve().parent.parent\nsys.path.append(str(ROOT))\n# fmt: on\n\nfrom pathlib import Path\n\nfrom rmt.updated_features import FEATURE_OUTFILES, Levelvars\nfrom rmt.updated_predict import FeatureSlice, summarize_all_updated_predictions\n\nif __name__ == \"__main__\":\n DEGREES = [3, 5, 7, 9]\n for feature, fname in FEATURE_OUTFILES.items():\n df = summarize_all_updated_predictions(\n feature_cls=feature,\n degrees=DEGREES,\n feature_slices=[*FeatureSlice],\n debug=False,\n )\n outfile = ROOT.parent / fname\n df.to_json(outfile)\n print(f\"Saved predictions to {outfile}\")\n","sub_path":"code/scripts/predict_everything_updated.py","file_name":"predict_everything_updated.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"119332944","text":"import mogp_emulator\nimport numpy as np\n\n# simple MICE examples with 2 inputs\n\n# simulator function -- needs to take a single input and output a single number\n\ndef f(x): \n return np.exp(-np.sum((x-2.)**2, axis = -1)/2.)\n \n# Base design -- requires a list of parameter bounds if you would like to use\n# uniform distributions. 
If you want to use different distributions, you\n# can use any of the standard distributions available in scipy to create\n# the appropriate ppf function (the inverse of the cumulative distribution).\n# Internally, the code creates the design on the unit hypercube and then uses\n# the distribution to map from [0,1] to the real parameter space.\n\ned = mogp_emulator.LatinHypercubeDesign([(0., 5.), (0., 5.)])\n\n###################################################################################\n\n# first example -- run entire design internally within the MICE class.\n\n# first argument is base design (required), second is simulator function (optional,\n# but required if you want the code to run the simualtions internally)\n\n# Other optional arguments include: \n# n_samples (number of sequential design steps, optional, default is not specified\n# meaning that you will specify when running the sequential design)\n# n_init (size of initial design, default 10)\n# n_cand (number of candidate points, default is 50)\n# nugget (nugget parameter for design GP, default is to set adaptively)\n# nugget_s (nugget parameter for candidate GP, default is 1.) \n\nmd = mogp_emulator.MICEDesign(ed, f, n_samples = 20, n_init = 5, n_cand = 100)\n\nmd.run_sequential_design()\n\n# get design and outputs\n\ninputs = md.get_inputs()\ntargets = md.get_targets()\n\nprint(\"Example 1:\")\nprint(\"Design inputs:\\n\", inputs)\nprint(\"Design targets:\\n\", targets)\nprint()\n\n###################################################################################\n\n# second example: run design manually\n\nmd2 = mogp_emulator.MICEDesign(ed, n_init = 5, n_cand = 100)\n\ninit_design = md2.generate_initial_design()\n\nprint(\"Example 2:\")\nprint(\"Initial design:\\n\", init_design)\n\n# run initial points manually\n\ninit_targets = np.array([f(s) for s in init_design])\n\n# set initial targets\n\nmd2.set_initial_targets(init_targets)\n\n# run 20 sequential design steps\n\nfor d in range(20):\n next_point = md2.get_next_point()\n next_target = f(next_point)\n md2.set_next_target(next_target)\n \n# look at design and outputs\n\ninputs = md2.get_inputs()\ntargets = md2.get_targets()\n\nprint(\"Final inputs:\\n\", inputs)\nprint(\"Final targets:\\n\", targets)\n\ngp = mogp_emulator.GaussianProcess(inputs, targets)\ngp.learn_hyperparameters()\n\ntesting = ed.sample(200000)\n\nmean, unc, _ = gp.predict(testing)\n\nprint(mean)","sub_path":"mogp_emulator/demos/mice_demos.py","file_name":"mice_demos.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"102745403","text":"import sys\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom dtsr.config import Config\nfrom dtsr.signif import bootstrap\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\n\nif __name__ == '__main__':\n\n argparser = argparse.ArgumentParser('''\n Computes pairwise significance of error differences between DTSR models and competitors.\n Assumes models are named using the template _, where is\n shared between models that should be compared. For example, if the config file contains\n 4 models --- DTSR_TASK1, DTSR_TASK2, COMPETITOR_TASK1, and COMPETITOR_TASK2 --- the script\n will perform 2 comparisons: DTSR_TASK1 vs COMPETITOR_TASK1 and DTSR_TASK2 vs. 
COMPETITOR_TASK2.\n ''')\n argparser.add_argument('config_path', help='Path to configuration (*.ini) file')\n argparser.add_argument('-m', '--models', nargs='*', default=[], help='Path to configuration (*.ini) file')\n argparser.add_argument('-p', '--partition', type=str, default='dev', help='Name of partition to use (one of \"train\", \"dev\", \"test\")')\n argparser.add_argument('-M', '--metric', type=str, default='loss', help='Metric to use for comparison (either \"loss\" or \"loglik\")')\n argparser.add_argument('-t', '--tails', type=int, default=2, help='Number of tails (1 or 2)')\n args, unknown = argparser.parse_known_args()\n\n p = Config(args.config_path)\n if len(args.models) > 0:\n models = args.models\n else:\n models = p.model_list[:]\n\n run_baseline = False\n run_dtsr = False\n for m in models:\n if not run_baseline and m.startswith('LM') or m.startswith('GAM'):\n run_baseline = True\n elif not run_dtsr and m.startswith('DTSR'):\n run_dtsr = True\n\n sys.stderr.write('\\n')\n dtsr_models = [x for x in models if x.startswith('DTSR')]\n\n if args.metric == 'loss':\n file_name = '/%s_losses_%s.txt'%(p.loss, args.partition)\n else:\n file_name = '/loglik_%s.txt'%args.partition\n\n for i in range(len(dtsr_models)):\n m1 = dtsr_models[i]\n for j in range(i+1, len(dtsr_models)):\n m2 = dtsr_models[j]\n name = '%s_v_%s' %(m1, m2)\n a = pd.read_csv(p.outdir + '/' + m1 + file_name, sep=' ', header=None, skipinitialspace=True)\n b = pd.read_csv(p.outdir + '/' + m2 + file_name, sep=' ', header=None, skipinitialspace=True)\n select = np.logical_and(np.isfinite(np.array(a)), np.isfinite(np.array(b)))\n diff = float(len(a) - select.sum())\n p_value, base_diff, diffs = bootstrap(a[select], b[select], n_iter=10000, n_tails=args.tails, mode=args.metric)\n sys.stderr.write('\\n')\n with open(p.outdir + '/' + name + '_' + args.partition + '.txt', 'w') as f:\n f.write('='*50 + '\\n')\n f.write('Model comparison: %s vs %s\\n' %(m1, m2))\n if diff > 0:\n f.write('%d NaN rows filtered out (out of %d)\\n' %(diff, len(a)))\n f.write('Partition: %s\\n' %args.partition)\n f.write('Loss difference: %.4f\\n' %base_diff)\n f.write('p: %.4e\\n' %p_value)\n f.write('='*50 + '\\n')\n plt.hist(diffs, bins=1000)\n plt.savefig(p.outdir + '/' + name + '_' + args.partition + '.png')\n plt.close('all')\n","sub_path":"dtsr/bin/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"238122707","text":"from PyQt5 import QtCore, QtWidgets\nfrom PyQt5.Qt import *\nfrom PyQt5.QtCore import *\n\nclass imageBox(QtWidgets.QLabel):\n signal_refresh_list = QtCore.pyqtSignal(int)\n\n def __init__(self, parent=None):\n super(imageBox, self).__init__(parent)\n self.points = []\n self.wires = []\n self.currentPoint = QPoint(0, 0)\n self.anchorPoint = QPoint(-1, -1)\n self.setCursor(Qt.CrossCursor)\n self.setMouseTracking(True)\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n self.contextMenu = QMenu(self)\n self.customContextMenuRequested.connect(self.showMenu)\n self.reselectLast = self.contextMenu.addAction('Reselect last')\n self.abandon = self.contextMenu.addAction('Abandon')\n self.apply = self.contextMenu.addAction('Apply')\n self.reselectLast.triggered.connect(self.reselectLastMenu_triggered)\n self.abandon.triggered.connect(self.abandonMenu_triggered)\n self.apply.triggered.connect(self.applyMenu_triggered)\n\n def showMenu(self):\n self.contextMenu.exec_(QCursor.pos())\n\n def 
reselectLastMenu_triggered(self):\n if len(self.points) > 1:\n self.points.pop(-1)\n self.anchorPoint = self.points[len(self.points) - 1]\n self.update()\n elif len(self.points) == 1:\n self.points = []\n self.anchorPoint = QPoint(-1, -1)\n self.update()\n\n def abandonMenu_triggered(self):\n self.points = []\n self.anchorPoint = QPoint(-1, -1)\n self.update()\n\n def applyMenu_triggered(self):\n if len(self.points) >= 2:\n self.wires.append(self.points)\n self.points = []\n self.anchorPoint = QPoint(-1, -1)\n self.update()\n self.signal_refresh_list.emit(len(self.wires))\n\n def mouseMoveEvent(self, QMouseEvent):\n self.currentPoint = QMouseEvent.pos()\n self.update()\n\n\n def mousePressEvent(self, QMouseEvent):\n if QMouseEvent.buttons() == QtCore.Qt.LeftButton:\n self.anchorPoint = QMouseEvent.pos()\n self.points.append(self.anchorPoint)\n self.update()\n\n def paintEvent(self, QPaintEvent):\n p = QPainter(self)\n p.setPen(QPen(Qt.red, 2, Qt.SolidLine))\n font = QFont('宋体', 16, QFont.Bold, True)\n p.setFont(font)\n for idx, w in enumerate(self.wires):\n p.drawPolyline(self.convert2polygon(w))\n p.drawText(w[0].x(), w[0].y(), str(idx))\n if len(self.points) > 1:\n p.drawPolyline(self.convert2polygon(self.points))\n if self.anchorPoint.x() >= 0 and self.anchorPoint.y() >= 0:\n p.drawLine(self.anchorPoint.x(), self.anchorPoint.y(), self.currentPoint.x(), self.currentPoint.y())\n\n def convert2polygon(self, points):\n return QPolygon(points)\n\n def deleteAt(self, index):\n self.wires.pop(index)\n self.update()\n\n def clearAll(self):\n self.points = []\n self.wires = []\n self.currentPoint = QPoint(0, 0)\n self.anchorPoint = QPoint(-1, -1)\n self.update()\n","sub_path":"imageBox.py","file_name":"imageBox.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"45326284","text":"import pandas as pd\nimport os.path\nimport xlwt\nimport xlrd\n\nexcel_names = []\nfor i in range(1,5):\n\tfileName = \"results/tagsFrame week-\"+str(i)+\".xlsx\"\n\tif os.path.exists(fileName):\n\t\texcel_names.append(fileName)\n\nexcels = [pd.ExcelFile(name) for name in excel_names]\nframes = [x.parse(x.sheet_names[0], header=None,index_col=None) for x in excels]\nframes[1:] = [df[1:] for df in frames]\ncombined = pd.concat(frames)\ncombined.to_excel(\"results/tagsFrame.xlsx\", header=False, index=False)","sub_path":"compilation/concat.py","file_name":"concat.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"273860783","text":"# -*- coding: utf-8 -*-\n\"\"\" catch22 features\nA transformer for the catch22 features\n\"\"\"\n\n__author__ = \"Matthew Middlehurst\"\n__all__ = [\"Catch22\"]\n\nimport math\n\nimport numpy as np\nimport pandas as pd\nfrom joblib import Parallel, delayed\nfrom numba import njit\nfrom numba.typed import List\n\nfrom sktime.transformations.base import _PanelToTabularTransformer\nfrom sktime.datatypes._panel._convert import from_nested_to_2d_array\nfrom sktime.utils.validation.panel import check_X\n\n\nclass Catch22(_PanelToTabularTransformer):\n \"\"\"Canonical Time-series Characteristics (catch22)\n\n @article{lubba2019catch22,\n title={catch22: CAnonical Time-series CHaracteristics},\n author={Lubba, Carl H and Sethi, Sarab S and Knaute, Philip and\n Schultz, Simon R and Fulcher, Ben D and Jones, Nick S},\n journal={Data Mining and Knowledge Discovery},\n volume={33},\n number={6},\n 
pages={1821--1852},\n year={2019},\n publisher={Springer}\n }\n\n Overview: Input n series with d dimensions of length m\n Transforms series into the 22 catch22 features extracted from the hctsa\n toolbox.\n\n Fulcher, B. D., & Jones, N. S. (2017). hctsa: A computational framework\n for automated time-series phenotyping using massive feature extraction.\n Cell systems, 5(5), 527-531.\n\n Fulcher, B. D., Little, M. A., & Jones, N. S. (2013). Highly comparative\n time-series analysis: the empirical structure of time series and their\n methods. Journal of the Royal Society Interface, 10(83), 20130048.\n\n Original catch22 package implementations:\n https://github.com/chlubba/catch22\n\n For the Java version, see\n https://github.com/uea-machine-learning/tsml/blob/master/src/main/java\n /tsml/transformers/Catch22.java\n\n \"\"\"\n\n def __init__(\n self,\n outlier_norm=False,\n n_jobs=1,\n ):\n self.outlier_norm = outlier_norm\n\n self.n_jobs = n_jobs\n\n super(Catch22, self).__init__()\n\n def transform(self, X, y=None):\n \"\"\"transforms data into the catch22 features\n\n Parameters\n ----------\n X : pandas DataFrame or 3d numpy array, input time series\n y : array_like, target values (optional, ignored)\n\n Returns\n -------\n Pandas dataframe containing 22 features for each input series\n \"\"\"\n self.check_is_fitted()\n X = check_X(X, enforce_univariate=False, coerce_to_numpy=True)\n n_instances = X.shape[0]\n X = np.reshape(X, (n_instances, -1))\n\n c22_list = Parallel(n_jobs=self.n_jobs)(\n delayed(self._transform_case)(\n X[i],\n )\n for i in range(n_instances)\n )\n\n return pd.DataFrame(c22_list)\n\n def _transform_case(self, series):\n outlier_series = series\n if self.outlier_norm:\n std = np.std(outlier_series)\n if std > 0:\n outlier_series = (outlier_series - np.mean(outlier_series)) / std\n\n smin = np.min(series)\n smax = np.max(series)\n smean = np.mean(series)\n\n nfft = int(np.power(2, np.ceil(np.log(len(series)) / np.log(2))))\n fft = np.fft.fft(series - smean, n=nfft)\n ac = _autocorr(series, fft)\n acfz = _ac_first_zero(ac)\n\n c22 = np.zeros(22)\n c22[0] = Catch22.DN_HistogramMode_5(series, smin, smax)\n c22[1] = Catch22.DN_HistogramMode_10(series, smin, smax)\n c22[2] = Catch22.SB_BinaryStats_diff_longstretch0(series, smean)\n c22[3] = Catch22.DN_OutlierInclude_p_001_mdrmd(outlier_series)\n c22[4] = Catch22.DN_OutlierInclude_n_001_mdrmd(outlier_series)\n c22[5] = Catch22.CO_f1ecac(ac)\n c22[6] = Catch22.CO_FirstMin_ac(ac)\n c22[7] = Catch22.SP_Summaries_welch_rect_area_5_1(series, fft)\n c22[8] = Catch22.SP_Summaries_welch_rect_centroid(series, fft)\n c22[9] = Catch22.FC_LocalSimple_mean3_stderr(series)\n c22[10] = Catch22.CO_trev_1_num(series)\n c22[11] = Catch22.CO_HistogramAMI_even_2_5(series, smin, smax)\n c22[12] = Catch22.IN_AutoMutualInfoStats_40_gaussian_fmmi(ac)\n c22[13] = Catch22.MD_hrv_classic_pnn40(series)\n c22[14] = Catch22.SB_BinaryStats_mean_longstretch1(series)\n c22[15] = Catch22.SB_MotifThree_quantile_hh(series)\n c22[16] = Catch22.FC_LocalSimple_mean1_tauresrat(series, acfz)\n c22[17] = Catch22.CO_Embed2_Dist_tau_d_expfit_meandiff(series, acfz)\n c22[18] = Catch22.SC_FluctAnal_2_dfa_50_1_2_logi_prop_r1(series)\n c22[19] = Catch22.SC_FluctAnal_2_rsrangefit_50_1_logi_prop_r1(series)\n c22[20] = Catch22.SB_TransitionMatrix_3ac_sumdiagcov(series, acfz)\n c22[21] = Catch22.PD_PeriodicityWang_th0_01(series)\n\n return c22\n\n def _transform_single_feature(self, X, feature):\n \"\"\"transforms data into a specified catch22 feature\n\n Parameters\n 
----------\n X : pandas DataFrame, input time series\n feature : int, catch22 feature id or String, catch22 feature\n name.\n\n Returns\n -------\n Numpy array containing a catch22 feature for each input series\n \"\"\"\n if isinstance(feature, (int, np.integer)) or isinstance(\n feature, (float, np.float)\n ):\n if feature > 21 or feature < 0:\n raise ValueError(\"Invalid catch22 feature ID\")\n elif isinstance(feature, str):\n if feature in feature_names:\n feature = feature_names.index(feature)\n else:\n raise ValueError(\"Invalid catch22 feature name\")\n else:\n raise ValueError(\"catch22 feature name or ID required\")\n\n if isinstance(X, pd.DataFrame):\n X = from_nested_to_2d_array(X, return_numpy=True)\n\n n_instances = X.shape[0]\n X = np.reshape(X, (n_instances, -1))\n\n c22_list = Parallel(n_jobs=self.n_jobs)(\n delayed(self._transform_case_single)(\n X[i],\n feature,\n )\n for i in range(n_instances)\n )\n\n return np.asarray(c22_list)\n\n def _transform_case_single(self, series, feature):\n args = [series]\n\n if feature == 0 or feature == 1 or feature == 11:\n smin = np.min(series)\n smax = np.max(series)\n args = [series, smin, smax]\n elif feature == 2:\n smean = np.mean(series)\n args = [series, smean]\n elif feature == 3 or feature == 4:\n if self.outlier_norm:\n std = np.std(series)\n if std > 0:\n series = (series - np.mean(series)) / std\n args = [series]\n elif feature == 7 or feature == 8:\n smean = np.mean(series)\n nfft = int(np.power(2, np.ceil(np.log(len(series)) / np.log(2))))\n fft = np.fft.fft(series - smean, n=nfft)\n args = [series, fft]\n elif feature == 5 or feature == 6 or feature == 12:\n smean = np.mean(series)\n nfft = int(np.power(2, np.ceil(np.log(len(series)) / np.log(2))))\n fft = np.fft.fft(series - smean, n=nfft)\n ac = _autocorr(series, fft)\n args = [ac]\n elif feature == 16 or feature == 17 or feature == 20:\n smean = np.mean(series)\n nfft = int(np.power(2, np.ceil(np.log(len(series)) / np.log(2))))\n fft = np.fft.fft(series - smean, n=nfft)\n ac = _autocorr(series, fft)\n acfz = _ac_first_zero(ac)\n args = [series, acfz]\n\n return features[feature](*args)\n\n @staticmethod\n def DN_HistogramMode_5(X, smin, smax):\n # Mode of z-scored distribution (5-bin histogram)\n return _histogram_mode(X, 5, smin, smax)\n\n @staticmethod\n def DN_HistogramMode_10(X, smin, smax):\n # Mode of z-scored distribution (10-bin histogram)\n return _histogram_mode(X, 10, smin, smax)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def SB_BinaryStats_diff_longstretch0(X, smean):\n # Longest period of consecutive values above the mean\n mean_binary = np.zeros(len(X))\n for i in range(len(X)):\n if X[i] - smean > 0:\n mean_binary[i] = 1\n\n return _long_stretch(mean_binary, 1)\n\n @staticmethod\n def DN_OutlierInclude_p_001_mdrmd(X):\n # Time intervals between successive extreme events above the mean\n return _outlier_include(X)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def DN_OutlierInclude_n_001_mdrmd(X):\n # Time intervals between successive extreme events below the mean\n return _outlier_include(-X)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def CO_f1ecac(X_ac):\n # First 1/e crossing of autocorrelation function\n threshold = 0.36787944117144233 # 1 / np.exp(1)\n for i in range(1, len(X_ac)):\n if (X_ac[i - 1] - threshold) * (X_ac[i] - threshold) < 0:\n return i\n return len(X_ac)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def CO_FirstMin_ac(X_ac):\n # First minimum of autocorrelation function\n for i in range(1, 
len(X_ac) - 1):\n if X_ac[i] < X_ac[i - 1] and X_ac[i] < X_ac[i + 1]:\n return i\n return len(X_ac)\n\n @staticmethod\n def SP_Summaries_welch_rect_area_5_1(X, X_fft):\n # Total power in lowest fifth of frequencies in the Fourier power spectrum\n return _summaries_welch_rect(X, False, X_fft)\n\n @staticmethod\n def SP_Summaries_welch_rect_centroid(X, X_fft):\n # Centroid of the Fourier power spectrum\n return _summaries_welch_rect(X, True, X_fft)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def FC_LocalSimple_mean3_stderr(X):\n # Mean error from a rolling 3-sample mean forecasting\n if len(X) - 3 < 3:\n return 0\n res = _local_simple_mean(X, 3)\n return np.std(res)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def CO_trev_1_num(X):\n # Time-reversibility statistic, ((x_t+1 − x_t)^3)_t\n y = np.zeros(len(X) - 1)\n for i in range(len(y)):\n y[i] = np.power(X[i + 1] - X[i], 3)\n return np.mean(y)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def CO_HistogramAMI_even_2_5(X, smin, smax):\n # Automutual information, m = 2, τ = 5\n new_min = smin - 0.1\n new_max = smax + 0.1\n bin_width = (new_max - new_min) / 5\n\n histogram = np.zeros((5, 5))\n sumx = np.zeros(5)\n sumy = np.zeros(5)\n v = 1.0 / (len(X) - 2)\n for i in range(len(X) - 2):\n idx1 = int((X[i] - new_min) / bin_width)\n idx2 = int((X[i + 2] - new_min) / bin_width)\n\n histogram[idx1][idx2] += v\n sumx[idx1] += v\n sumy[idx2] += v\n\n nsum = 0\n for i in range(5):\n for n in range(5):\n if histogram[i][n] > 0:\n nsum += histogram[i][n] * np.log(\n histogram[i][n] / sumx[i] / sumy[n]\n )\n\n return nsum\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def IN_AutoMutualInfoStats_40_gaussian_fmmi(X_ac):\n # First minimum of the automutual information function\n tau = int(min(40, np.ceil(len(X_ac) / 2)))\n\n diffs = np.zeros(tau - 1)\n prev = -0.5 * np.log(1 - np.power(X_ac[1], 2))\n for i in range(len(diffs)):\n corr = -0.5 * np.log(1 - np.power(X_ac[i + 2], 2))\n diffs[i] = corr - prev\n prev = corr\n\n for i in range(len(diffs) - 1):\n if diffs[i] * diffs[i + 1] < 0 and diffs[i] < 0:\n return i + 1\n\n return tau\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def MD_hrv_classic_pnn40(X):\n # Proportion of successive differences exceeding 0.04σ (Mietus 2002)\n diffs = np.zeros(len(X) - 1)\n for i in range(len(diffs)):\n diffs[i] = np.abs(X[i + 1] - X[i]) * 1000\n\n nsum = 0\n for diff in diffs:\n if diff > 40:\n nsum += 1\n\n return nsum / len(diffs)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def SB_BinaryStats_mean_longstretch1(X):\n # Longest period of successive incremental decreases\n diff_binary = np.zeros(len(X) - 1)\n for i in range(len(diff_binary)):\n if X[i + 1] - X[i] >= 0:\n diff_binary[i] = 1\n\n return _long_stretch(diff_binary, 0)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def SB_MotifThree_quantile_hh(X):\n # Shannon entropy of two successive letters in equiprobable 3-letter\n # symbolization\n indicies = np.argsort(X)\n p = List()\n bins = np.zeros(len(X))\n q1 = int(len(X) / 3)\n q2 = q1 * 2\n l1 = List()\n for i in range(q1 + 1):\n l1.append(indicies[i])\n p.append(l1)\n l2 = List()\n for i in range(q1 + 1, q2 + 1):\n bins[indicies[i]] = 1\n l2.append(indicies[i])\n p.append(l2)\n l3 = List()\n for i in range(q2 + 1, len(indicies)):\n bins[indicies[i]] = 2\n l3.append(indicies[i])\n p.append(l3)\n\n nsum = 0\n for i in range(3):\n o = p[i]\n if len(X) - 1 in o:\n o.remove(len(X) - 1)\n\n for n in range(3):\n nsum2 = 0\n\n for v in o:\n if bins[v + 1] 
== n:\n nsum2 += 1\n\n if nsum2 > 0:\n nsum2 /= len(X) - 1\n nsum += nsum2 * np.log(nsum2)\n\n return -nsum\n\n @staticmethod\n def FC_LocalSimple_mean1_tauresrat(X, acfz):\n # Change in correlation length after iterative differencing\n if len(X) < 2:\n return 0\n res = _local_simple_mean(X, 1)\n mean = np.mean(res)\n\n nfft = int(np.power(2, np.ceil(np.log(len(res)) / np.log(2))))\n fft = np.fft.fft(res - mean, n=nfft)\n ac = _autocorr(res, fft)\n\n return _ac_first_zero(ac) / acfz\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def CO_Embed2_Dist_tau_d_expfit_meandiff(X, acfz):\n # Exponential fit to successive distances in 2-d embedding space\n tau = acfz\n if tau > len(X) / 10:\n tau = int(len(X) / 10)\n\n d = np.zeros(len(X) - tau - 1)\n d_mean = 0\n for i in range(len(d)):\n n = np.sqrt(\n np.power(X[i + 1] - X[i], 2) + np.power(X[i + tau + 1] - X[i + tau], 2)\n )\n d[i] = n\n d_mean += n\n d_mean /= len(X) - tau - 1\n\n smin = np.min(d)\n smax = np.max(d)\n srange = smax - smin\n std = np.std(d)\n\n if std == 0:\n return np.nan\n\n num_bins = int(\n np.ceil(srange / (3.5 * np.std(d) / np.power(len(d), 0.3333333333333333)))\n )\n\n if num_bins == 0:\n return np.nan\n bin_width = srange / num_bins\n\n histogram = np.zeros(num_bins)\n for val in d:\n idx = int((val - smin) / bin_width)\n if idx >= num_bins:\n idx = num_bins - 1\n histogram[idx] += 1\n\n sum = 0\n for i in range(num_bins):\n center = ((smin + bin_width * i) * 2 + bin_width) / 2\n n = np.exp(-center / d_mean) / d_mean\n if n < 0:\n n = 0\n\n sum += np.abs(histogram[i] / len(d) - n)\n\n return sum / num_bins\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def SC_FluctAnal_2_dfa_50_1_2_logi_prop_r1(X):\n # Proportion of slower timescale fluctuations that scale with DFA (50%\n # sampling)\n cs = np.zeros(int(len(X) / 2))\n cs[0] = X[0]\n for i in range(1, len(cs)):\n cs[i] = cs[i - 1] + X[i * 2]\n\n return _fluct_prop(cs, len(X), True)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def SC_FluctAnal_2_rsrangefit_50_1_logi_prop_r1(X):\n # Proportion of slower timescale fluctuations that scale with linearly rescaled\n # range fits\n cs = np.zeros(len(X))\n cs[0] = X[0]\n for i in range(1, len(X)):\n cs[i] = cs[i - 1] + X[i]\n\n return _fluct_prop(cs, len(X), False)\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def SB_TransitionMatrix_3ac_sumdiagcov(X, acfz):\n # Trace of covariance of transition matrix between symbols in 3-letter alphabet\n ds = np.zeros(int((len(X) - 1) / acfz + 1))\n for i in range(len(ds)):\n ds[i] = X[i * acfz]\n indicies = np.argsort(ds)\n\n bins = np.zeros(len(ds), dtype=np.int32)\n q1 = int(len(ds) / 3)\n q2 = q1 * 2\n for i in range(q1 + 1, q2 + 1):\n bins[indicies[i]] = 1\n for i in range(q2 + 1, len(indicies)):\n bins[indicies[i]] = 2\n\n t = np.zeros((3, 3))\n for i in range(len(ds) - 1):\n t[bins[i + 1]][bins[i]] += 1\n t /= len(ds) - 1\n\n means = np.zeros(3)\n for i in range(3):\n means[i] = np.mean(t[i])\n\n cov = np.zeros((3, 3))\n for i in range(3):\n for n in range(3):\n covariance = 0\n for j in range(3):\n covariance += (t[i][j] - means[i]) * (t[n][j] - means[n])\n covariance /= 2\n\n cov[i][n] = covariance\n cov[n][i] = covariance\n\n ssum = 0\n for i in range(3):\n ssum += cov[i][i]\n\n return ssum\n\n @staticmethod\n @njit(fastmath=True, cache=True)\n def PD_PeriodicityWang_th0_01(X):\n # Periodicity measure of (Wang et al. 
2007)\n y_spline = _spline_fit(X)\n\n y_sub = np.zeros(len(X))\n for i in range(len(X)):\n y_sub[i] = X[i] - y_spline[i]\n\n acmax = int(np.ceil(len(X) / 3.0))\n acf = np.zeros(acmax)\n for tau in range(1, acmax + 1):\n covariance = 0\n for i in range(len(X) - tau):\n covariance += y_sub[i] * y_sub[i + tau]\n acf[tau - 1] = covariance / (len(X) - tau)\n\n troughs = np.zeros(acmax, dtype=np.int32)\n peaks = np.zeros(acmax, dtype=np.int32)\n n_troughs = 0\n n_peaks = 0\n for i in range(1, acmax - 1):\n slope_in = acf[i] - acf[i - 1]\n slope_out = acf[i + 1] - acf[i]\n\n if slope_in < 0 and slope_out > 0:\n troughs[n_troughs] = i\n n_troughs += 1\n elif slope_in > 0 and slope_out < 0:\n peaks[n_peaks] = i\n n_peaks += 1\n\n out = 0\n for i in range(n_peaks):\n j = -1\n while troughs[j + 1] < peaks[i] and j + 1 < n_troughs:\n j += 1\n\n if j == -1 or acf[peaks[i]] - acf[troughs[j]] < 0.01 or acf[peaks[i]] < 0:\n continue\n\n out = peaks[i]\n break\n\n return out\n\n\n@njit(fastmath=True, cache=True)\ndef _histogram_mode(X, num_bins, smin, smax):\n bin_width = (smax - smin) / num_bins\n\n if bin_width == 0:\n return np.nan\n\n histogram = np.zeros(num_bins)\n for val in X:\n idx = int((val - smin) / bin_width)\n idx = num_bins - 1 if idx >= num_bins else idx\n histogram[idx] += 1\n\n edges = np.zeros(num_bins + 1, dtype=np.float32)\n for i in range(len(edges)):\n edges[i] = i * bin_width + smin\n\n max_count = 0\n num_maxs = 1\n max_sum = 0\n for i in range(num_bins):\n v = (edges[i] + edges[i + 1]) / 2\n if histogram[i] > max_count:\n max_count = histogram[i]\n num_maxs = 1\n max_sum = v\n elif histogram[i] == max_count:\n num_maxs += 1\n max_sum += v\n\n return max_sum / num_maxs\n\n\n@njit(fastmath=True, cache=True)\ndef _long_stretch(X_binary, val):\n last_val = 0\n max_stretch = 0\n for i in range(len(X_binary)):\n if X_binary[i] != val or i == len(X_binary) - 1:\n stretch = i - last_val\n if stretch > max_stretch:\n max_stretch = stretch\n last_val = i\n\n return max_stretch\n\n\n@njit(fastmath=True, cache=True)\ndef _outlier_include(X):\n total = 0\n threshold = 0\n for v in X:\n if v >= 0:\n total += 1\n if v > threshold:\n threshold = v\n\n if threshold < 0.01:\n return 0\n\n num_thresholds = int(threshold / 0.01) + 1\n means = np.zeros(num_thresholds)\n dists = np.zeros(num_thresholds)\n medians = np.zeros(num_thresholds)\n for i in range(num_thresholds):\n d = i * 0.01\n\n r = List()\n for n in range(len(X)):\n if X[n] >= d:\n r.append(n + 1)\n\n if len(r) == 0:\n continue\n\n diff = np.zeros(len(r) - 1)\n for n in range(len(diff)):\n diff[n] = r[n + 1] - r[n]\n\n means[i] = np.mean(diff) if len(diff) > 0 else 9999999999\n dists[i] = len(diff) * 100 / total\n medians[i] = _typed_list_median(r) / (len(X) / 2) - 1\n\n mj = 0\n fbi = num_thresholds - 1\n for i in range(num_thresholds):\n if dists[i] > 2:\n mj = i\n if means[i] == 9999999999:\n fbi = num_thresholds - 1 - i\n\n trim_limit = max(mj, fbi)\n\n return np.median(medians[: trim_limit + 1])\n\n\n@njit(fastmath=True, cache=True)\ndef _typed_list_median(X):\n X.sort()\n if len(X) % 2 == 1:\n return X[int(len(X) / 2)]\n else:\n return (X[int(len(X) / 2) - 1] + X[int(len(X) / 2)]) / 2\n\n\ndef _autocorr(X, X_fft):\n ca = np.fft.ifft(_multiply_complex_arr(X_fft))\n return _get_acf(X, ca)\n\n\n@njit(fastmath=True, cache=True)\ndef _multiply_complex_arr(X_fft):\n c = np.zeros(len(X_fft), dtype=np.complex128)\n for i, n in enumerate(X_fft):\n c[i] = n * (n.real + 1j * -n.imag)\n return c\n\n\n@njit(fastmath=True, cache=True)\ndef 
_get_acf(X, ca):\n acf = np.zeros(len(X))\n if ca[0].real != 0:\n for i in range(len(X)):\n acf[i] = ca[i].real / ca[0].real\n return acf\n\n\n@njit(fastmath=True, cache=True)\ndef _summaries_welch_rect(X, centroid, X_fft):\n new_length = int(len(X_fft) / 2) + 1\n p = np.zeros(new_length)\n pi2 = 2 * math.pi\n p[0] = (np.power(_complex_magnitude(X_fft[0]), 2) / len(X)) / pi2\n for i in range(1, new_length - 1):\n p[i] = ((np.power(_complex_magnitude(X_fft[i]), 2) / len(X)) * 2) / pi2\n p[new_length - 1] = (\n np.power(_complex_magnitude(X_fft[new_length - 1]), 2) / len(X)\n ) / pi2\n\n w = np.zeros(new_length)\n a = 1.0 / len(X_fft)\n for i in range(0, new_length):\n w[i] = i * a * math.pi * 2\n\n if centroid:\n cs = np.zeros(new_length)\n cs[0] = p[0]\n for i in range(1, new_length):\n cs[i] = cs[i - 1] + p[i]\n\n threshold = cs[new_length - 1] / 2\n for i in range(1, new_length):\n if cs[i] > threshold:\n return w[i]\n return np.nan\n else:\n tau = np.floor(new_length / 5)\n nsum = 0\n for i in range(tau):\n nsum += p[i]\n\n return nsum * (w[1] - w[0])\n\n\n@njit(fastmath=True, cache=True)\ndef _complex_magnitude(c):\n return np.sqrt(c.real * c.real + c.imag * c.imag)\n\n\n@njit(fastmath=True, cache=True)\ndef _local_simple_mean(X, train_length):\n res = np.zeros(len(X) - train_length)\n for i in range(len(res)):\n nsum = 0\n for n in range(train_length):\n nsum += X[i + n]\n res[i] = X[i + train_length] - nsum / train_length\n return res\n\n\n@njit(fastmath=True, cache=True)\ndef _ac_first_zero(X_ac):\n for i in range(1, len(X_ac)):\n if X_ac[i] <= 0:\n return i\n\n return len(X_ac)\n\n\n@njit(fastmath=True, cache=True)\ndef _fluct_prop(X, og_length, dfa):\n a = List()\n a.append(5)\n smin = 1.6094379124341003 # Math.log(5);\n smax = np.log(og_length / 2)\n inc = (smax - smin) / 49\n for i in range(1, 50):\n val = int(np.round(np.exp(smin + inc * i) + 0.000000000001))\n if val != a[len(a) - 1]:\n a.append(val)\n n_tau = len(a)\n\n if n_tau < 12:\n return np.nan\n\n f = np.zeros(n_tau)\n for i in range(n_tau):\n tau = a[i]\n buff_size = int(len(X) / tau)\n lag = 0\n if buff_size == 0:\n buff_size = 1\n lag = 1\n\n buffer = np.zeros((buff_size, tau))\n count = 0\n for n in range(buff_size):\n for j in range(tau - lag):\n buffer[n][j] = X[count]\n count += 1\n\n d = np.zeros(tau)\n for n in range(tau):\n d[n] = n + 1\n\n for n in range(buff_size):\n c1, c2 = _linear_regression(d, buffer[n], tau, 0)\n\n for j in range(tau):\n buffer[n][j] = buffer[n][j] - (c1 * (j + 1) + c2)\n\n if dfa:\n for j in range(tau):\n f[i] += buffer[n][j] * buffer[n][j]\n else:\n f[i] += np.power(np.max(buffer[n]) - np.min(buffer[n]), 2)\n\n if dfa:\n f[i] = np.sqrt(f[i] / (buff_size * tau))\n else:\n f[i] = np.sqrt(f[i] / buff_size)\n\n log_a = np.zeros(n_tau)\n log_f = np.zeros(n_tau)\n for i in range(n_tau):\n log_a[i] = np.log(a[i])\n log_f[i] = np.log(f[i])\n\n sserr = np.zeros(n_tau - 11)\n for i in range(6, n_tau - 5):\n c1_1, c1_2 = _linear_regression(log_a, log_f, i, 0)\n c2_1, c2_2 = _linear_regression(log_a, log_f, n_tau - i + 1, i - 1)\n\n sum1 = 0\n for n in range(i):\n sum1 += np.power(log_a[n] * c1_1 + c1_2 - log_f[n], 2)\n sserr[i - 6] += np.sqrt(sum1)\n\n sum2 = 0\n for n in range(n_tau - i + 1):\n sum2 += np.power(log_a[n + i - 1] * c2_1 + c2_2 - log_f[n + i - 1], 2)\n sserr[i - 6] += np.sqrt(sum2)\n\n return (np.argmin(sserr) + 6) / n_tau\n\n\n@njit(fastmath=True, cache=True)\ndef _linear_regression(X, y, n, lag):\n sumx = 0\n sumx2 = 0\n sumxy = 0\n sumy = 0\n for i in range(lag, n + lag):\n 
sumx += X[i]\n sumx2 += X[i] * X[i]\n sumxy += X[i] * y[i]\n sumy += y[i]\n\n denom = n * sumx2 - sumx * sumx\n if denom == 0:\n return 0, 0\n\n return (n * sumxy - sumx * sumy) / denom, (sumy * sumx2 - sumx * sumxy) / denom\n\n\n@njit(fastmath=True, cache=True)\ndef _spline_fit(X):\n breaks = np.array([0, len(X) / 2 - 1, len(X) - 1])\n h0 = np.array([breaks[1] - breaks[0], breaks[2] - breaks[1]])\n h_copy = np.array([h0[0], h0[1], h0[0], h0[1]])\n hl = np.array([h_copy[3], h_copy[2], h_copy[1]])\n hr = np.array([h_copy[0], h_copy[1], h_copy[2]])\n\n hlCS = np.zeros(3)\n hlCS[0] = hl[0]\n for i in range(1, 3):\n hlCS[i] = hlCS[i - 1] + hl[i]\n\n bl = np.zeros(3)\n for i in range(3):\n bl[i] = breaks[0] - hlCS[i]\n\n hrCS = np.zeros(3)\n hrCS[0] = hr[0]\n for i in range(1, 3):\n hrCS[i] = hrCS[i - 1] + hr[i]\n\n br = np.zeros(3)\n for i in range(3):\n br[i] = breaks[2] - hrCS[i]\n\n breaksExt = np.zeros(9)\n for i in range(3):\n breaksExt[i] = bl[2 - i]\n breaksExt[i + 3] = breaks[i]\n breaksExt[i + 6] = br[i]\n\n hExt = np.zeros(8)\n for i in range(8):\n hExt[i] = breaksExt[i + 1] - breaksExt[i]\n\n coeffs = np.zeros((32, 4))\n for i in range(0, 32, 4):\n coeffs[i][0] = 1\n\n ii = np.zeros((4, 8), dtype=np.int32)\n for i in range(8):\n ii[0][i] = i\n ii[1][i] = min(1 + i, 7)\n ii[2][i] = min(2 + i, 7)\n ii[3][i] = min(3 + i, 7)\n\n H = np.zeros(32)\n for i in range(32):\n H[i] = hExt[ii[i % 4][int(i / 4)]]\n\n for k in range(1, 4):\n for j in range(k):\n for u in range(32):\n coeffs[u][j] *= H[u] / (k - j)\n\n Q = np.zeros((4, 8))\n for u in range(32):\n for m in range(4):\n Q[u % 4][int(u / 4)] += coeffs[u][m]\n\n for u in range(8):\n for m in range(1, 4):\n Q[m][u] += Q[m - 1][u]\n\n for u in range(32):\n if u % 4 > 0:\n coeffs[u][k] = Q[u % 4 - 1][int(u / 4)]\n\n fmax = np.zeros(32)\n for i in range(8):\n for j in range(4):\n fmax[i * 4 + j] = Q[3][i]\n\n for j in range(k + 1):\n for u in range(32):\n coeffs[u][j] /= fmax[u]\n\n for i in range(29):\n for j in range(k + 1):\n coeffs[i][j] -= coeffs[3 + i][j]\n\n for i in range(0, 32, 4):\n coeffs[i][k] = 0\n\n scale = np.ones(32)\n for k in range(3):\n for i in range(32):\n scale[i] /= H[i]\n\n for i in range(32):\n coeffs[i][3 - (k + 1)] *= scale[i]\n\n jj = np.zeros((4, 2), dtype=np.int32)\n for i in range(4):\n for j in range(2):\n if i == 0:\n jj[i][j] = 4 * (1 + j)\n else:\n jj[i][j] = 3\n\n for i in range(1, 4):\n for j in range(2):\n jj[i][j] += jj[i - 1][j]\n\n coeffs_out = np.zeros((8, 4))\n for i in range(8):\n coeffs_out[i] = coeffs[jj[i % 4][int(i / 4)] - 1]\n\n xsB = np.zeros(len(X) * 4)\n indexB = np.zeros(len(xsB), dtype=np.int32)\n breakInd = 1\n for i in range(len(X)):\n if i >= breaks[1] and breakInd < 2:\n breakInd += 1\n\n for j in range(4):\n xsB[i * 4 + j] = i - breaks[breakInd - 1]\n indexB[i * 4 + j] = j + (breakInd - 1) * 4\n\n vB = np.zeros(len(xsB))\n for i in range(len(xsB)):\n vB[i] = coeffs_out[indexB[i]][0]\n\n for i in range(1, 4):\n for j in range(len(xsB)):\n vB[j] = vB[j] * xsB[j] + coeffs_out[indexB[j]][i]\n\n A = np.zeros(len(X) * 5)\n breakInd = 0\n for i in range(len(xsB)):\n if i / 4 >= breaks[1]:\n breakInd = 1\n A[i % 4 + breakInd + int(i / 4) * 5] = vB[i]\n\n AT = np.zeros(len(A))\n ATA = np.zeros(25)\n ATb = np.zeros(5)\n for i in range(len(X)):\n for j in range(5):\n AT[j * len(X) + i] = A[i * 5 + j]\n\n for i in range(5):\n for j in range(5):\n for k in range(len(X)):\n ATA[i * 5 + j] += AT[i * len(X) + k] * A[k * 5 + j]\n\n for i in range(5):\n for k in range(len(X)):\n ATb[i] += AT[i * 
len(X) + k] * X[k]\n\n AElim = np.zeros((5, 5))\n for i in range(5):\n n = i * 5\n AElim[i] = ATA[n : n + 5]\n\n for i in range(5):\n for j in range(i + 1, 5):\n factor = AElim[j][i] / AElim[i][i]\n ATb[j] = ATb[j] - factor * ATb[i]\n\n for k in range(i, 5):\n AElim[j][k] = AElim[j][k] - factor * AElim[i][k]\n\n x = np.zeros(5)\n for i in range(4, -1, -1):\n bMinusATemp = ATb[i]\n for j in range(i + 1, 5):\n bMinusATemp -= x[j] * AElim[i][j]\n\n x[i] = bMinusATemp / AElim[i][i]\n\n C = np.zeros((5, 8))\n for i in range(32):\n C[int(i % 4 + int(i / 4) % 2)][int(i / 4)] = coeffs_out[i % 8][int(i / 8)]\n\n coeffs_spline = np.zeros((2, 4))\n for j in range(8):\n coefc = int(j / 2)\n coefr = j % 2\n for i in range(5):\n coeffs_spline[coefr][coefc] += C[i][j] * x[i]\n\n y_out = np.zeros(len(X))\n for i in range(len(X)):\n second_half = 0 if i < breaks[1] else 1\n y_out[i] = coeffs_spline[second_half][0]\n\n for i in range(1, 4):\n for j in range(len(X)):\n second_half = 0 if j < breaks[1] else 1\n y_out[j] = (\n y_out[j] * (j - breaks[1] * second_half) + coeffs_spline[second_half][i]\n )\n\n return y_out\n\n\nfeature_names = [\n \"DN_HistogramMode_5\",\n \"DN_HistogramMode_10\",\n \"SB_BinaryStats_diff_longstretch0\",\n \"DN_OutlierInclude_p_001_mdrmd\",\n \"DN_OutlierInclude_n_001_mdrmd\",\n \"CO_f1ecac\",\n \"CO_FirstMin_ac\",\n \"SP_Summaries_welch_rect_area_5_1\",\n \"SP_Summaries_welch_rect_centroid\",\n \"FC_LocalSimple_mean3_stderr\",\n \"CO_trev_1_num\",\n \"CO_HistogramAMI_even_2_5\",\n \"IN_AutoMutualInfoStats_40_gaussian_fmmi\",\n \"MD_hrv_classic_pnn40\",\n \"SB_BinaryStats_mean_longstretch1\",\n \"SB_MotifThree_quantile_hh\",\n \"FC_LocalSimple_mean1_tauresrat\",\n \"CO_Embed2_Dist_tau_d_expfit_meandiff\",\n \"SC_FluctAnal_2_dfa_50_1_2_logi_prop_r1\",\n \"SC_FluctAnal_2_rsrangefit_50_1_logi_prop_r1\",\n \"SB_TransitionMatrix_3ac_sumdiagcov\",\n \"PD_PeriodicityWang_th0_01\",\n]\n\nfeatures = [\n Catch22.DN_HistogramMode_5,\n Catch22.DN_HistogramMode_10,\n Catch22.SB_BinaryStats_diff_longstretch0,\n Catch22.DN_OutlierInclude_p_001_mdrmd,\n Catch22.DN_OutlierInclude_n_001_mdrmd,\n Catch22.CO_f1ecac,\n Catch22.CO_FirstMin_ac,\n Catch22.SP_Summaries_welch_rect_area_5_1,\n Catch22.SP_Summaries_welch_rect_centroid,\n Catch22.FC_LocalSimple_mean3_stderr,\n Catch22.CO_trev_1_num,\n Catch22.CO_HistogramAMI_even_2_5,\n Catch22.IN_AutoMutualInfoStats_40_gaussian_fmmi,\n Catch22.MD_hrv_classic_pnn40,\n Catch22.SB_BinaryStats_mean_longstretch1,\n Catch22.SB_MotifThree_quantile_hh,\n Catch22.FC_LocalSimple_mean1_tauresrat,\n Catch22.CO_Embed2_Dist_tau_d_expfit_meandiff,\n Catch22.SC_FluctAnal_2_dfa_50_1_2_logi_prop_r1,\n Catch22.SC_FluctAnal_2_rsrangefit_50_1_logi_prop_r1,\n Catch22.SB_TransitionMatrix_3ac_sumdiagcov,\n Catch22.PD_PeriodicityWang_th0_01,\n]\n","sub_path":"sktime/transformations/panel/catch22.py","file_name":"catch22.py","file_ext":"py","file_size_in_byte":33479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"314083753","text":"'''\nProgram that takes a list of words and goes through different functions to try\nand spellcheck those words. Then it brings back the top 3 words based on there \nfrequency based on their use.\nOhad Nir\nHomework 7 part 1\nversion: 1.0\n'''\ndef list_of_words(file):\n '''\n Function that takes a word from file and adds it to a list by taking it from\n a file and adding it to a blank list. 
Then returns that list.\n '''\n word_list = []\n \n for line in open(file):\n word_list.append(line.replace('\\n', ''))\n return word_list\n\ndef dict_of_dict(file):\n '''\n Function that takes a word from file and adds it to a dictionary by taking it from\n a file and adding it to a blank set. Then returns that set.\n '''\n word_dic = dict()\n for line in open(file):\n line = line.replace('\\n', '').split(',')\n \n word_dic[line[0]] = float(line[1])\n return word_dic\n \ndef dict_of_keyboard(file):\n '''\n Function that takes a word from file and adds it to a dictionary by taking it from\n a file and adding it to a blank set. Then returns that set.\n '''\n letter_dic = dict()\n for line in open(file):\n line = line.replace('\\n', '').split(' ')\n letter_dic[line[0]] = []\n for letter in range(1, len(line)):\n letter_dic[line[0]].append(line[letter])\n return letter_dic\n\ndef word_tests(word, diction, key):\n '''\n The main function that takes a word and dictionary as its input and runs \n them through 4 different tests. If a test returns a tuple (Spelling found in\n dictionary, Name of test word was found) then this function will return that\n same tuple. If all the tests fail then it will return \"NO MATCH\".\n '''\n master_list = set()\n \n test_found = found(word, diction)\n if test_found is not None:\n return test_found\n \n test_drop = drop(word, diction)\n if test_drop is not None and test_drop:\n for t in range(len(test_drop)):\n master_list.add(test_drop[t])\n \n test_swap = swap(word, diction)\n if test_swap is not None and test_swap:\n for t in range(len(test_swap)):\n master_list.add(test_swap[t])\n \n test_replace = replace(word, diction, key)\n if test_replace is not None and test_replace:\n for t in range(len(test_replace)):\n master_list.add(test_replace[t])\n \n options = check_potential(master_list, diction)\n if options != []:\n return options\n another_list = []\n another_list.append((word, 'NO MATCH'))\n return another_list\n\ndef check_potential(word_opt, diction):\n '''\n Fucntion that takes as an input a set of possible answers and the dictionary\n then it looks through the possible words and finds there frequency in the \n dictionary and finaly returns the top 3 rated with their frequency.\n '''\n word_opt = list(word_opt)\n frequencey_tup = [(diction[word], word) for word in word_opt]\n frequencey_tup.sort(reverse=True)\n final = [item[1] for item in frequencey_tup]\n return final[0:3]\n \n \n \n\ndef found(word, diction):\n '''\n Fist of 4 test functions. This function checks if word is in the dictionary. \n If it finds it then it will return a tuple of the input word and \"FOUND\".\n '''\n if word in diction:\n List = []\n List.append((word, 'FOUND'))\n return List\n else:\n return None\ndef replace(word, diction, key):\n '''\n function that takes a word, dictionary and a keyboard and checks all the\n possible cominations for that word using the laters that closed to the later\n in the word. 
Then it compares those words to a dictionary and if its in\n there it returns the word.\n '''\n word_list = list(word)\n words_list = []\n for letter in range(len(word_list)):\n word_list = list(word)\n if word_list[letter] in key.keys():\n for nletter in range(len(key[word_list[letter]])):\n word_list = list(word)\n key_letters = key[word_list[letter]]\n word_list.pop(letter)\n word_list.insert(letter, key_letters[nletter])\n words_list.append(''.join(word_list))\n \n List = []\n for n in words_list:\n if n in diction.keys():\n List.append(n) \n return List\n \ndef drop(word, diction):\n '''\n Second of 4 test fucntions. This function drops one letter of every word and\n adds it to a list. Then converts that list in to a set and tests it with the\n dictionary. If it finds a match function returns a tuple containing the\n correct spelling of the word and \"DROP\".\n '''\n word_list = list(word)\n words_list = []\n for letter in range(len(word_list)):\n word_list = list(word)\n word_list.pop(letter)\n words_list.append(''.join(word_list))\n List = []\n for n in words_list:\n if n in diction.keys():\n List.append(n)\n return List\n \ndef swap(word, diction):\n '''\n Third of 4 test fucntions. This function swaps every naboring word with the \n other and puts it into a list. Then it converts the list into a set and \n tests the words with the dictionary set. If it finds a match it returns a \n tuple of the matched word and \"SWAP\".\n ''' \n word_list = list(word)\n words_list = []\n for letter in range(len(word_list) - 1):\n word_list = list(word)\n letter1 = word_list.pop(letter)\n letter2 = word_list.pop(letter)\n word_list.insert(letter, letter1)\n word_list.insert(letter, letter2)\n words_list.append(''.join(word_list))\n \n List = []\n for n in words_list:\n if n in diction.keys():\n List.append(n)\n return List \n \n#Main body of program\nif __name__ == '__main__': \n dictionary = input('Dictionary file => ')\n print(dictionary)\n words = input('Input file => ')\n print(words)\n keyboard = input('Keyboard file => ')\n print(keyboard)\n #dictionary = 'words_10percent.txt'\n #words = 'input_words.txt'\n #keyboard = 'keyboard.txt'\n \n dic_dic = dict_of_dict(dictionary)\n word_list = list_of_words(words)\n dic_keyword = dict_of_keyboard(keyboard)\n \n for word in word_list:\n word = word.strip()\n spellcheck = word_tests(word, dic_dic, dic_keyword)\n \n if spellcheck[0][1] == 'FOUND':\n print('{:15} -> {:15} :FOUND'.format(word, word))\n \n elif len(spellcheck) == 1 and spellcheck[0][1] == 'NO MATCH':\n print('{:15} -> {:15} :NO MATCH'.format(word, word))\n \n if spellcheck:\n if spellcheck[0][1] != 'NO MATCH' and spellcheck[0][1] != 'FOUND':\n for i in range(len(spellcheck)):\n print('{:15} -> {:15} :MATCH {}'.format(word, spellcheck[i], i+1))","sub_path":"HW7/hw7Part1.py","file_name":"hw7Part1.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"386162791","text":"# Copyright 2013, Kay Hayen, mailto:kay.hayen@gmail.com\n#\n# Part of \"Nuitka\", an optimizing Python compiler that is compatible and\n# integrates with CPython, but also works on its own.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\" Generator for C++ and Python C/API.\n\nThis is the actual C++ code generator. It has methods and should be the only\nplace to know what C++ is like. Ideally it would be possible to replace the\ntarget language by changing this one and the templates, and otherwise nothing\nelse.\n\n\"\"\"\n\nfrom .Identifiers import (\n SpecialConstantIdentifier,\n ModuleVariableIdentifier,\n KeeperAccessIdentifier,\n HelperCallIdentifier,\n EmptyDictIdentifier,\n ThrowingIdentifier,\n CallIdentifier,\n NullIdentifier,\n Identifier\n)\n\nfrom .Indentation import (\n getBlockCode,\n indented\n)\n\nfrom .OrderedEvaluation import (\n getOrderRelevanceEnforcedCallCode,\n getOrderRelevanceEnforcedArgsCode,\n _getAssignmentTempKeeperCode\n)\n\n# imported from here pylint: disable=W0611\nfrom .TupleCodes import (\n getTupleCreationCode,\n getMakeTuplesCode\n)\nfrom .ListCodes import (\n getListCreationCode,\n getMakeListsCode\n)\nfrom .DictCodes import (\n getDictionaryCreationCode,\n getMakeDictsCode\n)\nfrom .SetCodes import getSetCreationCode\n# imported from here pylint: enable=W0611\n\nfrom .ConstantCodes import (\n getConstantsInitCode,\n getConstantsDeclCode,\n getConstantHandle,\n getConstantCode,\n needsPickleInit\n)\n\n# These are here to be imported from here\n# pylint: disable=W0611\nfrom .VariableCodes import (\n getLocalVariableInitCode,\n getVariableHandle,\n getVariableCode\n)\n# pylint: enable=W0611\n\nfrom .ParameterParsing import (\n getDirectFunctionEntryPointIdentifier,\n getParameterEntryPointIdentifier,\n getMethodEntryPointIdentifier,\n getParameterParsingCode,\n)\n\nfrom . import (\n CodeTemplates,\n OperatorCodes,\n CppStrings\n)\n\nfrom nuitka import (\n Variables,\n Constants,\n Builtins,\n Options,\n Utils\n)\n\nfrom ..__past__ import iterItems\n\nimport sys\n\ndef getConstantAccess( context, constant ):\n # Many cases, because for each type, we may copy or optimize by creating\n # empty. 
pylint: disable=R0911\n\n if type( constant ) is dict:\n if constant:\n return Identifier(\n \"PyDict_Copy( %s )\" % getConstantCode(\n constant = constant,\n context = context\n ),\n 1\n )\n else:\n return EmptyDictIdentifier()\n elif type( constant ) is set:\n if constant:\n return Identifier(\n \"PySet_New( %s )\" % getConstantCode(\n constant = constant,\n context = context\n ),\n 1\n )\n else:\n return Identifier(\n \"PySet_New( NULL )\",\n 1\n )\n elif type( constant ) is list:\n if constant:\n return Identifier(\n \"LIST_COPY( %s )\" % getConstantCode(\n constant = constant,\n context = context\n ),\n 1\n )\n else:\n return Identifier(\n \"PyList_New( 0 )\",\n 1\n )\n else:\n return getConstantHandle(\n context = context,\n constant = constant\n )\n\ndef _defaultToNullIdentifier( identifier ):\n if identifier is not None:\n return identifier\n else:\n return NullIdentifier()\n\ndef _defaultToNoneIdentifier( identifier ):\n if identifier is not None:\n return identifier\n else:\n return SpecialConstantIdentifier( constant_value = None )\n\ndef getReturnCode( identifier, via_exception, context ):\n if via_exception:\n if identifier is None:\n identifier = getConstantHandle(\n context = context,\n constant = None\n )\n\n return \"throw ReturnValueException( %s );\" % (\n identifier.getCodeExportRef()\n )\n else:\n if identifier is not None:\n return \"return %s;\" % identifier.getCodeExportRef()\n else:\n return \"return;\"\n\ndef getYieldCode( identifier, in_handler ):\n if in_handler:\n return Identifier(\n \"YIELD_VALUE_FROM_HANDLER( generator, %s )\" % (\n identifier.getCodeExportRef(),\n ),\n 0\n )\n else:\n return Identifier(\n \"YIELD_VALUE( generator, %s )\" % identifier.getCodeExportRef(),\n 0\n )\n\ndef getMetaclassVariableCode( context ):\n assert Utils.python_version < 300\n\n context.addGlobalVariableNameUsage( \"__metaclass__\" )\n\n package_var_identifier = ModuleVariableIdentifier(\n var_name = \"__metaclass__\",\n module_code_name = context.getModuleCodeName()\n )\n\n return \"( %s.isInitialized( false ) ? 
%s : NULL )\" % (\n package_var_identifier.getCode(),\n package_var_identifier.getCodeTemporaryRef()\n )\n\ndef getBuiltinImportCode( context, order_relevance, module_identifier,\n globals_dict, locals_dict, import_list, level ):\n assert type( module_identifier ) is not str\n assert type( globals_dict ) is not str\n assert type( locals_dict ) is not str\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"IMPORT_MODULE\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"import\",\n order_relevance = order_relevance,\n args = (\n module_identifier,\n globals_dict,\n locals_dict,\n import_list,\n level\n ),\n context = context\n )\n\ndef getImportFromStarCode( context, module_identifier ):\n if not context.hasLocalsDict():\n return \"IMPORT_MODULE_STAR( %s, true, %s );\" % (\n getModuleAccessCode(\n context = context\n ),\n module_identifier.getCodeTemporaryRef()\n )\n else:\n return \"IMPORT_MODULE_STAR( locals_dict.asObject(), false, %s );\" % (\n module_identifier.getCodeTemporaryRef()\n )\n\n\ndef getMaxIndexCode():\n return Identifier( \"PY_SSIZE_T_MAX\", 0 )\n\ndef getMinIndexCode():\n return Identifier( \"0\", 0 )\n\ndef getIndexValueCode( number ):\n return Identifier( \"%s\" % number, 0 )\n\ndef getIndexCode( identifier ):\n return Identifier(\n \"CONVERT_TO_INDEX( %s )\" % identifier.getCodeTemporaryRef(),\n 0\n )\n\ndef getDirectionFunctionCallCode( function_identifier, arguments,\n order_relevance, closure_variables,\n extra_arguments, context ):\n function_identifier = getDirectFunctionEntryPointIdentifier(\n function_identifier = function_identifier\n )\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = function_identifier,\n export_ref = 1,\n ref_count = 1,\n tmp_scope = \"call_tmp\",\n prefix_args = [\n _defaultToNullIdentifier( extra_argument ).getCodeTemporaryRef()\n for extra_argument in\n extra_arguments\n ],\n suffix_args = getClosureVariableProvisionCode(\n context = context,\n closure_variables = closure_variables\n ),\n order_relevance = order_relevance,\n args = arguments,\n context = context\n )\n\n\ndef getCallCodeNoArgs( called_identifier ):\n return Identifier(\n \"CALL_FUNCTION_NO_ARGS( %(function)s )\" % {\n \"function\" : called_identifier.getCodeTemporaryRef(),\n },\n 1\n )\n\ndef getCallCodePosArgs( context, order_relevance, called_identifier,\n argument_tuple ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"CALL_FUNCTION_WITH_POSARGS\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"call\",\n order_relevance = order_relevance,\n args = ( called_identifier, argument_tuple ),\n context = context\n )\n\ndef getCallCodeKeywordArgs( context, order_relevance, called_identifier,\n argument_dictionary ):\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"CALL_FUNCTION_WITH_KEYARGS\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"call\",\n order_relevance = order_relevance,\n args = ( called_identifier, argument_dictionary ),\n context = context\n )\n\ndef getCallCodePosKeywordArgs( context, order_relevance, called_identifier,\n argument_tuple, argument_dictionary ):\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"CALL_FUNCTION\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"call\",\n order_relevance = order_relevance,\n args = ( called_identifier, argument_tuple,\n argument_dictionary ),\n context = context\n )\n\ndef getUnpackNextCode( iterator_identifier, count ):\n return Identifier(\n \"UNPACK_NEXT( %s, %d )\" % (\n iterator_identifier.getCodeTemporaryRef(),\n count - 1\n ),\n 1\n )\n\ndef 
getUnpackCheckCode( iterator_identifier, count ):\n return \"UNPACK_ITERATOR_CHECK( %s, %d );\" % (\n iterator_identifier.getCodeTemporaryRef(),\n count\n )\n\ndef getSpecialAttributeLookupCode( attribute, source ):\n return Identifier(\n \"LOOKUP_SPECIAL( %s, %s )\" % (\n source.getCodeTemporaryRef(),\n attribute.getCodeTemporaryRef()\n ),\n 1\n )\n\ndef getAttributeLookupCode( attribute, source ):\n return Identifier(\n \"LOOKUP_ATTRIBUTE( %s, %s )\" % (\n source.getCodeTemporaryRef(),\n attribute.getCodeTemporaryRef()\n ),\n 1\n )\n\ndef getAttributeCheckCode( context, order_relevance, attribute, source ):\n return getBoolFromCode(\n code = getAttributeCheckBoolCode(\n order_relevance = order_relevance,\n source = source,\n attribute = attribute,\n context = context\n )\n )\n\ndef getAttributeCheckBoolCode( context, order_relevance, source, attribute ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"HAS_ATTRIBUTE\",\n export_ref = 0,\n ref_count = None,\n tmp_scope = \"hasattr\",\n order_relevance = order_relevance,\n args = ( source, attribute ),\n context = context\n )\n\ndef getAttributeGetCode( context, order_relevance, source, attribute, default ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"BUILTIN_GETATTR\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"getattr\",\n order_relevance = order_relevance,\n args = (\n source,\n attribute,\n _defaultToNullIdentifier( default )\n ),\n context = context\n )\n\ndef getAttributeSetCode( context, order_relevance, attribute, source, value ):\n result = getOrderRelevanceEnforcedArgsCode(\n helper = \"BUILTIN_SETATTR\",\n export_ref = 0,\n ref_count = None,\n tmp_scope = \"setattr\",\n order_relevance = order_relevance,\n args = (\n source,\n attribute,\n value\n ),\n context = context\n )\n\n # It's a void function \"BUILTIN_SETATTR\", but \"setattr\" returns \"None\".\n return Identifier(\n \"( %s, Py_None )\" % result,\n 0\n )\n\ndef getImportNameCode( import_name, module ):\n return Identifier(\n \"IMPORT_NAME( %s, %s )\" % (\n module.getCodeTemporaryRef(),\n import_name.getCodeTemporaryRef()\n ),\n 1\n )\n\ndef getSubscriptLookupCode( context, order_relevance, subscript, source ):\n helper = \"LOOKUP_SUBSCRIPT\"\n suffix_args = []\n\n if subscript.isConstantIdentifier():\n constant = subscript.getConstant()\n\n if Constants.isIndexConstant( constant ):\n constant_value = int( constant )\n\n if abs( constant_value ) < 2**31:\n helper = \"LOOKUP_SUBSCRIPT_CONST\"\n suffix_args = [ \"%d\" % constant ]\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = helper,\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"subscr\",\n order_relevance = order_relevance,\n args = ( source, subscript ),\n suffix_args = suffix_args,\n context = context\n )\n\n\ndef getHasKeyBoolCode( source, key ):\n return \"HAS_KEY( %s, %s )\" % (\n source.getCodeTemporaryRef(),\n key.getCodeTemporaryRef()\n )\n\ndef getSliceLookupCode( order_relevance, source, lower, upper, context ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"LOOKUP_SLICE\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"slice\",\n order_relevance = order_relevance,\n args = (\n source,\n _defaultToNoneIdentifier( lower ),\n _defaultToNoneIdentifier( upper )\n ),\n context = context\n )\n\ndef getSliceLookupIndexesCode( lower, upper, source ):\n return Identifier(\n \"LOOKUP_INDEX_SLICE( %s, %s, %s )\" % (\n source.getCodeTemporaryRef(),\n lower.getCodeTemporaryRef(),\n upper.getCodeTemporaryRef()\n ),\n 1\n )\n\ndef getSliceObjectCode( order_relevance, 
lower, upper, step, context ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"MAKE_SLICEOBJ\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"sliceobj\",\n order_relevance = order_relevance,\n args = (\n _defaultToNoneIdentifier( lower ),\n _defaultToNoneIdentifier( upper ),\n _defaultToNoneIdentifier( step )\n ),\n context = context\n )\n\ndef getStatementCode( identifier ):\n return identifier.getCodeDropRef() + \";\"\n\ndef getOperationCode( context, order_relevance, operator, identifiers ):\n # This needs to have one return per operation of Python, and there are many\n # of these, pylint: disable=R0911\n\n prefix_args = []\n ref_count = 1\n\n if operator == \"Pow\":\n helper = \"POWER_OPERATION\"\n elif operator == \"IPow\":\n helper = \"POWER_OPERATION_INPLACE\"\n elif operator == \"Add\":\n helper = \"BINARY_OPERATION_ADD\"\n elif operator == \"Sub\":\n helper = \"BINARY_OPERATION_SUB\"\n elif operator == \"Div\":\n helper = \"BINARY_OPERATION_DIV\"\n elif operator == \"Mult\":\n helper = \"BINARY_OPERATION_MUL\"\n elif operator == \"Mod\":\n helper = \"BINARY_OPERATION_REMAINDER\"\n elif len( identifiers ) == 2:\n helper = \"BINARY_OPERATION\"\n prefix_args = [ OperatorCodes.binary_operator_codes[ operator ] ]\n elif len( identifiers ) == 1:\n impl_helper, ref_count = OperatorCodes.unary_operator_codes[ operator ]\n helper = \"UNARY_OPERATION\"\n prefix_args = [ impl_helper ]\n else:\n assert False, (operator, identifiers)\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = helper,\n export_ref = 0,\n ref_count = ref_count,\n tmp_scope = \"op\",\n order_relevance = order_relevance,\n prefix_args = prefix_args,\n args = identifiers,\n context = context\n )\n\ndef getPrintCode( newline, identifiers, target_file ):\n print_elements_code = []\n\n for identifier in identifiers:\n print_elements_code.append(\n CodeTemplates.template_print_value % {\n \"print_value\" : identifier.getCodeTemporaryRef(),\n \"target_file\" : \"target_file\"\n if target_file is not None\n else \"NULL\"\n }\n )\n\n if newline:\n print_elements_code.append(\n CodeTemplates.template_print_newline % {\n \"target_file\" : \"target_file\"\n if target_file is not None\n else \"NULL\"\n }\n )\n\n if target_file is not None:\n return CodeTemplates.template_print_statement % {\n \"target_file\" : _defaultToNullIdentifier( target_file ).getCodeExportRef(),\n \"print_elements_code\" : indented( print_elements_code )\n }\n else:\n return \"\\n\".join( print_elements_code )\n\n\ndef getClosureVariableProvisionCode( context, closure_variables ):\n result = []\n\n for variable in closure_variables:\n assert variable.isClosureReference()\n\n variable = variable.getProviderVariable()\n\n result.append(\n getVariableCode(\n context = context,\n variable = variable\n )\n )\n\n return result\n\ndef getConditionalExpressionCode( condition_code, identifier_no, identifier_yes ):\n if identifier_yes.getCheapRefCount() == identifier_no.getCheapRefCount():\n if identifier_yes.getCheapRefCount() == 0:\n codes_yes = identifier_yes.getCodeTemporaryRef()\n codes_no = identifier_no.getCodeTemporaryRef()\n ref_count = 0\n else:\n codes_yes = identifier_yes.getCodeExportRef()\n codes_no = identifier_no.getCodeExportRef()\n ref_count = 1\n else:\n codes_yes = identifier_yes.getCodeExportRef()\n codes_no = identifier_no.getCodeExportRef()\n ref_count = 1\n\n return Identifier(\n CodeTemplates.template_conditional_expression % {\n \"condition\" : condition_code,\n \"yes\" : codes_yes,\n \"no\" : codes_no\n },\n ref_count\n 
)\n\ndef getFunctionCreationCode( context, function_identifier, order_relevance,\n default_args, closure_variables ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"MAKE_FUNCTION_%s\" % function_identifier,\n export_ref = 1,\n ref_count = 1,\n tmp_scope = \"make_func\",\n suffix_args = getClosureVariableProvisionCode(\n context = context,\n closure_variables = closure_variables\n ),\n order_relevance = order_relevance,\n args = default_args,\n context = context\n )\n\ndef getBranchCode( condition_code, yes_codes, no_codes ):\n assert yes_codes or no_codes\n\n if no_codes is None:\n return CodeTemplates.template_branch_one % {\n \"condition\" : condition_code,\n \"branch_code\" : indented( yes_codes if yes_codes is not None else \"\" )\n }\n else:\n assert no_codes, no_codes\n\n return CodeTemplates.template_branch_two % {\n \"condition\" : condition_code,\n \"branch_yes_code\" : indented( yes_codes if yes_codes is not None else \"\" ),\n \"branch_no_code\" : indented( no_codes )\n }\n\ndef getLoopContinueCode( needs_exceptions ):\n if needs_exceptions:\n return \"throw ContinueException();\"\n else:\n return \"CONSIDER_THREADING(); continue;\"\n\ndef getLoopBreakCode( needs_exceptions ):\n if needs_exceptions:\n return \"throw BreakException();\"\n else:\n return \"break;\"\n\ndef getComparisonExpressionCode( context, comparator, order_relevance, left,\n right ):\n # There is an awful lot of cases, pylint: disable=R0912\n\n if comparator in OperatorCodes.normal_comparison_codes:\n helper = OperatorCodes.normal_comparison_codes[ comparator ]\n assert helper.startswith( \"SEQUENCE_CONTAINS\" )\n\n ref_count = 0\n elif comparator in OperatorCodes.rich_comparison_codes:\n helper = \"RICH_COMPARE_%s\" % (\n OperatorCodes.rich_comparison_codes[ comparator ]\n )\n ref_count = 1\n elif comparator == \"Is\":\n # This is special, and \"==\" enforces order of evalulation already, or so\n # we believe.\n return getBoolFromCode(\n code = \"( %s == %s )\" % (\n left.getCodeTemporaryRef(),\n right.getCodeTemporaryRef()\n )\n )\n elif comparator == \"IsNot\":\n # This is special, and \"!=\" enforces order of evalulation already, or so\n # we believe.\n return getBoolFromCode(\n code = \"( %s != %s )\" % (\n left.getCodeTemporaryRef(),\n right.getCodeTemporaryRef()\n )\n )\n else:\n assert False, comparator\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = helper,\n export_ref = 0,\n ref_count = ref_count,\n tmp_scope = \"cmp\",\n order_relevance = order_relevance,\n args = ( left, right ),\n context = context\n )\n\n\ndef getComparisonExpressionBoolCode( context, comparator, order_relevance, left,\n right ):\n # There is an awful lot of cases, pylint: disable=R0912\n\n if comparator in OperatorCodes.normal_comparison_codes:\n helper = \"%s_BOOL\" % (\n OperatorCodes.normal_comparison_codes[ comparator ]\n )\n assert helper.startswith( \"SEQUENCE_CONTAINS\" )\n elif comparator in OperatorCodes.rich_comparison_codes:\n helper = \"RICH_COMPARE_BOOL_%s\" % (\n OperatorCodes.rich_comparison_codes[ comparator ]\n )\n elif comparator == \"Is\":\n # This is special, and \"==\" enforces order of evalulation already, or so\n # we believe.\n return \"( %s == %s )\" % (\n left.getCodeTemporaryRef(),\n right.getCodeTemporaryRef()\n )\n elif comparator == \"IsNot\":\n # This is special, and \"!=\" enforces order of evalulation already, or so\n # we believe.\n return \"( %s != %s )\" % (\n left.getCodeTemporaryRef(),\n right.getCodeTemporaryRef()\n )\n else:\n assert False, comparator\n\n return 
getOrderRelevanceEnforcedArgsCode(\n helper = helper,\n export_ref = 0,\n ref_count = None,\n tmp_scope = \"cmp\",\n order_relevance = order_relevance,\n args = ( left, right ),\n context = context\n )\n\ndef getConditionNotBoolCode( condition ):\n return \"(!( %s ))\" % condition\n\ndef getConditionAndCode( operands ):\n return \"( %s )\" % \" && \".join( operands )\n\ndef getConditionOrCode( operands ):\n return \"( %s )\" % \" || \".join( operands )\n\ndef getConditionSelectionCode( condition_code, yes_code, no_code ):\n return \"( %s ) ? ( %s ) : ( %s )\" % (\n condition_code,\n yes_code,\n no_code\n )\n\ndef getConditionCheckTrueCode( condition ):\n return \"CHECK_IF_TRUE( %s )\" % condition.getCodeTemporaryRef()\n\ndef getConditionCheckFalseCode( condition ):\n return \"CHECK_IF_FALSE( %s )\" % condition.getCodeTemporaryRef()\n\ndef getTrueExpressionCode():\n return \"true\"\n\ndef getFalseExpressionCode():\n return \"false\"\n\ndef getAttributeAssignmentCode( order_relevance, target, attribute,\n identifier ):\n return getOrderRelevanceEnforcedCallCode(\n order_relevance = order_relevance,\n helper = \"SET_ATTRIBUTE\",\n names = ( \"identifier\", \"target\", \"attribute\" ),\n values = ( identifier, target, attribute )\n )\n\ndef getAttributeDelCode( target, attribute ):\n return \"DEL_ATTRIBUTE( %s, %s );\" % (\n target.getCodeTemporaryRef(),\n attribute.getCodeTemporaryRef()\n )\n\ndef getSliceAssignmentIndexesCode( target, lower, upper, identifier ):\n return \"SET_INDEX_SLICE( %s, %s, %s, %s );\" % (\n target.getCodeTemporaryRef(),\n lower.getCodeTemporaryRef(),\n upper.getCodeTemporaryRef(),\n identifier.getCodeTemporaryRef()\n )\n\ndef getSliceAssignmentCode( order_relevance, target, lower, upper, identifier ):\n return getOrderRelevanceEnforcedCallCode(\n order_relevance = order_relevance,\n helper = \"SET_SLICE\",\n names = ( \"identifier\", \"target\", \"lower\", \"upper\" ),\n values = (\n identifier,\n target,\n _defaultToNoneIdentifier( lower ),\n _defaultToNoneIdentifier( upper )\n )\n )\n\ndef getSliceDelCode( target, lower, upper ):\n return \"DEL_SLICE( %s, %s, %s );\" % (\n target.getCodeTemporaryRef(),\n _defaultToNoneIdentifier( lower ).getCodeTemporaryRef(),\n _defaultToNoneIdentifier( upper ).getCodeTemporaryRef()\n )\n\ndef getLineNumberCode( source_ref ):\n if source_ref.shallSetCurrentLine():\n return \"frame_guard.setLineNumber( %d );\\n\" % source_ref.getLineNumber()\n else:\n return \"\"\n\ndef getLineNumberCommentCode( source_ref ):\n return \"// line %d\\n\" % source_ref.getLineNumber()\n\n\ndef getLoopCode( loop_body_codes, needs_break_exception,\n needs_continue_exception ):\n if needs_break_exception and needs_continue_exception:\n while_loop_template = \\\n CodeTemplates.template_loop_break_continue_catching\n indentation = 2\n elif needs_break_exception:\n while_loop_template = CodeTemplates.template_loop_break_catching\n indentation = 2\n elif needs_continue_exception:\n while_loop_template = CodeTemplates.template_loop_continue_catching\n indentation = 2\n else:\n while_loop_template = CodeTemplates.template_loop_simple\n indentation = 1\n\n return while_loop_template % {\n \"loop_body_codes\" : indented(\n loop_body_codes if loop_body_codes is not None else \"\",\n indentation\n ),\n }\n\ndef getVariableAssignmentCode( context, variable, identifier ):\n assert isinstance( variable, Variables.Variable ), variable\n\n # This ought to be impossible to happen, as an assignment to an overflow\n # variable would have made it a local one.\n assert not 
variable.isMaybeLocalVariable()\n\n if variable.isTempVariableReference():\n referenced = variable.getReferenced()\n\n if not referenced.isDeclared():\n referenced.markAsDeclared()\n\n return getLocalVariableInitCode(\n context = context,\n variable = variable.getReferenced(),\n init_from = identifier\n )\n elif not referenced.getNeedsFree():\n # So won't get a reference, and take none, or else it may get lost,\n # which we don't want to happen.\n\n # This must be true, otherwise the needs no free statement was made\n # in error.\n assert identifier.getCheapRefCount() == 0\n\n return \"%s = %s;\" % (\n getVariableCode(\n variable = variable,\n context = context\n ),\n identifier.getCodeTemporaryRef()\n )\n\n if identifier.getCheapRefCount() == 0:\n identifier_code = identifier.getCodeTemporaryRef()\n assign_code = \"0\"\n else:\n identifier_code = identifier.getCodeExportRef()\n assign_code = \"1\"\n\n # TODO: Move the assignment code to the variable object.\n if variable.isModuleVariable():\n return \"UPDATE_STRING_DICT%s( _moduledict_%s, (Nuitka_StringObject *)%s, %s );\" % (\n assign_code,\n context.getModuleCodeName(),\n getConstantCode(\n constant = variable.getName(),\n context = context\n ),\n identifier_code\n )\n\n return \"%s.assign%s( %s );\" % (\n getVariableCode(\n variable = variable,\n context = context\n ),\n assign_code,\n identifier_code\n )\n\ndef getAssignmentTempKeeperCode( source_identifier, variable, context ):\n ref_count = source_identifier.getCheapRefCount()\n variable_name = variable.getName()\n\n assert not ref_count or variable.getReferenced().getNeedsFree(), \\\n ( variable, variable.getReferenced().getNeedsFree(), ref_count,\n source_identifier, source_identifier.__class__ )\n\n return _getAssignmentTempKeeperCode(\n source_identifier = source_identifier,\n variable_name = variable_name,\n context = context\n )\n\ndef getTempKeeperHandle( variable, context ):\n variable_name = variable.getName()\n ref_count = context.getTempKeeperRefCount( variable_name )\n\n if ref_count == 1:\n return KeeperAccessIdentifier(\n \"%s.asObject()\" % variable_name\n )\n else:\n # TODO: Could create an identifier, where 0 is just cheap, and 1 is\n # still available, may give nicer to read code occasionally.\n return Identifier(\n \"%s.asObject0()\" % variable_name,\n 0\n )\n\ndef getVariableDelCode( context, tolerant, variable ):\n assert isinstance( variable, Variables.Variable ), variable\n\n if variable.isModuleVariable():\n var_name = variable.getName()\n\n context.addGlobalVariableNameUsage( var_name )\n\n return \"_mvar_%s_%s.del( %s );\" % (\n context.getModuleCodeName(),\n var_name,\n \"true\" if tolerant else \"false\"\n )\n else:\n return \"%s.del( %s );\" % (\n getVariableCode(\n variable = variable,\n context = context\n ),\n \"true\" if tolerant else \"false\"\n )\n\ndef getSubscriptAssignmentCode( order_relevance, subscribed, subscript,\n identifier ):\n return getOrderRelevanceEnforcedCallCode(\n order_relevance = order_relevance,\n helper = \"SET_SUBSCRIPT\",\n names = ( \"identifier\", \"subscribed\", \"subscript\" ),\n values = ( identifier, subscribed, subscript )\n )\n\ndef getSubscriptDelCode( order_relevance, subscribed, subscript ):\n return getOrderRelevanceEnforcedCallCode(\n order_relevance = order_relevance,\n helper = \"DEL_SUBSCRIPT\",\n names = ( \"subscribed\", \"subscript\" ),\n values = ( subscribed, subscript )\n )\n\ndef getTryFinallyCode( context, needs_continue, needs_break,\n needs_return_value_catch, needs_return_value_reraise,\n aborting, 
code_tried, code_final, try_count ):\n tb_making = getTracebackMakingIdentifier( context )\n\n rethrow_setups = \"\"\n rethrow_catchers = \"\"\n rethrow_raisers = \"\"\n\n values = {\n \"try_count\" : try_count\n }\n\n if needs_continue:\n rethrow_setups += CodeTemplates.try_finally_template_setup_continue % (\n values\n )\n rethrow_catchers += CodeTemplates.try_finally_template_catch_continue % values\n rethrow_raisers += CodeTemplates.try_finally_template_reraise_continue % values\n\n if needs_break:\n rethrow_setups += CodeTemplates.try_finally_template_setup_break % values\n rethrow_catchers += CodeTemplates.try_finally_template_catch_break % values\n rethrow_raisers += CodeTemplates.try_finally_template_reraise_break % values\n\n if needs_return_value_catch:\n rethrow_setups += CodeTemplates.try_finally_template_setup_return_value % values\n rethrow_catchers += CodeTemplates.try_finally_template_catch_return_value % values\n\n if needs_return_value_reraise:\n rethrow_raisers += CodeTemplates.try_finally_template_reraise_return_value % values\n elif not aborting:\n rethrow_raisers += CodeTemplates.try_finally_template_indirect_return_value % values\n else:\n rethrow_raisers += CodeTemplates.try_finally_template_direct_return_value % values\n\n result = CodeTemplates.try_finally_template % {\n \"try_count\" : try_count,\n \"tried_code\" : indented( code_tried ),\n \"final_code\" : indented( code_final, 0 ),\n \"tb_making\" : tb_making.getCodeExportRef(),\n \"rethrow_setups\" : rethrow_setups,\n \"rethrow_catchers\" : rethrow_catchers,\n \"rethrow_raisers\" : rethrow_raisers,\n }\n\n if not rethrow_raisers:\n result = result.rstrip()\n\n return result\n\ndef getTryExceptHandlerCode( exception_identifiers, handler_code,\n needs_frame_detach, first_handler ):\n exception_code = []\n\n cond_keyword = \"if\" if first_handler else \"else if\"\n\n if exception_identifiers:\n exception_code.append(\n \"%s ( %s )\" % (\n cond_keyword,\n \" || \".join(\n \"_exception.matches( %s )\" % (\n exception_identifier.getCodeTemporaryRef()\n )\n for exception_identifier in\n exception_identifiers\n )\n )\n )\n else:\n exception_code.append(\n \"%s (true)\" % cond_keyword\n )\n\n if handler_code is None:\n handler_code = []\n\n if needs_frame_detach:\n handler_code.insert(\n 0,\n CodeTemplates.template_setup_except_handler_detaching % {\n }\n )\n\n exception_code += getBlockCode(\n handler_code\n ).split( \"\\n\" )\n\n return exception_code\n\ndef getTryExceptCode( context, code_tried, handler_codes ):\n exception_code = handler_codes\n exception_code += CodeTemplates.try_except_reraise_unmatched_template.split( \"\\n\" )\n\n tb_making = getTracebackMakingIdentifier( context )\n\n return CodeTemplates.try_except_template % {\n \"tried_code\" : indented( code_tried or \"\" ),\n \"exception_code\" : indented( exception_code ),\n \"guard_class\" : context.getFrameGuardClass(),\n \"tb_making\" : tb_making.getCodeExportRef(),\n }\n\ndef getTryNextExceptStopIterationIdentifier( context ):\n try_count = context.allocateTryNumber()\n\n return Identifier( \"_tmp_unpack_%d\" % try_count, 1 )\n\ndef getTryNextExceptStopIterationCode( source_identifier, handler_code, assign_code, temp_identifier ):\n return CodeTemplates.template_try_next_except_stop_iteration % {\n \"temp_var\" : temp_identifier.getCode(),\n \"handler_code\" : indented( handler_code ),\n \"assignment_code\" : assign_code,\n \"source_identifier\" : source_identifier.getCodeTemporaryRef()\n }\n\n\ndef getRaiseExceptionWithCauseCode( context, 
order_relevance, exception_type,\n exception_cause ):\n # Must enforce tb_maker to be last.\n exception_tb_maker = getTracebackMakingIdentifier(\n context = context\n )\n\n return getOrderRelevanceEnforcedCallCode(\n order_relevance = order_relevance + [ True ],\n helper = \"RAISE_EXCEPTION_WITH_CAUSE\",\n names = (\n \"exception_type\", \"exception_cause\", \"exception_tb\"\n ),\n values = (\n exception_type, exception_cause, exception_tb_maker\n )\n )\n\ndef getRaiseExceptionWithTypeCode( context, order_relevance, exception_type ):\n # Must enforce tb_maker to be last.\n exception_tb_maker = getTracebackMakingIdentifier(\n context = context\n )\n\n return getOrderRelevanceEnforcedCallCode(\n order_relevance = order_relevance + [ True ],\n helper = \"RAISE_EXCEPTION_WITH_TYPE\",\n names = (\n \"exception_type\", \"exception_tb\"\n ),\n values = (\n exception_type, exception_tb_maker\n )\n )\n\ndef getRaiseExceptionWithValueCode( context, order_relevance, exception_type,\n exception_value, implicit ):\n # Must enforce tb_maker to be last.\n exception_tb_maker = getTracebackMakingIdentifier(\n context = context\n )\n\n if implicit:\n helper = \"RAISE_EXCEPTION_WITH_VALUE_NO_NORMALIZE\"\n else:\n helper = \"RAISE_EXCEPTION_WITH_VALUE\"\n\n return getOrderRelevanceEnforcedCallCode(\n order_relevance = order_relevance + [ True ],\n helper = helper,\n names = (\n \"exception_type\", \"exception_value\", \"exception_tb\"\n ),\n values = (\n exception_type, exception_value, exception_tb_maker\n )\n )\n\ndef getRaiseExceptionWithTracebackCode( order_relevance, exception_type,\n exception_value, exception_tb ):\n return getOrderRelevanceEnforcedCallCode(\n order_relevance = order_relevance,\n helper = \"RAISE_EXCEPTION_WITH_TRACEBACK\",\n names = (\n \"exception_type\", \"exception_value\", \"exception_tb\"\n ),\n values = (\n exception_type, exception_value, exception_tb\n )\n )\n\n\ndef getReRaiseExceptionCode( local, final ):\n if local:\n thrower_code = CodeTemplates.try_except_reraise_template % {}\n else:\n thrower_code = \"RERAISE_EXCEPTION();\"\n\n if final:\n return CodeTemplates.try_except_reraise_finally_template % {\n \"try_count\" : final,\n \"thrower_code\" : thrower_code\n }\n else:\n return thrower_code\n\ndef getRaiseExceptionExpressionCode( context, exception_type, exception_value ):\n # Order is supposed to not matter, as these were run time detected and\n # contain no side effects.\n exception_tb_maker = getTracebackMakingIdentifier(\n context = context\n )\n\n return ThrowingIdentifier(\n \"THROW_EXCEPTION( %s, %s, %s )\" % (\n exception_type.getCodeExportRef(),\n exception_value.getCodeExportRef(),\n exception_tb_maker.getCodeExportRef()\n )\n )\n\ndef getSideEffectsCode( side_effects, identifier ):\n assert side_effects\n\n side_effects_code = \", \".join(\n side_effect.getCodeTemporaryRef()\n for side_effect in\n side_effects\n )\n\n if identifier.getCheapRefCount() == 0:\n return Identifier(\n \"( %s, %s )\" % (\n side_effects_code,\n identifier.getCodeTemporaryRef()\n ),\n 0\n )\n else:\n return Identifier(\n \"( %s, %s )\" % (\n side_effects_code,\n identifier.getCodeExportRef()\n ),\n 1\n )\n\ndef getBuiltinRefCode( context, builtin_name ):\n return Identifier(\n \"LOOKUP_BUILTIN( %s )\" % getConstantCode(\n constant = builtin_name,\n context = context\n ),\n 0\n )\n\ndef getBuiltinOriginalRefCode( builtin_name ):\n return Identifier(\n \"_python_original_builtin_value_%s\" % builtin_name,\n 0\n )\n\ndef getBuiltinAnonymousRefCode( builtin_name ):\n return Identifier(\n 
\"(PyObject *)%s\" % Builtins.builtin_anon_codes[ builtin_name ],\n 0\n )\n\ndef getExceptionRefCode( exception_type ):\n if exception_type == \"NotImplemented\":\n return Identifier(\n \"Py_NotImplemented\",\n 0\n )\n\n return Identifier(\n \"PyExc_%s\" % exception_type,\n 0\n )\n\ndef getMakeBuiltinExceptionCode( context, order_relevance, exception_type,\n exception_args ):\n\n return getCallCodePosArgs(\n called_identifier = getExceptionRefCode( exception_type ),\n argument_tuple = getTupleCreationCode(\n element_identifiers = exception_args,\n order_relevance = order_relevance,\n context = context,\n ),\n order_relevance = ( False, True ),\n context = context\n )\n\ndef _getLocalVariableList( context, provider ):\n if provider.isExpressionFunctionBody():\n # Sort parameter variables of functions to the end.\n\n start_part = []\n end_part = []\n\n for variable in provider.getVariables():\n if variable.isParameterVariable():\n end_part.append( variable )\n else:\n start_part.append( variable )\n\n variables = start_part + end_part\n\n include_closure = not provider.isUnoptimized() and not provider.isClassDictCreation()\n else:\n variables = provider.getVariables()\n\n include_closure = True\n\n return [\n \"%s\" % getVariableCode(\n variable = variable,\n context = context\n )\n for variable in\n variables\n if not variable.isModuleVariable()\n if not variable.isMaybeLocalVariable()\n if ( not variable.isClosureReference() or include_closure )\n ]\n\n\ndef getLoadDirCode( context, provider ):\n if provider.isPythonModule():\n globals_identifier = getLoadGlobalsCode(\n context = context\n )\n\n return Identifier(\n \"PyDict_Keys( %s )\" % (\n globals_identifier.getCodeTemporaryRef(),\n ),\n 1\n )\n else:\n if context.hasLocalsDict():\n return Identifier(\n \"PyDict_Keys( %s )\" % getLoadLocalsCode(\n context = context,\n provider = provider,\n mode = \"updated\"\n ),\n 1\n )\n else:\n local_list = _getLocalVariableList(\n context = context,\n provider = provider\n )\n\n result = getListCreationCode(\n context = context,\n order_relevance = (),\n element_identifiers = (),\n )\n\n for local_var in local_list:\n result = Identifier(\n \"%s.updateLocalsDir( %s )\" % (\n local_var,\n result.getCodeTemporaryRef()\n ),\n 0\n )\n\n return result\n\ndef getLoadVarsCode( identifier ):\n return Identifier(\n \"LOOKUP_VARS( %s )\" % identifier.getCodeTemporaryRef(),\n 1\n )\n\ndef getLoadGlobalsCode( context ):\n return Identifier(\n \"((PyModuleObject *)%(module_identifier)s)->md_dict\" % {\n \"module_identifier\" : getModuleAccessCode( context )\n },\n 0\n )\n\ndef getLoadLocalsCode( context, provider, mode ):\n if provider.isPythonModule():\n return getLoadGlobalsCode( context )\n elif not context.hasLocalsDict():\n local_list = _getLocalVariableList(\n provider = provider,\n context = context\n )\n\n result = EmptyDictIdentifier()\n\n for local_var in local_list:\n result = Identifier(\n \"%s.updateLocalsDict( %s )\" % (\n local_var,\n result.getCodeExportRef()\n ),\n 1\n )\n\n return result\n else:\n if mode == \"copy\":\n return Identifier(\n \"PyDict_Copy( locals_dict.asObject() )\",\n 1\n )\n elif mode == \"updated\":\n local_list = _getLocalVariableList(\n provider = provider,\n context = context\n )\n\n result = Identifier(\n \"locals_dict.asObject()\",\n 0\n )\n\n for local_var in local_list:\n result = Identifier(\n \"%s.updateLocalsDict( %s )\" % (\n local_var,\n result.getCodeTemporaryRef()\n ),\n 0\n )\n\n return result\n else:\n assert False\n\ndef getSetLocalsCode( 
new_locals_identifier ):\n return \"locals_dict.assign1( %s );\" % (\n new_locals_identifier.getCodeExportRef()\n )\n\ndef getStoreLocalsCode( context, source_identifier, provider ):\n assert not provider.isPythonModule()\n\n code = \"\"\n\n for variable in provider.getVariables():\n if not variable.isModuleVariable() and \\\n not variable.isMaybeLocalVariable():\n key_identifier = getConstantHandle(\n context = context,\n constant = variable.getName()\n )\n\n var_assign_code = getVariableAssignmentCode(\n context = context,\n variable = variable,\n identifier = getSubscriptLookupCode(\n order_relevance = ( False, False ),\n subscript = key_identifier,\n source = source_identifier,\n context = context\n )\n )\n\n # This ought to re-use the condition code stuff.\n code += \"if ( %s )\\n\" % getHasKeyBoolCode(\n source = source_identifier,\n key = key_identifier\n )\n\n code += getBlockCode( var_assign_code ) + \"\\n\"\n\n return code.rstrip( \"\\n\" )\n\ndef getFutureFlagsCode( future_spec ):\n flags = future_spec.asFlags()\n\n if flags:\n return \" | \".join( flags )\n else:\n return 0\n\ndef getCompileCode( context, order_relevance, source_identifier,\n filename_identifier, mode_identifier, future_flags ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"COMPILE_CODE\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"compile\",\n order_relevance = order_relevance,\n args = (\n source_identifier,\n filename_identifier,\n mode_identifier,\n future_flags\n ),\n context = context\n )\n\n\ndef getEvalCode( context, order_relevance, exec_code, filename_identifier,\n globals_identifier, locals_identifier, mode_identifier,\n future_flags ):\n code_identifier = getCompileCode(\n order_relevance = [ False ] * 4,\n source_identifier = exec_code,\n filename_identifier = filename_identifier,\n mode_identifier = mode_identifier,\n future_flags = Identifier( str( future_flags ), 0 ),\n context = context\n )\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"EVAL_CODE\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"eval\",\n order_relevance = order_relevance,\n args = (\n code_identifier,\n globals_identifier,\n locals_identifier\n ),\n context = context\n )\n\n\n\ndef getExecCode( context, exec_code, globals_identifier, locals_identifier, future_flags, provider, source_ref ):\n\n # Filename with origin if improved mode.\n if Options.isFullCompat():\n filename_identifier = getConstantCode(\n constant = \"\",\n context = context\n )\n else:\n filename_identifier = getConstantCode(\n constant = \"\" % source_ref.getAsString(),\n context = context\n )\n\n result = CodeTemplates.exec_template % {\n \"globals_identifier\" : globals_identifier.getCodeExportRef(),\n \"locals_identifier\" : locals_identifier.getCodeExportRef(),\n \"source_identifier\" : exec_code.getCodeTemporaryRef(),\n \"filename_identifier\" : getConstantCode(\n constant = \"\",\n context = context\n ),\n \"mode_identifier\" : getConstantCode(\n constant = \"exec\",\n context = context\n ),\n \"future_flags\" : future_flags,\n }\n\n if provider.isExpressionFunctionBody() and provider.isUnqualifiedExec():\n locals_temp_identifier = Identifier( \"locals_source\", 0 )\n\n result += CodeTemplates.exec_copy_back_template % {\n \"store_locals_code\" : indented(\n getStoreLocalsCode(\n context = context,\n source_identifier = locals_temp_identifier,\n provider = provider\n ),\n )\n }\n\n return getBlockCode( result )\n\ndef getBuiltinSuperCode( order_relevance, type_identifier, object_identifier,\n context ):\n return 
getOrderRelevanceEnforcedArgsCode(\n helper = \"BUILTIN_SUPER\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"super\",\n order_relevance = order_relevance,\n args = (\n _defaultToNullIdentifier( type_identifier ),\n _defaultToNullIdentifier( object_identifier )\n ),\n context = context\n )\n\n\ndef getBuiltinIsinstanceCode( context, order_relevance, inst_identifier,\n cls_identifier ):\n return getBoolFromCode(\n code = getBuiltinIsinstanceBoolCode(\n order_relevance = order_relevance,\n inst_identifier = inst_identifier,\n cls_identifier = cls_identifier,\n context = context\n )\n )\n\ndef getBuiltinIsinstanceBoolCode( context, order_relevance, inst_identifier,\n cls_identifier ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"BUILTIN_ISINSTANCE_BOOL\",\n export_ref = 0,\n ref_count = None,\n tmp_scope = \"isinstance\",\n order_relevance = order_relevance,\n args = (\n inst_identifier,\n cls_identifier\n ),\n context = context\n )\n\ndef getBuiltinOpenCode( context, order_relevance, filename, mode, buffering ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"OPEN_FILE\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"open\",\n order_relevance = order_relevance,\n args = (\n _defaultToNullIdentifier( filename ),\n _defaultToNullIdentifier( mode ),\n _defaultToNullIdentifier( buffering )\n ),\n context = context\n )\n\ndef getBuiltinLenCode( identifier ):\n return HelperCallIdentifier(\n \"BUILTIN_LEN\", identifier\n )\n\ndef getBuiltinDir1Code( identifier ):\n return HelperCallIdentifier(\n \"BUILTIN_DIR1\", identifier\n )\n\ndef getBuiltinRange1Code( value ):\n return HelperCallIdentifier(\n \"BUILTIN_RANGE\", value\n )\n\ndef getBuiltinRange2Code( order_relevance, low, high, context ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"BUILTIN_RANGE2\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"range\",\n order_relevance = order_relevance,\n args = (\n low,\n high\n ),\n context = context\n )\n\ndef getBuiltinRange3Code( order_relevance, low, high, step, context ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"BUILTIN_RANGE3\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"range\",\n order_relevance = order_relevance,\n args = (\n low,\n high,\n step\n ),\n context = context\n )\n\ndef getBuiltinChrCode( value ):\n return HelperCallIdentifier( \"BUILTIN_CHR\", value )\n\ndef getBuiltinOrdCode( value ):\n return HelperCallIdentifier( \"BUILTIN_ORD\", value )\n\ndef getBuiltinBinCode( value ):\n return HelperCallIdentifier( \"BUILTIN_BIN\", value )\n\ndef getBuiltinOctCode( value ):\n return HelperCallIdentifier( \"BUILTIN_OCT\", value )\n\ndef getBuiltinHexCode( value ):\n return HelperCallIdentifier( \"BUILTIN_HEX\", value )\n\ndef getBuiltinType1Code( value ):\n return HelperCallIdentifier( \"BUILTIN_TYPE1\", value )\n\ndef getBuiltinIter1Code( value ):\n return HelperCallIdentifier( \"MAKE_ITERATOR\", value )\n\ndef getBuiltinIter2Code( context, order_relevance, callable_identifier,\n sentinel_identifier ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"BUILTIN_ITER2\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"iter\",\n order_relevance = order_relevance,\n args = (\n callable_identifier,\n sentinel_identifier\n ),\n context = context\n )\n\ndef getBuiltinNext1Code( value ):\n return HelperCallIdentifier( \"BUILTIN_NEXT1\", value )\n\ndef getBuiltinNext2Code( context, order_relevance, iterator_identifier,\n default_identifier ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"BUILTIN_NEXT2\",\n 
export_ref = 0,\n ref_count = 1,\n tmp_scope = \"next\",\n order_relevance = order_relevance,\n args = (\n iterator_identifier,\n default_identifier\n ),\n context = context\n )\n\ndef getBuiltinType3Code( context, order_relevance, name_identifier,\n bases_identifier, dict_identifier ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"BUILTIN_TYPE3\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"type3\",\n order_relevance = order_relevance,\n prefix_args = (\n getConstantCode(\n constant = context.getModuleName(),\n context = context\n ),\n ),\n args = (\n name_identifier,\n bases_identifier,\n dict_identifier\n ),\n context = context\n )\n\ndef getBuiltinTupleCode( identifier ):\n return HelperCallIdentifier( \"TO_TUPLE\", identifier )\n\ndef getBuiltinListCode( identifier ):\n return HelperCallIdentifier( \"TO_LIST\", identifier )\n\ndef getBuiltinDictCode( seq_identifier, dict_identifier ):\n if dict_identifier.isConstantIdentifier() and dict_identifier.getConstant() == {}:\n dict_identifier = None\n\n assert seq_identifier is not None or dict_identifier is not None\n\n if seq_identifier is not None:\n return Identifier(\n \"TO_DICT( %s, %s )\" % (\n seq_identifier.getCodeTemporaryRef(),\n _defaultToNullIdentifier( dict_identifier ).getCodeTemporaryRef()\n ),\n 1\n )\n else:\n return dict_identifier\n\ndef getBuiltinFloatCode( identifier ):\n return HelperCallIdentifier( \"TO_FLOAT\", identifier )\n\ndef getBuiltinLong1Code( context, identifier ):\n if identifier is None:\n identifier = getConstantHandle( context = context, constant = \"0\" )\n\n return HelperCallIdentifier( \"TO_LONG\", identifier )\n\ndef getBuiltinLong2Code( context, order_relevance, identifier, base ):\n if identifier is None:\n identifier = getConstantHandle( context = context, constant = \"0\" )\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"TO_LONG2\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"long\",\n order_relevance = order_relevance,\n args = (\n identifier,\n base\n ),\n context = context\n )\n\ndef getBuiltinInt1Code( context, identifier ):\n if identifier is None:\n identifier = getConstantHandle( context = context, constant = \"0\" )\n\n return HelperCallIdentifier( \"TO_INT\", identifier )\n\ndef getBuiltinInt2Code( context, order_relevance, identifier, base ):\n if identifier is None:\n identifier = getConstantHandle( context = context, constant = \"0\" )\n\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"TO_INT2\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"int\",\n order_relevance = order_relevance,\n args = (\n identifier,\n base\n ),\n context = context\n )\n\ndef getBuiltinStrCode( identifier ):\n return HelperCallIdentifier( \"TO_STR\", identifier )\n\ndef getBuiltinUnicode1Code( identifier ):\n return HelperCallIdentifier( \"TO_UNICODE\", identifier )\n\ndef getBuiltinUnicode3Code( context, order_relevance, identifier, encoding,\n errors ):\n return getOrderRelevanceEnforcedArgsCode(\n helper = \"TO_UNICODE3\",\n export_ref = 0,\n ref_count = 1,\n tmp_scope = \"unicode\",\n order_relevance = order_relevance,\n args = (\n identifier,\n _defaultToNullIdentifier( encoding ),\n _defaultToNullIdentifier( errors ),\n ),\n context = context\n )\n\ndef getBoolFromCode( code ):\n assert type( code ) is str\n\n return Identifier(\n \"BOOL_FROM( %s )\" % code,\n 0\n )\n\ndef getBuiltinBoolCode( identifier ):\n return Identifier(\n \"TO_BOOL( %s )\" % identifier.getCodeTemporaryRef(),\n 0\n )\n\ndef getModuleAccessCode( context ):\n return \"_module_%s\" % 
context.getModuleCodeName()\n\ndef getFrameMakingIdentifier( context ):\n return context.getFrameHandle()\n\ndef getTracebackMakingIdentifier( context ):\n return Identifier(\n \"MAKE_TRACEBACK( %s )\" % (\n getFrameMakingIdentifier( context = context ).getCodeExportRef(),\n ),\n 1\n )\n\ndef getModuleIdentifier( module_name ):\n return module_name.replace( \".\", \"__\" ).replace( \"-\", \"_\" )\n\ndef getPackageIdentifier( module_name ):\n return module_name.replace( \".\", \"__\" )\n\ndef getModuleCode( context, module_name, codes, other_module_names,\n function_decl_codes, function_body_codes ):\n # For the module code, lots of attributes come together.\n # pylint: disable=R0914\n module_identifier = getModuleIdentifier( module_name )\n\n module_globals = \"\\n\".join(\n [\n \"static %s _mvar_%s_%s( &_module_%s, &%s );\" % (\n \"PyObjectGlobalVariable_%s\" % module_identifier,\n module_identifier,\n var_name,\n module_identifier,\n getConstantCode( constant = var_name, context = context )\n )\n for var_name in\n context.getGlobalVariableNames()\n ]\n )\n\n header = CodeTemplates.global_copyright % {\n \"name\" : module_name,\n \"version\" : Options.getVersion()\n }\n\n # Create for for \"inittab\" to use in unfreezing of modules if that is used.\n module_inittab = []\n\n for other_module_name in other_module_names:\n module_inittab.append (\n CodeTemplates.module_inittab_entry % {\n \"module_name\" : other_module_name,\n \"module_identifier\" : getModuleIdentifier( other_module_name ),\n }\n )\n\n module_code = CodeTemplates.module_body_template % {\n \"module_name\" : module_name,\n \"module_name_obj\" : getConstantCode(\n context = context,\n constant = module_name\n ),\n \"module_identifier\" : module_identifier,\n \"module_functions_decl\" : function_decl_codes,\n \"module_functions_code\" : function_body_codes,\n \"module_globals\" : module_globals,\n \"module_code\" : indented( codes ),\n \"module_inittab\" : indented( sorted( module_inittab ) ),\n \"use_unfreezer\" : 1 if other_module_names else 0\n }\n\n return header + module_code\n\ndef getModuleDeclarationCode( module_name, extra_declarations ):\n module_header_code = CodeTemplates.module_header_template % {\n \"module_identifier\" : getModuleIdentifier( module_name ),\n \"extra_declarations\" : extra_declarations\n }\n\n return CodeTemplates.template_header_guard % {\n \"header_guard_name\" : \"__%s_H__\" % getModuleIdentifier( module_name ),\n \"header_body\" : module_header_code\n }\n\ndef getMainCode( codes, code_identifier, context ):\n if code_identifier is None:\n code_identifier = NullIdentifier()\n\n main_code = CodeTemplates.main_program % {\n \"sys_executable\" : getConstantCode(\n constant = \"python.exe\"\n if Options.isWindowsTarget()\n else sys.executable,\n context = context\n ),\n \"python_sysflag_debug\" : sys.flags.debug,\n \"python_sysflag_py3k_warning\" : ( sys.flags.py3k_warning\n if hasattr( sys.flags, \"py3k_warning\" ) else 0 ),\n \"python_sysflag_division_warning\" : ( sys.flags.division_warning\n if hasattr( sys.flags, \"division_warning\" ) else 0 ),\n #\"python_sysflag_division_new\" : sys.flags.division_new, #not supported\n \"python_sysflag_inspect\" : sys.flags.inspect,\n \"python_sysflag_interactive\" : sys.flags.interactive,\n \"python_sysflag_optimize\" : sys.flags.optimize,\n \"python_sysflag_dont_write_bytecode\" : sys.flags.dont_write_bytecode,\n \"python_sysflag_no_user_site\" : sys.flags.no_user_site,\n \"python_sysflag_no_site\" : sys.flags.no_site,\n 
\"python_sysflag_ignore_environment\" : sys.flags.ignore_environment,\n \"python_sysflag_tabcheck\" : ( sys.flags.tabcheck\n if hasattr( sys.flags, \"tabcheck\" ) else 0 ),\n \"python_sysflag_verbose\" : sys.flags.verbose,\n \"python_sysflag_unicode\" : ( sys.flags.unicode\n if hasattr( sys.flags, \"unicode\" ) else 0 ),\n \"python_sysflag_bytes_warning\" : sys.flags.bytes_warning,\n \"python_sysflag_hash_randomization\" : ( sys.flags.hash_randomization\n if hasattr( sys.flags, \"hash_randomization\" ) else 0 ),\n \"code_identifier\" : code_identifier.getCodeTemporaryRef()\n }\n\n return codes + main_code\n\ndef getFunctionsCode( context ):\n result = \"\"\n\n for _code_name, ( _function_decl, function_code ) in sorted( context.getFunctionsCodes().items() ):\n result += function_code\n\n return result\n\ndef getFunctionsDecl( context ):\n result = \"\"\n\n for _code_name, ( function_decl, _function_code ) in sorted( context.getFunctionsCodes().items() ):\n result += function_decl\n\n return result\n\ndef _getFunctionCreationArgs( defaults_identifier, kw_defaults_identifier,\n annotations_identifier, closure_variables ):\n result = []\n\n if not kw_defaults_identifier.isConstantIdentifier():\n result.append( \"PyObject *kwdefaults\" )\n\n if not defaults_identifier.isConstantIdentifier():\n result.append( \"PyObject *defaults\" )\n\n if annotations_identifier is not None and \\\n not annotations_identifier.isConstantIdentifier():\n result.append( \"PyObject *annotations\" )\n\n for closure_variable in closure_variables:\n result.append( \"PyObjectSharedLocalVariable &python_closure_%s\" % closure_variable.getName() )\n\n return result\n\n\ndef getExportScopeCode( cross_module ):\n if cross_module:\n return \"NUITKA_CROSS_MODULE\"\n else:\n return \"NUITKA_LOCAL_MODULE\"\n\ndef getFunctionMakerDecl( function_identifier, defaults_identifier,\n kw_defaults_identifier, annotations_identifier,\n closure_variables ):\n function_creation_arg_spec = _getFunctionCreationArgs(\n defaults_identifier = defaults_identifier,\n kw_defaults_identifier = kw_defaults_identifier,\n annotations_identifier = annotations_identifier,\n closure_variables = closure_variables\n )\n\n return CodeTemplates.template_function_make_declaration % {\n \"function_identifier\" : function_identifier,\n \"function_creation_arg_spec\" : \", \".join(\n function_creation_arg_spec\n )\n }\n\n\ndef getFunctionDirectDecl( context, function_identifier, closure_variables,\n parameter_variables, file_scope ):\n\n parameter_objects_decl = [\n \"PyObject *_python_par_\" + variable.getName()\n for variable in\n parameter_variables\n ]\n\n for closure_variable in closure_variables:\n parameter_objects_decl.append(\n closure_variable.getDeclarationCode()\n )\n\n result = CodeTemplates.template_function_direct_declaration % {\n \"file_scope\" : file_scope,\n \"function_identifier\" : function_identifier,\n \"direct_call_arg_spec\" : \", \".join( parameter_objects_decl ),\n }\n\n return result\n\ndef _getFuncDefaultValue( defaults_identifier ):\n if defaults_identifier.isConstantIdentifier():\n return defaults_identifier\n else:\n return Identifier( \"defaults\", 1 )\n\n\ndef _getFuncKwDefaultValue( kw_defaults_identifier ):\n if kw_defaults_identifier.isConstantIdentifier():\n return kw_defaults_identifier\n else:\n return Identifier( \"kwdefaults\", 1 )\n\ndef _getFuncAnnotationsValue( annotations_identifier ):\n if annotations_identifier is None:\n return NullIdentifier()\n elif annotations_identifier.isConstantIdentifier():\n return 
annotations_identifier\n else:\n return Identifier( \"annotations\", 1 )\n\ndef getGeneratorFunctionCode( context, function_name,\n function_identifier, parameters,\n closure_variables, user_variables, function_codes,\n source_ref, function_doc ):\n # We really need this many parameters here. pylint: disable=R0913\n\n # Functions have many details, that we express as variables, with many\n # branches to decide, pylint: disable=R0912,R0914,R0915\n\n parameter_variables, entry_point_code, parameter_objects_decl = getParameterParsingCode(\n function_identifier = function_identifier,\n function_name = function_name,\n parameters = parameters,\n needs_creation = context.isForCreatedFunction(),\n context = context,\n )\n\n context_decl = []\n context_copy = []\n context_free = []\n\n function_parameter_decl = [\n getLocalVariableInitCode(\n context = context,\n variable = variable,\n in_context = True\n )\n for variable in\n parameter_variables\n ]\n\n parameter_context_assign = []\n\n for variable in parameter_variables:\n parameter_context_assign.append(\n \"_python_context->python_var_%s.setVariableNameAndValue( %s, _python_par_%s );\" % (\n variable.getName(),\n getConstantCode(\n constant = variable.getName(),\n context = context\n ),\n variable.getName()\n )\n )\n\n function_var_inits = []\n local_var_decl = []\n\n for user_variable in user_variables:\n local_var_decl.append(\n getLocalVariableInitCode(\n context = context,\n variable = user_variable,\n in_context = True\n )\n )\n function_var_inits.append(\n \"_python_context->python_var_%s.setVariableName( %s );\" % (\n user_variable.getName(),\n getConstantCode(\n constant = user_variable.getName(),\n context = context\n )\n )\n )\n\n for closure_variable in closure_variables:\n assert closure_variable.isShared()\n\n context_decl.append(\n getLocalVariableInitCode(\n context = context,\n variable = closure_variable,\n in_context = True\n )\n )\n context_copy.append(\n \"_python_context->python_closure_%s.shareWith( python_closure_%s );\" % (\n closure_variable.getName(),\n closure_variable.getName()\n )\n )\n\n function_doc = getConstantCode(\n context = context,\n constant = function_doc\n )\n\n function_name_obj = getConstantCode(\n constant = function_name,\n context = context,\n )\n\n instance_context_decl = function_parameter_decl + local_var_decl\n\n if context.isForDirectCall():\n instance_context_decl = context_decl + instance_context_decl\n context_decl = []\n\n if context_decl:\n result = CodeTemplates.genfunc_context_body_template % {\n \"function_identifier\" : function_identifier,\n \"function_common_context_decl\" : indented( context_decl ),\n \"function_instance_context_decl\" : indented( instance_context_decl ),\n \"context_free\" : indented( context_free, 2 ),\n }\n elif instance_context_decl:\n result = CodeTemplates.genfunc_context_local_only_template % {\n \"function_identifier\" : function_identifier,\n \"function_instance_context_decl\" : indented( instance_context_decl )\n }\n else:\n result = \"\"\n\n if instance_context_decl or context_decl:\n context_access_instance = CodeTemplates.generator_context_access_template2 % {\n \"function_identifier\" : function_identifier\n }\n else:\n context_access_instance = \"\"\n\n function_locals = []\n\n if context.hasLocalsDict():\n function_locals += CodeTemplates.function_dict_setup.split( \"\\n\" )\n\n function_locals += function_var_inits\n\n result += CodeTemplates.genfunc_yielder_template % {\n \"function_identifier\" : function_identifier,\n \"function_body\" : 
indented( function_codes, 2 ),\n \"function_var_inits\" : indented( function_locals, 2 ),\n \"context_access\" : indented( context_access_instance, 2 ),\n }\n\n code_identifier = context.getCodeObjectHandle(\n filename = source_ref.getFilename(),\n arg_names = parameters.getCoArgNames(),\n kw_only_count = parameters.getKwOnlyParameterCount(),\n line_number = source_ref.getLineNumber(),\n code_name = function_name,\n is_generator = True,\n is_optimized = not context.hasLocalsDict()\n )\n\n if context_decl or instance_context_decl:\n if context_decl:\n context_making = CodeTemplates.genfunc_common_context_use_template % {\n \"function_identifier\" : function_identifier,\n }\n else:\n context_making = CodeTemplates.genfunc_local_context_use_template % {\n \"function_identifier\" : function_identifier,\n }\n\n context_making = context_making.split( \"\\n\" )\n\n if context.isForDirectCall():\n context_making += context_copy\n\n generator_making = CodeTemplates.genfunc_generator_with_context_making % {\n \"function_name_obj\" : function_name_obj,\n \"function_identifier\" : function_identifier,\n \"code_identifier\" : code_identifier.getCodeTemporaryRef()\n }\n else:\n generator_making = CodeTemplates.genfunc_generator_without_context_making % {\n \"function_name_obj\" : function_name_obj,\n \"function_identifier\" : function_identifier,\n \"code_identifier\" : code_identifier.getCodeTemporaryRef()\n }\n\n context_making = []\n\n if context.isForDirectCall():\n for closure_variable in closure_variables:\n parameter_objects_decl.append(\n closure_variable.getDeclarationCode()\n )\n\n result += CodeTemplates.genfunc_function_maker_template % {\n \"function_name\" : function_name,\n \"function_identifier\" : function_identifier,\n \"context_making\" : indented( context_making, 1 ),\n \"context_copy\" : indented( parameter_context_assign, 2 ),\n \"generator_making\" : generator_making,\n \"parameter_objects_decl\" : \", \".join( parameter_objects_decl ),\n }\n\n if context.isForCreatedFunction():\n result += entry_point_code\n\n return result\n\ndef getTempKeeperDecl( context ):\n tmp_keepers = context.getTempKeeperUsages()\n return [\n \"PyObjectTempKeeper%s %s;\" % ( ref_count, tmp_variable )\n for tmp_variable, ref_count in sorted( iterItems( tmp_keepers ) )\n ]\n\ndef getFunctionMakerCode( context, function_name, function_qualname,\n function_identifier, parameters, closure_variables,\n defaults_identifier, kw_defaults_identifier,\n annotations_identifier, source_ref, function_doc,\n is_generator ):\n\n function_name_obj = getConstantCode(\n context = context,\n constant = function_name\n )\n\n code_identifier = context.getCodeObjectHandle(\n filename = source_ref.getFilename(),\n arg_names = parameters.getCoArgNames(),\n kw_only_count = parameters.getKwOnlyParameterCount(),\n line_number = source_ref.getLineNumber(),\n code_name = function_name,\n is_generator = is_generator,\n is_optimized = not context.hasLocalsDict()\n )\n\n function_creation_args = _getFunctionCreationArgs(\n defaults_identifier = defaults_identifier,\n kw_defaults_identifier = kw_defaults_identifier,\n annotations_identifier = annotations_identifier,\n closure_variables = closure_variables,\n )\n\n func_defaults = _getFuncDefaultValue(\n defaults_identifier = defaults_identifier\n )\n\n func_kwdefaults = _getFuncKwDefaultValue(\n kw_defaults_identifier = kw_defaults_identifier\n )\n\n func_annotations = _getFuncAnnotationsValue(\n annotations_identifier = annotations_identifier\n )\n\n if Utils.python_version < 330 or 
function_qualname == function_name:\n function_qualname_obj = \"NULL\"\n else:\n function_qualname_obj = getConstantCode(\n constant = function_qualname,\n context = context\n )\n\n if closure_variables:\n context_copy = []\n\n for closure_variable in closure_variables:\n context_copy.append(\n \"_python_context->python_closure_%s.shareWith( python_closure_%s );\" % (\n closure_variable.getName(),\n closure_variable.getName()\n )\n )\n\n if is_generator:\n template = CodeTemplates.make_genfunc_with_context_template\n else:\n template = CodeTemplates.make_function_with_context_template\n\n result = template % {\n \"function_name_obj\" : function_name_obj,\n \"function_qualname_obj\" : function_qualname_obj,\n \"function_identifier\" : function_identifier,\n \"fparse_function_identifier\" : getParameterEntryPointIdentifier(\n function_identifier = function_identifier,\n is_method = False\n ),\n \"mparse_function_identifier\" : getMethodEntryPointIdentifier(\n function_identifier = function_identifier,\n parameters = parameters\n ),\n \"function_creation_args\" : \", \".join(\n function_creation_args\n ),\n \"code_identifier\" : code_identifier.getCodeTemporaryRef(),\n \"context_copy\" : indented( context_copy ),\n \"function_doc\" : getConstantCode(\n constant = function_doc,\n context = context\n ),\n \"defaults\" : func_defaults.getCodeExportRef(),\n \"kwdefaults\" : func_kwdefaults.getCodeExportRef(),\n \"annotations\" : func_annotations.getCodeExportRef(),\n \"module_identifier\" : getModuleAccessCode(\n context = context\n ),\n }\n else:\n if is_generator:\n template = CodeTemplates.make_genfunc_without_context_template\n else:\n template = CodeTemplates.make_function_without_context_template\n\n result = template % {\n \"function_name_obj\" : function_name_obj,\n \"function_qualname_obj\" : function_qualname_obj,\n \"function_identifier\" : function_identifier,\n \"fparse_function_identifier\" : getParameterEntryPointIdentifier(\n function_identifier = function_identifier,\n is_method = False\n ),\n \"mparse_function_identifier\" : getMethodEntryPointIdentifier(\n function_identifier = function_identifier,\n parameters = parameters\n ),\n \"function_creation_args\" : \", \".join(\n function_creation_args\n ),\n \"code_identifier\" : code_identifier.getCodeTemporaryRef(),\n \"function_doc\" : getConstantCode(\n constant = function_doc,\n context = context\n ),\n \"defaults\" : func_defaults.getCodeExportRef(),\n \"kwdefaults\" : func_kwdefaults.getCodeExportRef(),\n \"annotations\" : func_annotations.getCodeExportRef(),\n \"module_identifier\" : getModuleAccessCode(\n context = context\n ),\n }\n\n return result\n\n\ndef getFunctionContextDefinitionCode( context, function_identifier,\n closure_variables ):\n context_decl = []\n\n # Always empty now, but we may not use C++ destructors for everything in the\n # future, so leave it.\n context_free = []\n\n for closure_variable in closure_variables:\n context_decl.append(\n getLocalVariableInitCode(\n context = context,\n variable = closure_variable,\n in_context = True\n )\n )\n\n return CodeTemplates.function_context_body_template % {\n \"function_identifier\" : function_identifier,\n \"context_decl\" : indented( context_decl ),\n \"context_free\" : indented( context_free ),\n }\n\ndef getFunctionCode( context, function_name, function_identifier, parameters,\n closure_variables, user_variables, function_codes,\n function_doc, file_scope ):\n\n # Functions have many details, that we express as variables, with many\n # branches to decide, 
pylint: disable=R0912,R0914\n\n parameter_variables, entry_point_code, parameter_objects_decl = getParameterParsingCode(\n function_identifier = function_identifier,\n function_name = function_name,\n parameters = parameters,\n needs_creation = context.isForCreatedFunction(),\n context = context,\n )\n\n function_parameter_decl = [\n getLocalVariableInitCode(\n context = context,\n variable = variable,\n init_from = Identifier( \"_python_par_\" + variable.getName(), 1 )\n )\n for variable in\n parameter_variables\n ]\n\n\n # User local variable initializations\n local_var_inits = [\n getLocalVariableInitCode(\n context = context,\n variable = variable\n )\n for variable in\n user_variables\n ]\n\n function_doc = getConstantCode(\n context = context,\n constant = function_doc\n )\n\n function_locals = []\n\n if context.hasLocalsDict():\n function_locals += CodeTemplates.function_dict_setup.split(\"\\n\")\n\n function_locals += function_parameter_decl + local_var_inits\n\n result = \"\"\n\n if closure_variables and context.isForCreatedFunction():\n context_access_function_impl = CodeTemplates.function_context_access_template % {\n \"function_identifier\" : function_identifier,\n }\n else:\n context_access_function_impl = str( CodeTemplates.function_context_unused_template )\n\n if context.isForDirectCall():\n for closure_variable in closure_variables:\n parameter_objects_decl.append(\n closure_variable.getDeclarationCode()\n )\n\n result += CodeTemplates.function_direct_body_template % {\n \"file_scope\" : file_scope,\n \"function_identifier\" : function_identifier,\n \"context_access_function_impl\" : context_access_function_impl,\n \"direct_call_arg_spec\" : \",\".join( parameter_objects_decl ),\n \"function_locals\" : indented( function_locals ),\n \"function_body\" : indented( function_codes ),\n }\n else:\n result += CodeTemplates.function_body_template % {\n \"function_identifier\" : function_identifier,\n \"context_access_function_impl\" : context_access_function_impl,\n \"parameter_objects_decl\" : \", \".join( parameter_objects_decl ),\n \"function_locals\" : indented( function_locals ),\n \"function_body\" : indented( function_codes ),\n }\n\n if context.isForCreatedFunction():\n result += entry_point_code\n\n return result\n\n\ndef getSelectMetaclassCode( metaclass_identifier, bases_identifier, context ):\n if Utils.python_version < 300:\n assert metaclass_identifier is None\n\n args = [\n bases_identifier.getCodeTemporaryRef(),\n getMetaclassVariableCode( context = context )\n ]\n else:\n args = [\n metaclass_identifier.getCodeTemporaryRef(),\n bases_identifier.getCodeTemporaryRef()\n ]\n\n\n return CallIdentifier( \"SELECT_METACLASS\", args )\n\ndef getStatementTrace( source_desc, statement_repr ):\n return 'puts( \"Execute: %s \" %s );' % (\n source_desc,\n CppStrings.encodeString( statement_repr )\n )\n\n\ndef getConstantsDeclarationCode( context ):\n constants_declarations = CodeTemplates.template_constants_declaration % {\n \"constant_declarations\" : getConstantsDeclCode(\n context = context,\n for_header = True\n )\n }\n\n return CodeTemplates.template_header_guard % {\n \"header_guard_name\" : \"__NUITKA_DECLARATIONS_H__\",\n \"header_body\" : constants_declarations\n }\n\ndef getConstantsDefinitionCode( context ):\n return CodeTemplates.template_constants_reading % {\n \"constant_declarations\" : getConstantsDeclCode(\n context = context,\n for_header = False\n ),\n \"constant_inits\" : getConstantsInitCode(\n context = context\n ),\n \"needs_pickle\" : \"true\" if 
needsPickleInit() else \"false\"\n }\n\ndef getCurrentExceptionTypeCode():\n return Identifier(\n \"_exception.getType()\",\n 0\n )\n\ndef getCurrentExceptionValueCode():\n return Identifier(\n \"_exception.getValue()\",\n 0\n )\n\ndef getCurrentExceptionTracebackCode():\n return Identifier(\n \"(PyObject *)_exception.getTraceback()\",\n 0\n )\n\ndef getListOperationAppendCode( list_identifier, value_identifier ):\n return Identifier(\n \"APPEND_TO_LIST( %s, %s ), Py_None\" % (\n list_identifier.getCodeTemporaryRef(),\n value_identifier.getCodeTemporaryRef()\n ),\n 0\n )\n\ndef getSetOperationAddCode( set_identifier, value_identifier ):\n return Identifier(\n \"ADD_TO_SET( %s, %s ), Py_None\" % (\n set_identifier.getCodeTemporaryRef(),\n value_identifier.getCodeTemporaryRef()\n ),\n 0\n )\n\ndef getDictOperationSetCode( dict_identifier, key_identifier,\n value_identifier ):\n return Identifier(\n \"DICT_SET_ITEM( %s, %s, %s ), Py_None\" % (\n dict_identifier.getCodeTemporaryRef(),\n key_identifier.getCodeTemporaryRef(),\n value_identifier.getCodeTemporaryRef()\n ),\n 0\n )\n\ndef getDictOperationGetCode( dict_identifier, key_identifier ):\n return Identifier(\n \"DICT_GET_ITEM( %s, %s )\" % (\n dict_identifier.getCodeTemporaryRef(),\n key_identifier.getCodeTemporaryRef(),\n ),\n 1\n )\n\ndef getDictOperationRemoveCode( dict_identifier, key_identifier ):\n return \"DICT_REMOVE_ITEM( %s, %s );\" % (\n dict_identifier.getCodeTemporaryRef(),\n key_identifier.getCodeTemporaryRef()\n )\n\ndef getFrameLocalsUpdateCode( locals_identifier ):\n if locals_identifier.isConstantIdentifier() and \\\n locals_identifier.getConstant() == {}:\n return \"\"\n else:\n return CodeTemplates.template_frame_locals_update % {\n \"locals_identifier\" : locals_identifier.getCodeExportRef()\n }\n\ndef getFrameGuardHeavyCode( frame_identifier, code_identifier, codes,\n locals_code, context ):\n if context.isForDirectCall():\n return_code = CodeTemplates.frame_guard_cpp_return\n else:\n return_code = CodeTemplates.frame_guard_python_return\n\n tb_making = getTracebackMakingIdentifier( context )\n\n return CodeTemplates.frame_guard_full_template % {\n \"frame_identifier\" : frame_identifier,\n \"code_identifier\" : code_identifier.getCodeTemporaryRef(),\n \"codes\" : indented( codes ),\n \"module_identifier\" : getModuleAccessCode( context = context ),\n \"frame_locals\" : indented( locals_code, vert_block = True ),\n \"tb_making\" : tb_making.getCodeExportRef(),\n \"return_code\" : return_code\n }\n\ndef getFrameGuardOnceCode( frame_identifier, code_identifier, locals_identifier,\n codes, context ):\n tb_making = getTracebackMakingIdentifier( context )\n\n return CodeTemplates.frame_guard_once_template % {\n \"frame_identifier\" : frame_identifier,\n \"code_identifier\" : code_identifier.getCodeTemporaryRef(),\n \"codes\" : indented( codes ),\n \"module_identifier\" : getModuleAccessCode( context = context ),\n \"frame_locals\" : locals_identifier.getCodeExportRef(),\n \"tb_making\" : tb_making.getCodeExportRef(),\n \"return_code\" : indented( context.getReturnCode() )\n }\n\ndef getFrameGuardLightCode( frame_identifier, code_identifier, codes, context ):\n tb_making = getTracebackMakingIdentifier( context )\n\n return CodeTemplates.frame_guard_genfunc_template % {\n \"frame_identifier\" : frame_identifier,\n \"code_identifier\" : code_identifier.getCodeTemporaryRef(),\n \"codes\" : indented( codes ),\n \"module_identifier\" : getModuleAccessCode( context = context ),\n \"tb_making\" : tb_making.getCodeExportRef(),\n 
}\n\ndef getFrameGuardVeryLightCode( codes ):\n return CodeTemplates.frame_guard_listcontr_template % {\n \"codes\" : indented( codes, 0 ),\n }\n","sub_path":"nuitka/codegen/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":87811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"439995482","text":"from sv_packages.common_funcs import create_connection\n\ndef isEmpty(data):\n i = 0\n for val in data.values():\n if ((val == None) or (val == \"\")): i += 1\n return False if len(data.values()) != i else True\n\ndef postHandlerLogisticianByMaterial(data):\n print(f\"data = {data}\")\n if isEmpty(data) == True:\n SQLreq = \"SELECT * FROM T_supply_directory;\"\n print(f\"SQLreq = {SQLreq}\")\n else:\n SQLreq = \"SELECT * FROM T_supply_directory \" \\\n \"WHERE sup_code IN (SELECT tsu.sup_code FROM T_storage_units AS tsu \" \\\n \"WHERE tsu.material_code IN (SELECT tmd.material_code FROM T_material_directory AS tmd \" \\\n \"WHERE\"\n\n for item in data.items():\n key = list(item)[0]\n val = list(item)[1]\n if ((val == None) or (val == \"\")): continue\n SQLreq += f\" tmd.{key} = '{val}' AND\"\n\n SQLreq = SQLreq[: -3] # Удаление последнего AND\n SQLreq += \"));\"\n print(f\"SQLreq = {SQLreq}\")\n\n db_path = \"sv_packages/database/sqlite.db\"\n connection = create_connection(db_path)\n sups = connection.execute(SQLreq).fetchall()\n connection.close()\n return sups\n\ndef postHandlerLogisticianByBank(data):\n print(f\"data = {data}\")\n if isEmpty(data) == True:\n SQLreq = \"SELECT * FROM T_supply_directory;\"\n print(f\"SQLreq = {SQLreq}\")\n else:\n for item in data.items():\n key = list(item)[0]\n val = list(item)[1]\n if ((val == None) or (val == \"\")): data[key] = \"%\"\n SQLreq = f\"SELECT * FROM T_supply_directory WHERE bank_address LIKE '%{data['index']}, {data['city']}, {data['street']} st., {data['building']}%';\";\n print(f\"SQLreq = {SQLreq}\")\n\n db_path = \"sv_packages/database/sqlite.db\"\n connection = create_connection(db_path)\n sups = connection.execute(SQLreq).fetchall()\n connection.close()\n print(len(sups))\n buff = {\"sum\" : len(sups)}\n return buff\n\ndef postHandlerLogisticianAdd(data):\n print(f\"data = {data}\")\n\n SQLreq = f\"\"\"INSERT INTO T_supply_directory(sup_name, inn_code, org_address, bank_address, bank_acc) VALUES\"\"\"\n for item in data.items():\n val = list(item)[1]\n if ((val == None) or (val == \"\")):\n buff = {\"res\": \" не удалось добавить поставщика. Все поля должны быть заполнены!\"}\n return buff\n\n SQLreq = f\"\"\"{SQLreq} ('{data['sup_name']}', '{data['inn_code']}', '{data['org_address']}', '{data['bank_address']}', '{data['bank_acc']}');\"\"\"\n\n print(f\"SQLreq = {SQLreq}\")\n\n db_path = \"sv_packages/database/sqlite.db\"\n connection = create_connection(db_path)\n connection.execute(\"\"\"BEGIN;\"\"\").fetchall()\n connection.execute(SQLreq).fetchall()\n connection.execute(\"\"\"COMMIT;\"\"\").fetchall()\n connection.close()\n buff = {\"res\": \" поставщик успешно добавлен в базу данных!\"}\n return buff\n\ndef postHandlerLogisticianDelete(data):\n print(f\"data = {data}\")\n\n if isEmpty(data) == True:\n buff = {\"res\": \" не удалось удалить поставщика. 
Должно быть заполнено хотя бы одно поле!\"}\n return buff\n else:\n SQLreq = \"SELECT * FROM T_supply_directory WHERE\";\n\n for item in data.items():\n key = list(item)[0]\n val = list(item)[1]\n if ((val == None) or (val == \"\")): continue\n SQLreq += f\" {key} = '{val}' AND\"\n\n SQLreq = SQLreq[: -3] # Удаление последнего AND\n SQLreq += \";\"\n print(f\"SQLreq = {SQLreq}\")\n\n db_path = \"sv_packages/database/sqlite.db\"\n connection = create_connection(db_path)\n sups = connection.execute(SQLreq).fetchall()\n connection.close()\n\n if len(sups) == 0:\n buff = {\"res\": \" не удалось удалить поставщика. Поставщик не найден!\"}\n return buff\n\n SQLreq = f\"\"\"DELETE FROM T_supply_directory WHERE\"\"\"\n\n for item in data.items():\n key = list(item)[0]\n val = list(item)[1]\n if ((val == None) or (val == \"\")): continue\n SQLreq += f\" {key} = '{val}' AND\"\n\n SQLreq = SQLreq[: -3] # Удаление последнего AND\n SQLreq += \";\"\n\n print(f\"SQLreq = {SQLreq}\")\n\n db_path = \"sv_packages/database/sqlite.db\"\n connection = create_connection(db_path)\n connection.execute(\"\"\"BEGIN;\"\"\").fetchall()\n connection.execute(SQLreq).fetchall()\n connection.execute(\"\"\"COMMIT;\"\"\").fetchall()\n connection.close()\n buff = {\"res\": \" поставщик успешно удалён из базы данных!\"}\n return buff\n","sub_path":"kalaida_schvyschkov/server/sv_packages/logistician_funcs.py","file_name":"logistician_funcs.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"515053482","text":"import math\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import optim, Tensor\nfrom torch import nn\n\nimport torchvision\nimport pytorch_lightning as pl\n\n\"\"\"\nDefines the VGG encoder with beta-TCVAE loss module. As the decoder for the\nMNIST and dSprites data, an InfoGAN, and for the OCT data, a DCGAN architecture is\nautomatically selected. 
Also the VGG size depends on the data.\n\"\"\"\n\nclass betaTCVAE_VGG(pl.LightningModule):\n def __init__(self,\n latent_dim=10,\n input_dim=784,\n lr=0.001,\n anneal_steps: int = 200,\n alpha: float = 1.,\n beta: float = 1.,\n gamma: float = 1.,\n dataset = \"mnist\",\n c = 64\n ):\n super(betaTCVAE_VGG, self).__init__()\n self.save_hyperparameters()\n self.c = c\n self.num_iter = 0\n self.dataset = dataset\n\n if dataset == \"mnist\":\n self.scale = 7\n self.trainset_size = 50000\n elif dataset==\"dSprites\":\n self.scale = 16\n self.trainset_size = 600000\n elif dataset == \"OCT\":\n self.scale = 50\n self.trainset_size = 85600\n\n # Encoder\n\n if dataset == \"OCT\":\n model = torchvision.models.vgg19_bn()\n model = list(model.features.children())[1:53]\n else:\n model = torchvision.models.vgg16_bn()\n model = list(model.features.children())[1:36]\n\n self.enc_conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.vgg = nn.Sequential(*model)\n self.enc_avgpool = nn.AdaptiveAvgPool2d(output_size=(1,1))\n\n self.enc_fc = nn.Linear(in_features=512, out_features=10*c)\n self.fc_mu = nn.Linear(in_features=10*c, out_features=latent_dim)\n self.fc_logvar = nn.Linear(in_features=10*c, out_features=latent_dim) \n\n # Decoder\n\n if dataset == \"OCT\":\n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(in_channels=latent_dim, out_channels=self.scale * 8, kernel_size=12, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(self.scale * 8),\n nn.ReLU(True),\n nn.ConvTranspose2d(in_channels=self.scale * 8, out_channels=self.scale * 4, kernel_size=12, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(self.scale * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(in_channels=self.scale * 4, out_channels=self.scale * 2, kernel_size=14, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(self.scale * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(in_channels=self.scale * 2, out_channels=self.scale, kernel_size=14, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(self.scale),\n nn.ReLU(True),\n nn.ConvTranspose2d(in_channels=self.scale, out_channels=1, kernel_size=14, stride=2, padding=1, bias=False),\n nn.Sigmoid()\n )\n else:\n self.decoder = nn.Sequential(\n nn.BatchNorm1d(latent_dim),\n nn.Linear(in_features=latent_dim, out_features=c*2*self.scale*self.scale),\n nn.BatchNorm1d(c*2*self.scale*self.scale),\n nn.Unflatten(1, (self.c*2, self.scale, self.scale)),\n nn.ConvTranspose2d(in_channels=c*2, out_channels=c, kernel_size=4, stride=2, padding=1),\n nn.ReLU(), nn.BatchNorm2d(c),\n nn.ConvTranspose2d(in_channels=c, out_channels=1, kernel_size=4, stride=2, padding=1),\n nn.Sigmoid()\n )\n\n def encode(self, x):\n x = self.enc_conv1(x)\n x = self.vgg(x)\n x = self.enc_avgpool(x)\n\n try:\n x = torch.squeeze(x, dim=3)\n x = torch.squeeze(x, dim=2)\n except IndexError:\n pass\n\n x = F.relu(self.enc_fc(x))\n mu = self.fc_mu(x)\n log_var = self.fc_logvar(x)\n return mu, log_var\n\n def sampling(self, mu, log_var):\n std = torch.exp(log_var * 0.5)\n q = torch.distributions.Normal(mu, std)\n z = q.rsample()\n return z\n\n def decode(self, z):\n if self.dataset==\"OCT\":\n z = torch.unsqueeze(z,2)\n z = torch.unsqueeze(z,3)\n\n x = self.decoder(z)\n return x\n\n def forward(self, x):\n mu, log_var = self.encode(x)\n return mu\n\n def log_density_gaussian(self, x: Tensor, mu: Tensor, logvar: Tensor):\n norm = - 0.5 * (math.log(2 * math.pi) + logvar)\n log_density = norm - 0.5 * ((x - mu) ** 2 * torch.exp(-logvar))\n return log_density\n\n def loss(self, recons, x, mu, log_var, z):\n # 
Inspired by: https://github.com/YannDubs/disentangling-vae/blob/7b8285baa19d591cf34c652049884aca5d8acbca/disvae/models/losses.py#L316\n recons_loss = F.binary_cross_entropy(\n recons.view(-1, self.hparams.input_dim).clamp(0, 1),\n x.view(-1, self.hparams.input_dim),\n reduction='sum')\n\n log_q_zx = self.log_density_gaussian(z, mu, log_var).sum(dim=1)\n\n zeros = torch.zeros_like(z)\n log_p_z = self.log_density_gaussian(z, zeros, zeros).sum(dim=1)\n\n batch_size, latent_dim = z.shape\n mat_log_q_z = self.log_density_gaussian(z.view(batch_size, 1, latent_dim),\n mu.view(1, batch_size,\n latent_dim),\n log_var.view(1, batch_size, latent_dim))\n\n\n # Estimate the three KL terms (log(q(z))) via importance sampling\n strat_weight = (self.trainset_size - batch_size + 1) / \\\n (self.trainset_size * (batch_size - 1))\n\n importance_weights = torch.Tensor(batch_size, batch_size).fill_(\n 1 / (batch_size - 1)).to(x.device)\n\n importance_weights.view(-1)[::batch_size] = 1 / self.trainset_size\n importance_weights.view(-1)[1::batch_size] = strat_weight\n importance_weights[batch_size - 2, 0] = strat_weight\n log_importance_weights = importance_weights.log()\n\n mat_log_q_z += log_importance_weights.view(batch_size, batch_size, 1)\n\n log_q_z = torch.logsumexp(mat_log_q_z.sum(2), dim=1, keepdim=False)\n log_prod_q_z = torch.logsumexp(\n mat_log_q_z, dim=1, keepdim=False).sum(1)\n\n # Three KL Term components\n mi_loss = (log_q_zx - log_q_z).mean()\n tc_loss = (log_q_z - log_prod_q_z).mean()\n kld_loss = (log_prod_q_z - log_p_z).mean()\n\n if self.training:\n self.num_iter += 1\n anneal_rate = min(0 + 1 * self.num_iter /\n self.hparams.anneal_steps, 1)\n else:\n anneal_rate = 1\n\n loss = recons_loss/batch_size + \\\n self.hparams.alpha * mi_loss + \\\n self.hparams.beta * tc_loss + \\\n self.hparams.gamma * anneal_rate * kld_loss\n\n return loss\n\n def training_step(self, batch, batch_idx):\n x, _ = batch\n mu, log_var = self.encode(x)\n z = self.sampling(mu, log_var)\n\n recons = self.decode(z)\n\n vae_loss = self.loss(recons, x, mu, log_var, z)\n\n self.log('loss', vae_loss, on_epoch=False, prog_bar=True, on_step=True,\n sync_dist=True if torch.cuda.device_count() > 1 else False)\n\n return vae_loss\n\n def validation_step(self, batch, batch_idx):\n x, _ = batch\n mu, log_var = self.encode(x)\n z = self.sampling(mu, log_var)\n\n recons = self.decode(z)\n\n vae_loss = self.loss(recons, x, mu, log_var, z)\n\n self.log('val_loss', vae_loss, on_epoch=True, prog_bar=True,\n sync_dist=True if torch.cuda.device_count() > 1 else False)\n \n return vae_loss\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)\n\n def get_progress_bar_dict(self):\n tqdm_dict = super().get_progress_bar_dict()\n if 'v_num' in tqdm_dict:\n del tqdm_dict['v_num']\n return tqdm_dict\n","sub_path":"src/models/encoder/VAE_loss/betaTCVAE_VGG.py","file_name":"betaTCVAE_VGG.py","file_ext":"py","file_size_in_byte":8199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"283628830","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport random\nimport threading\nimport logging\n\nfrom six.moves import xrange\n\nimport numpy as np\nimport tensorflow as tf\n\nflags = tf.app.flags\n\nflags.DEFINE_string(\"save_path\", None, \"Directory to write the model and training sammaries.\")\nflags.DEFINE_string(\"train_data\", None, 
\"Training text file.\")\nflags.DEFINE_integer(\"embedding_size\", 100, \"The embedding dimension size.\")\nflags.DEFINE_integer(\"samples_to_train\", 1, \"Number of samples to train(*Million).\")\nflags.DEFINE_float(\"learning_rate\", 0.025, \"Initial learning rate.\")\nflags.DEFINE_integer(\"num_neg_samples\", 5, \"Negative samples per training example.\")\nflags.DEFINE_integer(\"batch_size\", 1, \"Number of training examples processed per step.\")\nflags.DEFINE_integer(\"num_threads\", 4, \"Number of threads training step.\")\nflags.DEFINE_integer(\"min_count\", 5, \"The min_count of edges for it to be included\")\nflags.DEFINE_integer(\"order\", 2, \"Model type, preserve first-order or second-order proximity.\")\nFLAGS = flags.FLAGS\n\nclass Options(object):\n '''Options used by LINE model.'''\n\n def __init__(self):\n #Model options.\n\n #Embedding dimension.\n self.emb_dim = FLAGS.embedding_size\n \n #training text file.\n self.train_data = FLAGS.train_data\n \n #Number of Negative samples per example.\n self.num_samples = FLAGS.num_neg_samples\n \n #The initial learning rate.\n self.learning_rate = FLAGS.learning_rate\n\n #Number of samples to train.\n self.samples_to_train = FLAGS.samples_to_train * 1000000\n\n #Number of threads.\n self.num_threads = FLAGS.num_threads\n\n #Number of examples for one training step.\n self.batch_size = FLAGS.batch_size\n\n #The minium number of edges for it to be included.\n self.min_count = FLAGS.min_count\n\n #Where to write out embeddings.\n self.save_path = FLAGS.save_path\n\n #order, can be 1 or 2.\n self.order = FLAGS.order\n\nclass LINE(object):\n '''LINE model.'''\n \n def __init__(self, options, session):\n self._options = options\n self._session = session\n self._vertex2id = {}\n self._id2vertex = []\n self._vid2degree = []\n self._edge_source_id = []\n self._edge_target_id = []\n self._edge_weight = []\n self._alias = []\n self._prob = []\n self._edge_count = 0\n self._read_data(self._options.train_data)\n self.InitAliasTable()\n self.build_graph()\n\n def _read_data(self, filename):\n '''Read data from training file.'''\n for line in open(filename):\n chunkes = line.strip().split()\n assert len(chunkes) == 3\n [name_v1, name_v2, weight] = chunkes\n weight = float(weight)\n '''Add current vertex to dict if it is not included yet.'''\n if not self._vertex2id.has_key(name_v1):\n self._vertex2id[name_v1] = len(self._vertex2id)\n self._id2vertex.append(name_v2)\n self._vid2degree.append(0)\n \n if not self._vertex2id.has_key(name_v2):\n self._vertex2id[name_v2] = len(self._vertex2id)\n self._id2vertex.append(name_v2)\n self._vid2degree.append(0)\n \n vid = self._vertex2id[name_v1]\n self._vid2degree[vid] += weight\n self._edge_source_id.append(vid)\n \n vid = self._vertex2id[name_v2]\n self._vid2degree[vid] += weight\n self._edge_target_id.append(vid)\n\n self._edge_weight.append(weight)\n\n self._edge_num = len(self._edge_weight)\n self._options.vertex_size = len(self._id2vertex)\n logging.info(\"Edge number : %d\" % (self._edge_num))\n logging.info(\"Vertex number : %d\" % (self._options.vertex_size))\n\n def InitAliasTable(self):\n '''The alias sampling algo, which is used to sample an edge in O(1) time.'''\n norm_prob = []\n large_block = []\n small_block = []\n\n sum = 0.0\n\n for weight in self._edge_weight:\n sum += float(weight)\n\n for k in xrange(self._edge_num):\n norm_prob.append(self._edge_weight[k] * self._edge_num / sum)\n \n num_small_block = 0\n num_large_block = 0\n cur_small_block = 0\n cur_large_block = 0\n\n for k in 
xrange(self._edge_num - 1, -1, -1):\n if norm_prob[k] < 1:\n small_block.append(k)\n else:\n large_block.append(k)\n\n num_small_block = len(small_block)\n num_large_block = len(large_block)\n assert (num_small_block + num_large_block == self._edge_num)\n\n for i in xrange(self._edge_num):\n self._alias.append(0)\n self._prob.append(0.0)\n\n while True:\n if num_small_block ==0 or num_large_block == 0:\n break\n num_small_block -= 1\n num_large_block -= 1\n cur_small_block = small_block[num_small_block]\n cur_large_block = large_block[num_large_block]\n self._prob[cur_small_block] = norm_prob[cur_small_block]\n self._alias[cur_small_block] = cur_large_block\n norm_prob[cur_large_block] = norm_prob[cur_small_block] + norm_prob[cur_small_block] - 1.0\n if norm_prob[cur_large_block] < 1:\n small_block[num_small_block] = cur_large_block\n num_small_block += 1\n else:\n large_block[num_large_block] = cur_large_block\n num_large_block += 1\n while num_large_block > 0:\n num_large_block -= 1\n self._prob[large_block[num_large_block]] = 1\n while num_small_block > 0:\n num_small_block -= 1\n self._prob[small_block[num_small_block]] = 1\n logging.info(\"prob table length:%d\" % (len(self._prob)))\n logging.info(\"alias table length:%d\" % (len(self._alias)))\n\n def SampleAnEdge(self):\n '''sample an edge in O(1).'''\n opts = self._options\n self._edge_count += opts.batch_size\n inputs = np.ndarray(shape = (opts.batch_size), dtype = np.int32)\n labels = np.ndarray(shape = (opts.batch_size), dtype = np.int32)\n for i in range(opts.batch_size):\n r_value1 = random.random()\n r_value2 = random.random()\n k = int(self._edge_num * r_value1)\n k = k if r_value2 < self._prob[k] else self._alias[k]\n inputs[i] = self._edge_source_id[k]\n labels[i] = self._edge_target_id[k]\n return inputs, labels\n\n def forward(self, sources, targets):\n '''Build the graph for the forward pass.'''\n opts = self._options\n #embedding:[vertex_size,emb_dim]\n init_width = 0.5\n #emb_vertex, order=1\n emb_vertex = tf.Variable(tf.random_uniform([opts.vertex_size, opts.emb_dim], -init_width, init_width), name = 'emb_vertex')\n #emb_context, order=2\n emb_context = tf.Variable(tf.zeros([opts.vertex_size, opts.emb_dim], dtype = tf.float32), name = 'emb_context')\n self._emb_vertex = emb_vertex\n self._emb_context = emb_context\n self.global_step = tf.Variable(0, trainable=False, name='global_step',dtype=tf.int32)\n targets_matrix = tf.reshape(tf.cast(targets, dtype = tf.int64), [opts.batch_size, 1])\n #Negative sampling.\n sampled_ids,_,_ = (tf.nn.fixed_unigram_candidate_sampler(\n true_classes = targets_matrix,\n num_true = 1,\n num_sampled = opts.num_samples,\n unique = True,\n range_max=opts.vertex_size,\n distortion = 0.75,\n unigrams = self._vid2degree\n ))\n #embedding for sources\n example_emb = tf.nn.embedding_lookup(emb_vertex, sources)\n\n #embedding for targets\n if opts.order == 1:\n target_emb = tf.nn.embedding_lookup(emb_vertex, targets)\n elif opts.order == 2:\n target_emb = tf.nn.embedding_lookup(emb_context, targets)\n else:\n logging.error(\"unsupported order: %d\" % (opts.order))\n sys.exit(1)\n\n #true logits\n true_logits = tf.reduce_sum(tf.mul(example_emb, target_emb), 1)\n\n #embeddings for negative-sampled vertex\n if opts.order == 1:\n sampled_emb = tf.nn.embedding_lookup(emb_vertex, sampled_ids)\n else:\n sampled_emb = tf.nn.embedding_lookup(emb_context, sampled_ids)\n #sampled logits\n sampled_logits = tf.matmul(example_emb,\n sampled_emb,\n transpose_b = True)\n\n return true_logits, sampled_logits\n\n 
def nce_loss(self, true_logits, sampled_logits):\n '''build the graph for the NCE loss.'''\n\n #cross-entropy(logits, labels)\n opts = self._options\n true_xent = tf.nn.sigmoid_cross_entropy_with_logits(\n true_logits, tf.ones_like(true_logits))\n sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits( \n sampled_logits, tf.zeros_like(sampled_logits))\n \n nce_loss_tensor = (tf.reduce_sum(true_xent) + \n tf.reduce_sum(sampled_xent)) / opts.batch_size\n return nce_loss_tensor\n\n def optimize(self, loss):\n '''build the graph to optimize the loss function.'''\n opts = self._options\n #update learning rate.\n #self._lr = tf.train.exponential_decay(opts.learning_rate, self.global_step, 10000, 0.96, staircase=True)\n self._lr = opts.learning_rate * (1.0 - tf.cast(self.global_step, tf.float32) / float(opts.samples_to_train))\n optimizer = tf.train.GradientDescentOptimizer(self._lr)\n train = optimizer.minimize(loss,\n global_step = self.global_step,\n gate_gradients = optimizer.GATE_NONE\n )\n self._train = train\n \n def build_graph(self):\n '''build the graph for the full model.'''\n opts = self._options\n sources = tf.placeholder(tf.int32, shape = [opts.batch_size])\n self._sources = sources\n targets = tf.placeholder(tf.int32, shape = [opts.batch_size])\n self._targets = targets\n true_logits, sampled_logits = self.forward(sources, targets)\n loss = self.nce_loss(true_logits, sampled_logits)\n self._loss = loss\n self.optimize(loss)\n \n #Initialize all variables.\n tf.initialize_all_variables().run()\n \n #create a saver.\n self.saver = tf.train.Saver([self._emb_vertex])\n\n def _train_thread_body(self):\n opts = self._options\n while True:\n _sources, _targets = self.SampleAnEdge()\n feed_dict = {self._sources: _sources, self._targets: _targets}\n self._session.run([self._train], feed_dict=feed_dict)\n if self._edge_count >= opts.samples_to_train:\n break\n\n def train(self):\n '''trian the model.'''\n opts = self._options\n workers = []\n for _ in xrange(opts.num_threads):\n t = threading.Thread(target=self._train_thread_body)\n t.start()\n workers.append(t)\n \n #print training info.\n loss_list = []\n last_count, last_time = self._edge_count, time.time()\n while True:\n time.sleep(0.5)\n _sources, _targets = self.SampleAnEdge()\n feed_dict = {self._sources: _sources, self._targets: _targets}\n (lr, loss, _) = self._session.run([self._lr, self._loss, self._train],feed_dict=feed_dict)\n now = time.time()\n rate = (self._edge_count - last_count) / (now - last_time)\n progress = 100 * (self._edge_count) / float(opts.samples_to_train)\n last_time = now\n last_count = self._edge_count\n loss_list.append(loss)\n average_loss = np.mean(np.array(loss_list))\n print (\"Learning rate:%5.3f loss:%6.2f average loss: %f edges/sec:%8.0f progress:%f%%\\r\" % (\n lr, loss, average_loss, rate, progress \n ), end = \"\")\n sys.stdout.flush()\n if self._edge_count >= opts.samples_to_train:\n break\n print (\"\")\n for t in workers:\n t.join()\n\ndef main(_):\n logging.basicConfig(level = logging.INFO)\n if not FLAGS.train_data:\n logging.error('no train file.')\n sys.exit(1)\n opts = Options()\n with tf.Graph().as_default(), tf.Session() as session:\n with tf.device(\"/cpu:0\"):\n model = LINE(opts, session)\n model.train()\n model.saver.save(session, \n os.path.join(opts.save_path,\"emb.txt\"),global_step=model.global_step)\n\nif __name__ == '__main__':\n '''This is for test.'''\n 
tf.app.run()\n\n\n","sub_path":"LINE.py","file_name":"LINE.py","file_ext":"py","file_size_in_byte":13139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"211895739","text":"from collections import defaultdict\nfrom math import inf\nfrom math import sqrt\nimport random\nimport csv\n\n\ndef point_avg(points):\n \"\"\"\n Accepts a list of points, each with the same number of dimensions.\n (points can have more dimensions than 2)\n \n Returns a new point which is the center of all the points.\n \"\"\"\n\n new_p = []\n\n p_len = len(points)\n d_len = len(points[0])\n\n for d in range(d_len):\n sum = 0\n for p in range(p_len):\n sum = sum + points[p][d]\n \n new_p.append((float(sum))/(float(p_len)))\n \n return new_p\n\n\ndef update_centers(data_set, assignments):\n \"\"\"\n Accepts a dataset and a list of assignments; the indexes \n of both lists correspond to each other.\n Compute the center for each of the assigned groups.\n Return `k` centers in a list\n \"\"\"\n\n\n new_set = defaultdict(list)\n center = []\n \n \n\n for (assignment,point)in zip(assignments,data_set):\n new_set[assignment].append(point)\n \n for i in new_set.keys():\n center.append(point_avg(new_set[i]))\n\n\n return center\n\n\ndef assign_points(data_points, centers):\n \"\"\"\n \"\"\"\n assignments = []\n for point in data_points:\n shortest = inf # positive infinity\n shortest_index = 0\n for i in range(len(centers)):\n val = distance(point, centers[i])\n if val < shortest:\n shortest = val\n shortest_index = i\n assignments.append(shortest_index)\n return assignments\n\n\ndef distance(a, b):\n \"\"\"\n Returns the Euclidean distance between a and b\n \"\"\"\n dim = len(a)\n\n sum = 0\n\n for d in range(dim):\n elem = (a[d]-b[d])**2\n sum = sum + elem\n \n return sqrt(sum)\n\n \n\n\ndef generate_k(data_set, k):\n \"\"\"\n Given `data_set`, which is an array of arrays,\n return a random set of k points from the data_set\n \"\"\"\n if k<= 0 or k > len(data_set):\n raise ValueError(\"k is not valid\")\n return random.sample(data_set,k)\n\n\ndef get_list_from_dataset_file(dataset_file):\n data = []\n\n with open(dataset_file) as f:\n datas = csv.reader(f)\n for l in datas:\n row = []\n for n in l:\n elem = int(n)\n row.append(elem)\n data.append(row)\n return data\n\n\n\n\ndef cost_function(clustering):\n c = 0\n\n for k in clustering.keys():\n lst = clustering[k]\n center_point = point_avg(lst)\n\n for p in lst:\n c = c + distance(center_point,p)\n \n return c\n\n\n\ndef k_means(dataset_file, k):\n dataset = get_list_from_dataset_file(dataset_file)\n k_points = generate_k(dataset, k)\n assignments = assign_points(dataset, k_points)\n old_assignments = None\n while assignments != old_assignments:\n new_centers = update_centers(dataset, assignments)\n old_assignments = assignments\n assignments = assign_points(dataset, new_centers)\n clustering = defaultdict(list)\n for assignment, point in zip(assignments, dataset):\n clustering[assignment].append(point)\n print(\"hi\")\n return clustering\n","sub_path":"clustering/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"579037441","text":"def hammingDistance(s1, s2):\n dist = 0\n for i in range(len(s1)):\n if s1[i] != s2[i]:\n dist += 1\n return dist\n\ndef approxPatternMatching(pattern, text, d):\n indices = []\n for i in range(len(text) - len(pattern)):\n currText = text[i: i + len(pattern)]\n if 
hammingDistance(currText, pattern) <= d:\n indices.append(str(i))\n return indices\n\nif __name__ == '__main__':\n with open('rosalind_ba1h.txt', 'r+') as f:\n pattern = f.readline().rstrip()\n text = f.readline().rstrip()\n d = int(f.readline())\n\n indices = approxPatternMatching(pattern, text, d)\n\n print(' '.join(indices))\n","sub_path":"ApproximatePatternMatching.py","file_name":"ApproximatePatternMatching.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"214614213","text":"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\nfrom traffic import Traffic, TrafficMultiClass, GTSDBMultiClass\n\n\"\"\"Factory method for easily getting imdbs by name.\"\"\"\n\n__sets = {}\n\n\n# Set up traffic\n# path to devkit example '/home/szy/INRIA'\ntraffic_devkit_path = \"/mnt/nvme/traffic-rcnn/\"\ntraffic_multi_class_devkit_path = \"/mnt/nvme/traffic-multiclass/\"\ntraffic_dual_devkit_path = \"/mnt/nvme/traffic-dual-purpose/\"\ngtsrb_devkit_path = \"/mnt/nvme/gtsrb-rcnn/\"\ngtsdb_devkit_path = \"/mnt/nvme/gtsdb-rcnn/\"\ngtsdb_multi_class_devkit_path = \"/mnt/nvme/gtsdb-rcnn-multi/\"\n\nfor split in ['train', 'test', 'test2']:\n name = '{}_{}'.format('traffic', split)\n __sets[name] = (\n lambda split=split: Traffic(split, traffic_devkit_path))\n\nfor split in ['train', 'test']:\n name = '{}_{}'.format('traffic_multi_class', split)\n __sets[name] = (\n lambda split=split: TrafficMultiClass(\n split, traffic_multi_class_devkit_path))\n\nfor split in ['train', 'test', 'test2']:\n name = '{}_{}'.format('traffic_dual', split)\n __sets[name] = (\n lambda split=split: Traffic(\n split, traffic_dual_devkit_path))\n\nfor split in ['train', 'test']:\n name = '{}_{}'.format('gtsrb', split)\n __sets[name] = (\n lambda split=split: Traffic(split, gtsrb_devkit_path))\n\nfor split in ['train', 'test', 'full']:\n name = '{}_{}'.format('gtsdb', split)\n __sets[name] = (\n lambda split=split: Traffic(split, gtsdb_devkit_path))\n\nfor split in ['train', 'test', 'full']:\n name = '{}_{}'.format('gtsdb_multi_class', split)\n __sets[name] = (\n lambda split=split: GTSDBMultiClass(\n split, gtsdb_multi_class_devkit_path))\n\n\ndef get_imdb(name):\n \"\"\"Get an imdb (image database) by name.\"\"\"\n if name not in __sets:\n raise KeyError('Unknown dataset: {}'.format(name))\n return __sets[name]()\n\n\ndef list_imdbs():\n \"\"\"List all registered imdbs.\"\"\"\n return __sets.keys()\n","sub_path":"misc/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"170022631","text":"from django.utils.deprecation import MiddlewareMixin\nimport re\nfrom django.shortcuts import render,HttpResponse\nfrom rbac import models\n\nclass Permisson(MiddlewareMixin):\n\n\n def process_request(self,request):\n\n current_path = request.path\n\n urls=['/','/login/','/reg/','/admin.*/',]\n for i in urls:\n i = '^' + i + '$'\n re_result=re.match(i,current_path)\n if re_result:\n return None\n\n permisson=request.session.get('permisson_dict')\n if permisson:\n for i in permisson.values():\n for j in i['url']:\n url = '^' + j + '$'\n re_result=re.match(url,current_path)\n\n if re_result:\n request.action = i['action']\n return 
None\n else:\n return HttpResponse('没有访问权限!!')\n\n else:\n return HttpResponse('请登录!!')","sub_path":"后台管理/rbac/service/rbac_midware.py","file_name":"rbac_midware.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"650418033","text":"from random import randint\n\nerrate = randint(1, 10)\nversuche = 5\n\ncontainer = [1,2,3,4,5,6,7,8,9,10] \n\nfor i in range(versuche):\n\trate = input(\"Geben Sie eine Zahl zwischen 1 und 10 ein: \")\n\tprint(rate)\n\twhile int(rate) not in container:\n\t\trate = input(\"Ihr Input war nicht zwischen 1 und 10. Geben Sie eine Zahl zwischen 1 und 10 ein: \")\n\t\tprint(rate)\n\t\t\n\tif rate == str(errate):\n\t\tprint(\"Super! Sie haben die Zahl erraten. \")\n\t\tbreak \n\t\t\n\tif i == 4:\n\t\tprint(\"Die gesuchte Zahl wäre gewesen: \" + str(errate))\n\telse:\n\t\tcontinue\n\n","sub_path":"tomaselli/Aufgabe3a_Tomaselli.py","file_name":"Aufgabe3a_Tomaselli.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"24387601","text":"# 自定义异常处理\nfrom rest_framework.views import exception_handler\nfrom rest_framework.views import Response\nfrom rest_framework import status\n\n\n# 将仅针对由引发的异常生成的响应调用异常处理程序。它不会用于视图直接返回的任何响应\ndef custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n\n if response is None:\n return Response({\n 'message': '服务器错误'\n }, status=status.HTTP_500_INTERNAL_SERVER_ERROR, exception=True)\n\n else:\n # 这个循环是取第一个错误的提示用于渲染\n for index, value in enumerate(response.data):\n if index == 0:\n key = value\n value = response.data[key]\n\n if isinstance(value, str):\n message = value\n else:\n message = key + value[0]\n # print('123 = %s - %s - %s' % (context['view'], context['request'].method, exc))\n return Response({\n 'message': message\n }, status=response.status_code, exception=True)\n\n return response\n","sub_path":"utils/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"155980961","text":"from __future__ import annotations\n\nimport random\nfrom fractions import Fraction\nfrom typing import Any, Callable, Union\n\nimport aiger_discrete\nimport attr\nfrom aiger_discrete import FiniteFunc\nfrom aiger_discrete.discrete import project, TIMED_NAME\nfrom pyrsistent import pmap\nfrom pyrsistent.typing import PMap\n\n\nProbability = Union[float, Fraction]\nDistribution = Callable[[Any], Union[float, Fraction]]\n\n\ndef rejection_sample(name: str, func: FiniteFunc, dist: Distribution) -> Any:\n size = func.imap[name].size\n encoding = func.input_encodings.get(name, aiger_discrete.Encoding())\n while True:\n val = encoding.decode(random.getrandbits(size))\n prob = dist(val)\n\n if prob == 1:\n return val\n elif prob == 0:\n continue\n elif random.random() <= dist(val):\n return val\n\n\ndef to_finite_func(circ) -> FiniteFunc:\n if isinstance(circ, FiniteFunc):\n return circ\n return aiger_discrete.from_aigbv(circ.aigbv)\n\n\n@attr.s(frozen=True, auto_attribs=True)\nclass PCirc:\n \"\"\"Wrapper around AIG representing a function with some random inputs.\"\"\"\n circ: FiniteFunc = attr.ib(converter=to_finite_func)\n dist_map: PMap[str, Probability] = attr.ib(converter=pmap, default=pmap())\n\n def __attrs_post_init__(self):\n \"\"\"Make sure unneeded distributions are forgotten.\"\"\"\n dist_map = 
project(self.dist_map, self.circ.inputs)\n object.__setattr__(self, \"dist_map\", dist_map)\n\n @property\n def inputs(self): return self.circ.inputs - set(self.dist_map.keys())\n\n @property\n def outputs(self): return self.circ.outputs\n\n @property\n def latches(self): return self.circ.latches\n\n @property\n def latch2init(self): return self.circ.latch2init\n\n @property\n def aig(self): return self.circ.aig\n\n @property\n def aigbv(self): return self.circ.aigbv\n\n def assume(self, aigbv_like) -> PCirc:\n \"\"\"Return Probabilistic Circuit with new assumption over the inputs.\"\"\"\n return attr.evolve(self, circ=self.circ.assume(aigbv_like))\n\n def __rshift__(self, other) -> PCirc:\n circ = self.circ >> canon(other).circ\n return PCirc(circ, other.dist_map + self.dist_map)\n\n def __lshift__(self, other) -> PCirc:\n return canon(other) >> self\n\n def __or__(self, other) -> PCirc:\n if set(self.dist_map.keys()) & set(self.dist_map.keys()):\n raise ValueError(\"Circuits have conflicting dist_maps.\")\n circ = self.circ | canon(other).circ\n return PCirc(circ, self.dist_map + other.dist_map)\n\n def __call__(self, inputs, latches=None, max_tries=1_000):\n inputs = dict(inputs)\n for count in range(max_tries + 1):\n inputs.update({\n k: rejection_sample(k, self.circ, dist)\n for k, dist in self.dist_map.items()\n })\n\n try:\n return self.circ(inputs=inputs, latches=latches)\n except ValueError: # Invalid input selected.\n # TODO: build BDD to uniformly sample.\n if count <= max_tries:\n continue\n raise RuntimeError(\"Rejection Sampling failed!\")\n\n def __getitem__(self, others) -> PCirc:\n circ = self.circ[others]\n kind, relabels = others\n\n kwargs = {}\n if kind == 'i':\n evolver = self.dist_map.evolver()\n for old, new in relabels.items():\n if old not in self.dist_map:\n continue\n evolver[new] = self.dist_map[old]\n del evolver[old]\n kwargs['dist_map'] = evolver.persistent()\n\n return attr.evolve(self, circ=circ, **kwargs)\n\n def loopback(self, *wirings) -> PCirc:\n inputs = self.inputs\n assert all(w['input'] in inputs for w in wirings)\n circ = self.circ.loopback(*wirings)\n return attr.evolve(self, circ=circ)\n\n def unroll(self,\n horizon, *,\n init=True,\n omit_latches=True,\n only_last_outputs=False) -> PCirc:\n circ = self.circ.unroll(\n horizon=horizon,\n init=init,\n omit_latches=omit_latches,\n only_last_outputs=only_last_outputs,\n )\n dist_map = {}\n for timed_name in circ.inputs:\n # Should always match because of unrolling.\n name = TIMED_NAME.match(timed_name).groups()[0]\n if name not in self.dist_map:\n continue\n dist_map[timed_name] = self.dist_map[name]\n\n return attr.evolve(self, circ=circ, dist_map=dist_map)\n\n simulator = aiger_discrete.FiniteFunc.simulator\n simulate = aiger_discrete.FiniteFunc.simulate\n\n def use_faircoins(self) -> PCirc:\n # TODO: return PCirc where\n # 1. every random variable's encoding is i.d.\n # 2. 
every random distribution is over fair coins.\n pass\n\n def with_distmap(self, dist_map) -> PCirc:\n \"\"\"Update the distributions over the inputs.\"\"\"\n return attr.evolve(self, dist_map=self.dist_map + pmap(dist_map))\n\n\ndef canon(circ) -> PCirc:\n if isinstance(circ, PCirc):\n return circ\n return PCirc(circ) # Assume aigbv like.\n\n\n__all__ = ['PCirc']\n","sub_path":"aiger_coins/pcirc.py","file_name":"pcirc.py","file_ext":"py","file_size_in_byte":5254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"127492394","text":"import Orange\nimport numpy as np\n\n#TEST DATA TO NUMPY\ntab = Orange.data.Table(\"Test_Data_With_Domain_Data.tab\")\nU, _, _ = tab.to_numpy()\n\nnp.save(\"TestWithoutZipData\", U)\n\n#TRAIN DATA TO NUMPY\ntab = Orange.data.Table(\"Train_Data_With_Domain_Data.tab\")\nU, _, _ = tab.to_numpy()\n\nnp.save(\"TrainWithoutZipData\", U)\n\n\n\n","sub_path":"Clustering and atribute chosing/numpyData/final_Clusters/Final Prediction Test Set/Preparing Train and Test data/DataToNumpy.py","file_name":"DataToNumpy.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"248344963","text":"\n\n#calss header\nclass _PETITION():\n\tdef __init__(self,): \n\t\tself.name = \"PETITION\"\n\t\tself.definitions = [u'a document signed by a large number of people demanding or asking for some action from the government or another authority: ', u'a formal letter to a law court asking for a particular legal action: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_petition.py","file_name":"_petition.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"48848045","text":"\"\"\"Tests for cement.ext.ext_argparse.\"\"\"\n\nimport sys\nimport re\nfrom argparse import ArgumentError\nfrom cement.ext.ext_argparse import ArgparseArgumentHandler\nfrom cement.ext.ext_argparse import ArgparseController, expose\nfrom cement.ext.ext_argparse import _clean_label, _clean_func\nfrom cement.utils import test\nfrom cement.utils.misc import rando\nfrom cement.core.exc import InterfaceError, FrameworkError\n\nAPP = rando()[:12]\n\nif (sys.version_info[0] > 3 and sys.version_info[1] >= 4):\n ARGPARSE_SUPPORTS_DEFAULTS = True\nelse:\n ARGPARSE_SUPPORTS_DEFAULTS = False\n\n\nclass Base(ArgparseController):\n\n class Meta:\n label = 'base'\n arguments = [\n (['--foo'], dict(dest='foo')),\n ]\n\n @expose(hide=True, help=\"this help doesn't get seen\")\n def default(self):\n return \"Inside Base.default\"\n\n @expose()\n def cmd1(self):\n return \"Inside Base.cmd1\"\n\n @expose()\n def command_with_dashes(self):\n return \"Inside Base.command_with_dashes\"\n\n\nclass Second(ArgparseController):\n\n class Meta:\n label = 'second'\n stacked_on = 'base'\n stacked_type = 'embedded'\n arguments = [\n (['--foo2'], dict(dest='foo2')),\n ]\n\n @expose(\n arguments=[\n (['--cmd2-foo'],\n dict(help='cmd2 sub-command only options', dest='cmd2_foo')),\n ]\n )\n def cmd2(self):\n if self.app.pargs.cmd2_foo:\n return \"Inside Second.cmd2 : Foo > %s\" % self.app.pargs.cmd2_foo\n else:\n return \"Inside Second.cmd2\"\n\n\nclass Third(ArgparseController):\n\n class Meta:\n label = 'third'\n stacked_on = 
'base'\n stacked_type = 'nested'\n arguments = [\n (['--foo3'], dict(dest='foo3')),\n ]\n\n @expose(hide=True)\n def default(self):\n return \"Inside Third.default\"\n\n @expose()\n def cmd3(self):\n return \"Inside Third.cmd3\"\n\n\nclass Fourth(ArgparseController):\n\n class Meta:\n label = 'fourth'\n stacked_on = 'third'\n stacked_type = 'embedded'\n hide = True\n help = \"this help doesn't get seen cause we're hiding\"\n arguments = [\n (['--foo4'], dict(dest='foo4')),\n ]\n\n @expose()\n def cmd4(self):\n return \"Inside Fourth.cmd4\"\n\n\nclass Fifth(ArgparseController):\n\n class Meta:\n label = 'fifth'\n stacked_on = 'third'\n stacked_type = 'nested'\n hide = True\n help = \"this help isn't seen... i'm hiding\"\n arguments = [\n (['--foo5'], dict(dest='foo5')),\n ]\n\n @expose(hide=True)\n def default(self):\n return \"Inside Fifth.default\"\n\n @expose()\n def cmd5(self):\n return \"Inside Fifth.cmd5\"\n\n\nclass Sixth(ArgparseController):\n\n class Meta:\n label = 'sixth'\n stacked_on = 'fifth'\n stacked_type = 'nested'\n arguments = [\n (['--foo6'], dict(dest='foo6')),\n ]\n\n @expose(hide=True)\n def default(self):\n return \"Inside Sixth.default\"\n\n @expose()\n def cmd6(self):\n return \"Inside Sixth.cmd6\"\n\n\nclass Seventh(ArgparseController):\n\n class Meta:\n label = 'seventh'\n stacked_on = 'fourth'\n stacked_type = 'embedded'\n arguments = [\n (['--foo7'], dict(dest='foo7')),\n ]\n\n @expose()\n def cmd7(self):\n return \"Inside Seventh.cmd7\"\n\n\nclass Unstacked(ArgparseController):\n\n class Meta:\n label = 'unstacked'\n stacked_on = None\n arguments = [\n (['--foo6'], dict(dest='foo6')),\n ]\n\n\nclass BadStackType(ArgparseController):\n\n class Meta:\n label = 'bad_stack_type'\n stacked_on = 'base'\n stacked_type = 'bogus_stacked_type'\n arguments = [\n (['--foo6'], dict(dest='foo6')),\n ]\n\n\nclass DuplicateArguments(ArgparseController):\n\n class Meta:\n label = 'duplicate_arguments'\n arguments = [\n (['--foo'], dict(dest='foo')),\n ]\n\n\nclass ControllerCommandDuplicateArguments(ArgparseController):\n\n class Meta:\n label = 'controller_command_duplicate_arguments'\n\n @expose(\n arguments=[\n (['--foo'], dict(dest='foo')),\n (['--foo'], dict(dest='foo')),\n ]\n )\n def sub_command(self):\n pass\n\n\nclass AlternativeDefault(ArgparseController):\n\n class Meta:\n label = 'alternative_default'\n default_func = 'alternative_default'\n stacked_on = 'base'\n stacked_type = 'nested'\n\n @expose(hide=True)\n def alternative_default(self):\n return \"Inside AlternativeDefault.alternative_default\"\n\n\nclass BadAlternativeDefault(ArgparseController):\n\n class Meta:\n label = 'bad_alternative_default'\n default_func = 'bogus_default'\n stacked_on = 'base'\n stacked_type = 'nested'\n\n\nclass Aliases(ArgparseController):\n\n class Meta:\n label = 'aliases'\n aliases = ['aliases-controller', 'ac']\n stacked_on = 'base'\n stacked_type = 'nested'\n\n @expose(aliases=['aliases-cmd-1', 'ac1'])\n def aliases_cmd1(self):\n return \"Inside Aliases.aliases_cmd1\"\n\n\nclass ArgparseExtTestCase(test.CementExtTestCase):\n\n def setUp(self):\n super(ArgparseExtTestCase, self).setUp()\n self.app = self.make_app(APP,\n argument_handler=ArgparseArgumentHandler,\n handlers=[\n Sixth,\n Base,\n Second,\n Third,\n Fourth,\n Fifth,\n Seventh,\n ],\n )\n\n def test_clean_label(self):\n self.eq(_clean_label('some_cmd_name'), 'some-cmd-name')\n\n def test_clean_func(self):\n self.eq(_clean_func('some-cmd-name'), 'some_cmd_name')\n\n def test_base_default(self):\n if not 
ARGPARSE_SUPPORTS_DEFAULTS:\n raise test.SkipTest(\n 'Argparse does not support default commands in Python < 3.4'\n )\n\n with self.app as app:\n res = app.run()\n self.eq(res, \"Inside Base.default\")\n\n def test_base_cmd1(self):\n with self.app as app:\n app._meta.argv = ['cmd1']\n res = app.run()\n self.eq(res, \"Inside Base.cmd1\")\n\n def test_base_command_with_dashes(self):\n with self.app as app:\n app._meta.argv = ['command-with-dashes']\n res = app.run()\n self.eq(res, \"Inside Base.command_with_dashes\")\n\n def test_controller_commands(self):\n with self.app as app:\n app._meta.argv = ['cmd2']\n res = app.run()\n self.eq(res, \"Inside Second.cmd2\")\n self.tearDown()\n\n self.setUp()\n with self.app as app:\n app._meta.argv = ['third', 'cmd3']\n res = app.run()\n self.eq(res, \"Inside Third.cmd3\")\n self.tearDown()\n\n self.setUp()\n with self.app as app:\n app._meta.argv = ['third', 'cmd4']\n res = app.run()\n self.eq(res, \"Inside Fourth.cmd4\")\n self.tearDown()\n\n self.setUp()\n with self.app as app:\n app._meta.argv = ['third', 'fifth', 'cmd5']\n res = app.run()\n self.eq(res, \"Inside Fifth.cmd5\")\n self.tearDown()\n\n self.setUp()\n with self.app as app:\n app._meta.argv = ['third', 'fifth', 'sixth', 'cmd6']\n res = app.run()\n self.eq(res, \"Inside Sixth.cmd6\")\n self.tearDown()\n\n self.setUp()\n with self.app as app:\n app._meta.argv = ['third', 'cmd7']\n res = app.run()\n self.eq(res, \"Inside Seventh.cmd7\")\n self.tearDown()\n\n def test_base_cmd1_parsing(self):\n with self.app as app:\n app._meta.argv = ['--foo=bar', 'cmd1']\n res = app.run()\n self.eq(res, \"Inside Base.cmd1\")\n self.eq(app.pargs.foo, 'bar')\n\n def test_second_cmd2(self):\n with self.app as app:\n app._meta.argv = ['--foo=bar', '--foo2=bar2', 'cmd2']\n res = app.run()\n self.eq(res, \"Inside Second.cmd2\")\n self.eq(app.pargs.foo, 'bar')\n self.eq(app.pargs.foo2, 'bar2')\n\n def test_third_cmd3(self):\n with self.app as app:\n app._meta.argv = [\n '--foo=bar', '--foo2=bar2',\n 'third', '--foo3=bar3', '--foo4=bar4', '--foo7=bar7', 'cmd3',\n ]\n res = app.run()\n self.eq(res, \"Inside Third.cmd3\")\n self.eq(app.pargs.foo, 'bar')\n self.eq(app.pargs.foo2, 'bar2')\n self.eq(app.pargs.foo3, 'bar3')\n self.eq(app.pargs.foo4, 'bar4')\n self.eq(app.pargs.foo7, 'bar7')\n\n def test_fifth_cmd5(self):\n with self.app as app:\n app._meta.argv = [\n '--foo=bar', '--foo2=bar2',\n 'third', '--foo3=bar3', '--foo4=bar4',\n 'fifth', '--foo5=bar5', 'cmd5'\n ]\n res = app.run()\n self.eq(res, \"Inside Fifth.cmd5\")\n self.eq(app.pargs.foo, 'bar')\n self.eq(app.pargs.foo2, 'bar2')\n self.eq(app.pargs.foo3, 'bar3')\n self.eq(app.pargs.foo4, 'bar4')\n self.eq(app.pargs.foo5, 'bar5')\n\n def test_sixth_cmd6(self):\n with self.app as app:\n app._meta.argv = [\n '--foo=bar', '--foo2=bar2',\n 'third', '--foo3=bar3', '--foo4=bar4',\n 'fifth', '--foo5=bar5', 'sixth', '--foo6=bar6', 'cmd6',\n ]\n res = app.run()\n self.eq(res, \"Inside Sixth.cmd6\")\n self.eq(app.pargs.foo, 'bar')\n self.eq(app.pargs.foo2, 'bar2')\n self.eq(app.pargs.foo3, 'bar3')\n self.eq(app.pargs.foo4, 'bar4')\n self.eq(app.pargs.foo5, 'bar5')\n self.eq(app.pargs.foo6, 'bar6')\n\n def test_seventh_cmd7(self):\n with self.app as app:\n app._meta.argv = [\n '--foo=bar', '--foo2=bar2',\n 'third', '--foo3=bar3', '--foo4=bar4', '--foo7=bar7', 'cmd7',\n ]\n res = app.run()\n self.eq(res, \"Inside Seventh.cmd7\")\n self.eq(app.pargs.foo, 'bar')\n self.eq(app.pargs.foo2, 'bar2')\n self.eq(app.pargs.foo3, 'bar3')\n self.eq(app.pargs.foo4, 'bar4')\n 
self.eq(app.pargs.foo7, 'bar7')\n\n def test_collect(self):\n with self.app as app:\n args = app.controller._collect_arguments()\n cmds = app.controller._collect_commands()\n args2, cmds2 = app.controller._collect()\n self.eq((args, cmds), (args2, cmds2))\n\n def test_controller_embedded_on_base(self):\n self.app._meta.argv = ['cmd2']\n with self.app as app:\n res = app.run()\n self.eq(res, \"Inside Second.cmd2\")\n\n def test_controller_command_arguments(self):\n self.app._meta.argv = ['cmd2', '--cmd2-foo=bar2']\n with self.app as app:\n res = app.run()\n self.eq(res, \"Inside Second.cmd2 : Foo > bar2\")\n\n def test_controller_default_nested_on_base(self):\n if not ARGPARSE_SUPPORTS_DEFAULTS:\n raise test.SkipTest(\n 'Argparse does not support default commands in Python < 3.4'\n )\n\n self.app._meta.argv = ['third']\n with self.app as app:\n res = app.run()\n self.eq(res, \"Inside Third.default\")\n\n def test_controller_command_nested_on_base(self):\n self.app._meta.argv = ['third', 'cmd3']\n with self.app as app:\n res = app.run()\n self.eq(res, \"Inside Third.cmd3\")\n\n def test_controller_doubled_embedded(self):\n self.app._meta.argv = ['third', 'cmd4']\n with self.app as app:\n res = app.run()\n self.eq(res, \"Inside Fourth.cmd4\")\n\n def test_controller_default_double_nested(self):\n if not ARGPARSE_SUPPORTS_DEFAULTS:\n raise test.SkipTest(\n 'Argparse does not support default commands in Python < 3.4'\n )\n self.app._meta.argv = ['third', 'fifth']\n with self.app as app:\n res = app.run()\n self.eq(res, \"Inside Fifth.default\")\n\n def test_controller_command_double_nested(self):\n self.app._meta.argv = ['third', 'fifth', 'cmd5']\n with self.app as app:\n res = app.run()\n self.eq(res, \"Inside Fifth.cmd5\")\n\n def test_alternative_default(self):\n if not ARGPARSE_SUPPORTS_DEFAULTS:\n raise test.SkipTest(\n 'Argparse does not support default commands in Python < 3.4'\n )\n\n self.reset_backend()\n self.app = self.make_app(APP,\n argv=['alternative_default'],\n argument_handler=ArgparseArgumentHandler,\n handlers=[\n Base,\n AlternativeDefault,\n ],\n )\n with self.app as app:\n res = app.run()\n self.eq(res,\n \"Inside AlternativeDefault.alternative_default\")\n\n @test.raises(FrameworkError)\n def test_bad_alternative_default_command(self):\n if not ARGPARSE_SUPPORTS_DEFAULTS:\n raise test.SkipTest(\n 'Argparse does not support default commands in Python < 3.4'\n )\n\n self.reset_backend()\n self.app = self.make_app(APP,\n argv=['bad_alternative_default'],\n argument_handler=ArgparseArgumentHandler,\n handlers=[\n Base,\n BadAlternativeDefault,\n ],\n )\n try:\n with self.app as app:\n res = app.run()\n\n except FrameworkError as e:\n res = re.match(\"(.*)does not exist(.*)bogus_default(.*)\",\n e.__str__())\n self.ok(res)\n raise\n\n @test.raises(InterfaceError)\n def test_invalid_stacked_on(self):\n self.reset_backend()\n try:\n self.app = self.make_app(APP,\n argument_handler=ArgparseArgumentHandler,\n handlers=[\n Base,\n Unstacked,\n ],\n )\n with self.app as app:\n app.run()\n except InterfaceError as e:\n self.ok(re.match(\"(.*)is not stacked anywhere!(.*)\", e.msg))\n raise\n\n @test.raises(InterfaceError)\n def test_invalid_stacked_type(self):\n self.reset_backend()\n try:\n self.app = self.make_app(APP,\n argument_handler=ArgparseArgumentHandler,\n handlers=[\n Base,\n BadStackType,\n ],\n )\n with self.app as app:\n app.run()\n except InterfaceError as e:\n self.ok(re.match(\"(.*)has an unknown stacked type(.*)\", e.msg))\n raise\n\n @test.raises(ArgumentError)\n 
def test_duplicate_arguments(self):\n self.reset_backend()\n try:\n self.app = self.make_app(APP,\n argument_handler=ArgparseArgumentHandler,\n handlers=[\n Base,\n DuplicateArguments,\n ],\n )\n with self.app as app:\n app.run()\n except ArgumentError as e:\n self.ok(re.match(\"(.*)conflicting option string(.*)\",\n e.__str__()))\n raise\n\n @test.raises(ArgumentError)\n def test_controller_command_duplicate_arguments(self):\n self.reset_backend()\n try:\n self.app = self.make_app(APP,\n argument_handler=ArgparseArgumentHandler,\n handlers=[\n Base,\n ControllerCommandDuplicateArguments,\n ],\n )\n with self.app as app:\n app.run()\n except ArgumentError as e:\n self.ok(re.match(\"(.*)conflicting option string(.*)\",\n e.__str__()))\n raise\n\n def test_aliases(self):\n if sys.version_info[0] < 3:\n raise test.SkipTest(\n 'Argparse does not support aliases in Python < 3'\n )\n self.reset_backend()\n\n self.app = self.make_app(APP,\n argument_handler=ArgparseArgumentHandler,\n handlers=[\n Base,\n Aliases,\n ],\n )\n with self.app as app:\n app._meta.argv = ['aliases', 'aliases-cmd1']\n res = app.run()\n self.eq(res, \"Inside Aliases.aliases_cmd1\")\n\n app._meta.argv = ['aliases', 'aliases-cmd-1']\n app._setup_arg_handler()\n res = app.run()\n self.eq(res, \"Inside Aliases.aliases_cmd1\")\n\n app._meta.argv = ['aliases-controller', 'aliases-cmd1']\n app._setup_arg_handler()\n res = app.run()\n self.eq(res, \"Inside Aliases.aliases_cmd1\")\n\n app._meta.argv = ['ac', 'ac1']\n app._setup_arg_handler()\n res = app.run()\n self.eq(res, \"Inside Aliases.aliases_cmd1\")\n\n def test_unknown_arguments(self):\n self.reset_backend()\n\n class MyArgumentHandler(ArgparseArgumentHandler):\n class Meta:\n label = 'my_argument_handler'\n ignore_unknown_arguments = True\n self.app = self.make_app(APP,\n argument_handler=MyArgumentHandler,\n )\n with self.app as app:\n app._meta.argv = ['-l', 'some-other-argument']\n app.run()\n res = '-l' in app.args.unknown_args\n self.ok(res)\n res = 'some-other-argument' in app.args.unknown_args\n self.ok(res)\n","sub_path":"tests/ext/argparse_tests.py","file_name":"argparse_tests.py","file_ext":"py","file_size_in_byte":19092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"304699897","text":"# Create your views here.\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.shortcuts import redirect, render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.views.generic.list_detail import object_list\n\nfrom models import Article, Edit\nfrom forms import ArticleForm, EditForm\n\n@login_required\ndef add_article(request):\n form = ArticleForm(request.POST or None)\n if form.is_valid():\n article = form.save(commit=False)\n article.author = request.user\n article.save()\n msg = \"Article saved successfully\"\n messages.success(request, msg, fail_silently=True)\n return redirect(article)\n return render_to_response('wiki/article_form.html', \n { 'form': form },\n context_instance=RequestContext(request))\n\n@login_required\ndef edit_article(request, slug):\n article = get_object_or_404(Article, slug=slug)\n form = ArticleForm(request.POST or None, instance=article)\n edit_form = EditForm(request.POST or None)\n if form.is_valid():\n article = form.save()\n if edit_form.is_valid():\n edit = edit_form.save(commit=False)\n edit.article = article\n edit.editor = request.user\n edit.save()\n msg = \"Article updated 
successfully\"\n messages.success(request, msg, fail_silently=True)\n return redirect(article)\n return render_to_response('wiki/article_form.html', \n { \n 'form': form,\n 'edit_form': edit_form,\n 'article': article,\n },\n context_instance=RequestContext(request))\n\ndef article_history(request, slug):\n article = get_object_or_404(Article, slug=slug)\n return object_list(request, \n queryset=Edit.objects.filter(article__slug=slug),\n extra_context={'article': article})\n\n","sub_path":"source/djen_project/wiki/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"210712973","text":"import subprocess\nimport json\nfrom poodle import Object\nfrom kalc.model.full import kinds_collection\nfrom kalc.misc.kind_filter import FilterByLabelKey, FilterByName, KindPlaceholder\nfrom kalc.model.kubernetes import KubernetesCluster\nimport kalc.policy \nfrom kalc.model.search import KubernetesModel\nfrom kalc.model.kinds.Deployment import YAMLable, Deployment\nfrom pygments import highlight\nfrom pygments.lexers.diff import DiffLexer\nfrom pygments.formatters.terminal import TerminalFormatter\nimport random\nimport io\nimport kalc.misc.util\nimport pkg_resources\nimport yaml\nimport kalc.misc.support_check\nfrom kalc.misc.metrics import Metric\nfrom logzero import logger\n\n\n__version__ = pkg_resources.get_distribution(\"kalc\").version\n\n\nALL_RESOURCES = [ \"all\", \"node\", \"pc\", \"limitranges\", \"resourcequotas\", \"poddisruptionbudgets\", \"hpa\"]\n\ncluster_md5_sh = 'kubectl get pods -o wide --all-namespaces -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName --sort-by=\"{.metadata.name}\" | md5sum'\n\nkalc_state_objects = []\nkind = KindPlaceholder\ncluster = None\n\nmd5_cluster = \"\"\n\nkalc.policy.policy_engine.register_state_objects(kalc_state_objects)\n\nfor k, v in kinds_collection.items():\n v.by_name = FilterByName(k, kalc_state_objects)\n v.by_label = FilterByLabelKey(k, kalc_state_objects)\n globals()[k] = v\n setattr(kind, k, v)\n\ndef update(data=None):\n \"Fetch information from currently selected cluster\"\n if isinstance(data, io.IOBase):\n data = data.read()\n k = KubernetesCluster()\n all_data = []\n all_support_checks = []\n if not data:\n global md5_cluster\n result = subprocess.Popen(cluster_md5_sh, shell=True, stdout=subprocess.PIPE, executable='/bin/bash')\n md5_cluster = result.stdout.read().decode('ascii').split()[0]\n assert len(md5_cluster) == 32, \"md5_cluster sum wrong len({0}) not is 32\".format(md5_cluster)\n\n\n for res in ALL_RESOURCES:\n result = subprocess.run(['kubectl', 'get', res, '--all-namespaces', '-o=json'], stdout=subprocess.PIPE)\n if len(result.stdout) < 100:\n print(result.stdout)\n raise SystemError(\"Error using kubectl. 
Make sure `kubectl get pods` is working.\")\n data = json.loads(result.stdout.decode(\"utf-8\"))\n y_data = yaml.dump(data, default_flow_style=False)\n sc = kalc.misc.support_check.YAMLStrSupportChecker(yaml_str=y_data)\n all_support_checks.extend(sc.check())\n all_data.append(data)\n \n for result in all_support_checks:\n if not result.isOK(): logger.warning(\"Unsupported feature: %s\" % str(result))\n else: logger.debug(str(result))\n \n for d in all_data:\n for item in d[\"items\"]:\n k.load_item(item)\n\n else:\n # TODO: make sure \"data\" is in YAML format\n sc = kalc.misc.support_check.YAMLStrSupportChecker(yaml_str=data)\n for result in sc.check():\n if not result.isOK(): logger.warning(\"Unsupported feature: %s\" % str(result))\n else: logger.debug(str(result))\n\n for ys in kalc.misc.util.split_yamldumps(data):\n k.load(ys)\n \n k._build_state()\n global kalc_state_objects\n kalc_state_objects.clear()\n kalc_state_objects.extend(k.state_objects)\n global cluster\n cluster = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects)) # pylint: disable=undefined-variable\n\ndef run():\n # TODO HERE: copy state_objects!\n # as we will be running multiple times, we need to store original state\n # or we actually don't! we can continue computing on top of previous...?\n # for now it is ok..\n kube = KubernetesModel(kalc_state_objects)\n policy_added = False\n hypotheses = []\n for ob in kalc_state_objects:\n if isinstance(ob.policy, str): continue # STUB. find and fix\n hypotheses.append(ob.policy.apply(kube))\n # TODO HERE: generate different combinations of hypotheses\n kube.run(timeout=999000, sessionName=\"kalc\")\n # TODO. STUB\n # TODO example hanlers and patches\n for obj in kalc_state_objects:\n if isinstance(obj, Deployment):\n if \"redis-slave\" in str(obj.metadata_name):\n obj.affinity_required_handler()\n # obj.scale_replicas_handler(random.randint(4,10))\n\n if policy_added: patch()\n for a in kube.plan:\n print(a)\n r = a()\n if isinstance(r, dict) and \"kubectl\" in r:\n print(\">>\", r[\"kubectl\"])\n # print summary\n\ndef patch():\n for obj in kalc_state_objects:\n if isinstance(obj, Deployment):\n # print(\"patch for \", obj.metadata_name)\n print(highlight(obj.get_patch(), DiffLexer(), TerminalFormatter()))\n\ndef apply():\n pass\n\n\ndef print_objects(objectList):\n print(\"<==== Domain Object List =====>\")\n\n pod_loaded_list = filter(lambda x: isinstance(x, Pod), objectList) # pylint: disable=undefined-variable\n print(\"----------Pods---------------\")\n for poditem in pod_loaded_list:\n print(\"## Pod:\"+ str(poditem.metadata_name._get_value()) + \\\n \", Status: \" + str(poditem.status._get_value()) + \\\n \", Priority_class: \" + str(poditem.priorityClass._property_value.metadata_name) + \\\n \", CpuRequest: \" + str(poditem.cpuRequest._get_value()) + \\\n \", MemRequest: \" + str(poditem.memRequest._get_value()) + \\\n \", CpuLimit: \" + str(poditem.cpuLimit._get_value()) + \\\n \", MemLimit: \" + str(poditem.memLimit._get_value()) + \\\n \", ToNode: \" + str(poditem.toNode._property_value) + \\\n \", AtNode: \" + str(poditem.atNode._property_value) + \\\n \", Metadata_labels:\" + str([str(x) for x in poditem.metadata_labels._property_value]) + \\\n \", hasService: \" + str(poditem.hasService._get_value()) + \\\n \", hasDeployment: \" + str(poditem.hasDeployment._get_value()) + \\\n \", hasDaemonset: \" + str(poditem.hasDaemonset._get_value()))\n \n node_loaded_list = filter(lambda x: isinstance(x, Node), objectList) # pylint: 
disable=undefined-variable\n print(\"----------Nodes---------------\")\n for nodeitem in node_loaded_list:\n print(\"## Node:\"+ str(nodeitem.metadata_name._get_value()) + \\\n \", cpuCapacity: \" + str(nodeitem.cpuCapacity._get_value()) + \\\n \", memCapacity: \" + str(nodeitem.memCapacity._get_value()) + \\\n \", CurrentFormalCpuConsumption: \" + str(nodeitem.currentFormalCpuConsumption._get_value()) + \\\n \", CurrentFormalMemConsumption: \" + str(nodeitem.currentFormalMemConsumption._get_value()) + \\\n \", AmountOfPodsOverwhelmingMemLimits: \" + str(nodeitem.AmountOfPodsOverwhelmingMemLimits._get_value()) + \\\n \", PodAmount: \" + str(nodeitem.podAmount._get_value()) + \\\n \", IsNull:\" + str(nodeitem.isNull._get_value()) + \\\n \", Status:\" + str(nodeitem.status._get_value()) +\\\n \", AmountOfActivePods: \" + str(nodeitem.amountOfActivePods._get_value()) +\\\n \", Searchable: \" + str(nodeitem.searchable._get_value())+\\\n \", IsSearched: \", str(nodeitem.isSearched._get_value())+\\\n \", different_than: \", str([str(x) for x in nodeitem.different_than._get_value()]))\n services = filter(lambda x: isinstance(x, Service), objectList) # pylint: disable=undefined-variable\n print(\"----------Services---------------\")\n for service in services:\n print(\"## Service: \"+str(service.metadata_name)+\\\n \", AmountOfActivePods: \"+str(service.amountOfActivePods._get_value())+\\\n \", Status: \" + str(service.status._get_value()) +\n \", Spec_selector: \"+str([str(x) for x in service.spec_selector._property_value])+\\\n \", Pod_List: \"+str([str(x) for x in service.podList._get_value()])+\\\n \", IsSearched: \", str(service.isSearched._get_value()))\n\n\n prios = filter(lambda x: isinstance(x, PriorityClass), objectList) # pylint: disable=undefined-variable\n print(\"----------PriorityClasses---------------\")\n for prio in prios:\n print(\"## PriorityClass: \"+str(prio.metadata_name) +\" \" + str(prio.priority._get_value()))\n\n\n scheduler = next(filter(lambda x: isinstance(x, Scheduler), objectList)) # pylint: disable=undefined-variable\n print(\"----------Shedulers---------------\")\n print(\"## Sheduler: \"+str(scheduler.status._get_value()) +\\\n \" PodList: \"+str([str(x) for x in scheduler.podQueue._get_value()]) +\\\n \" QueueLength: \"+str(scheduler.queueLength._get_value()))\n\n deployments_loaded_list = filter(lambda x: isinstance(x, Deployment), objectList)\n print(\"----------Deployments------------\")\n for deployment in deployments_loaded_list:\n print(\"## Deployment: \"+str(deployment.metadata_name._get_value()) +\\\n \" Spec_replicas: \"+ str(deployment.spec_replicas._get_value()) +\\\n \" Namespace: \" + str(deployment.metadata_namespace._get_value())+\\\n \" AmountOfActivePods: \" + str(deployment.amountOfActivePods._get_value())+\\\n \" Status: \" + str(deployment.status._get_value())+\\\n \" PodList: \" + str([str(x) for x in deployment.podList._get_value()])+\\\n \" PriorityClassName: \" + str(deployment.spec_template_spec_priorityClassName._property_value) + \\\n \" Searchable:\" + str(deployment.searchable))\n # \" Metadata_labels: \" + str([str(x) for x in deployment.template_metadata_labels._property_value]))\n \n daemonsets_loaded_list = filter(lambda x: isinstance(x, DaemonSet), objectList) # pylint: disable=undefined-variable\n print(\"----------DaemonSets------------\")\n for daemonset in daemonsets_loaded_list:\n print(\"## DaemonSet: \"+str(daemonset.metadata_name._get_value()) +\\\n \" AmountOfActivePods: \" + 
str(daemonset.amountOfActivePods._get_value())+\\\n \" Status: \" + str(daemonset.status._get_value())+\\\n \" PodList: \" + str([str(x) for x in daemonset.podList._get_value()])+\\\n \" PriorityClassName: \" + str(daemonset.spec_template_spec_priorityClassName._property_value) + \\\n \" Searchable:\" + str(daemonset.searchable))\n # \" Metadata_labels: \" + str([str(x) for x in deployment.template_metadata_labels._property_value]))\n\n replicasets_loaded_list = filter(lambda x: isinstance(x, ReplicaSet), objectList) # pylint: disable=undefined-variable\n print(\"----------ReplicaSets------------\")\n for replicaset in replicasets_loaded_list:\n print(\"## Replicaset: \"+str(replicaset.metadata_name._get_value()) +\\\n \" hash: \" + str(replicaset.hash)+\\\n \" spec_replicas: \" + str(replicaset.spec_replicas._get_value())+\\\n \" metadata_ownerReferences__kind: \" + str(replicaset.metadata_ownerReferences__name._property_value)+\\\n \" metadata_ownerReferences__name: \" + str(replicaset.metadata_ownerReferences__name._property_value))\n\n globalvar_loaded_list = filter(lambda x: isinstance(x, GlobalVar), objectList) # pylint: disable=undefined-variable\n print(\"----------GlobalVar------------\")\n list_of_objects_output =['']\n for globalvar_item in globalvar_loaded_list:\n list_of_objects_output.extend(['is_service_disrupted',str(globalvar_item.is_service_disrupted._get_value())])\n list_of_objects_output.extend(['is_deployment_disrupted',str(globalvar_item.is_deployment_disrupted._get_value())])\n list_of_objects_output.extend(['is_daemonset_disrupted',str(globalvar_item.is_daemonset_disrupted._get_value())])\n list_of_objects_output.extend(['is_node_disrupted',str(globalvar_item.is_node_disrupted._get_value())])\n print(list_of_objects_output)\n","sub_path":"kalc/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":11525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"18263808","text":"from flask_restful import Resource, reqparse, fields, marshal_with\nfrom api.db import crawl_db, CrawlCollections\n\nparser = reqparse.RequestParser()\nparser.add_argument('type', type=str, required=True, location='args')\nparser.add_argument('project_id', type=int, required=True, location='args')\n\n\nclass GithubStat(Resource):\n def get(self):\n args = parser.parse_args()\n stat_type = args['type']\n project_id = args['project_id']\n stat = crawl_db[CrawlCollections.GIT_STAT].find_one({'project_id': project_id, 'type': stat_type})\n if stat is not None:\n return stat['data']\n","sub_path":"api/resources_web/github/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"465077145","text":"\"\"\"\r\nOpenVisus conversion module\r\n\"\"\"\r\nimport os\r\nimport rasterio\r\nimport argparse\r\nimport subprocess\r\nimport pathlib\r\nimport shutil\r\nfrom glob import glob\r\nfrom PIL import Image,ImageOps\r\n\r\nfrom OpenVisus import *\r\nDbModule.attach()\r\n\r\nimport dask\r\nimport distributed\r\nimport pandas as pd\r\nfrom crown_maps.verify import get_site, get_year\r\nimport numpy as np\r\nfrom numba import njit, prange\r\n\r\nfrom OpenVisus.__main__ import MidxToIdx\r\n\r\ndef match_name(x):\r\n x = os.path.basename(x)\r\n return x.replace(\"image.tif\",\"image_rasterized.tif\")\r\n\r\n@njit(parallel=True)\r\ndef blend_rgb_ann(a, b):\r\n #a[b[b>0]] = [255,0,0]\r\n for i in prange(a[0].shape[0]):\r\n for 
j in prange(a[0].shape[1]):\r\n if(b[i][j] > 0):\r\n a[0][i][j]=255\r\n a[1][i][j]=0\r\n a[2][i][j]=0\r\n \r\ndef blend(rgb_path, annotation_dir, outdir):\r\n basename = os.path.basename(rgb_path)\r\n ann_path=annotation_dir+\"/\"+basename.replace(\"image.tif\", \"image_rasterized.tif\")\r\n ageo = rasterio.open(rgb_path)\r\n a = ageo.read()\r\n bgeo = rasterio.open(ann_path)\r\n b = bgeo.read()\r\n print(\"Blending \", rgb_path, \"and\", ann_path, \"...\")\r\n blend_rgb_ann(a, b[0])\r\n out_name = outdir+\"/\"+basename\r\n with rasterio.open(\r\n out_name,\r\n 'w',\r\n driver='GTiff',\r\n height=ageo.height,\r\n width=ageo.width,\r\n count=3,\r\n dtype=a.dtype,\r\n crs='+proj=latlong',\r\n transform=ageo.transform,\r\n ) as dst:\r\n dst.write(a)\r\n \r\n return out_name\r\n \r\ndef run(rgb_images, dst_directory, annotation_dir):\r\n \r\n #Construct outdir variable from top level savedir and site\r\n site = get_site(rgb_images[0]) \r\n outdir = os.path.join(dst_directory,site)\r\n pathlib.Path(outdir+\"/temp\").mkdir(parents=True, exist_ok=True)\r\n \r\n outname = outdir.split(\"/\")[-1]\r\n if(outname==\"\"):\r\n outname = outdir.split(\"/\")[-2]\r\n \r\n # Blend rgb and annotations\r\n images = []\r\n for rgb_path in rgb_images:\r\n images.append(blend(rgb_path, annotation_dir, outdir))\r\n \r\n # find images\r\n # convert to idx\r\n sx,sy=1.0, 1.0\r\n tiles=[]\r\n for I,filename in enumerate(images):\r\n metadata =rasterio.open(filename)\r\n name=os.path.splitext(os.path.basename(filename))[0]\r\n width,height=metadata.width,metadata.height\r\n x1,y1,x2,y2=metadata.bounds.left,metadata.bounds.bottom, metadata.bounds.right, metadata.bounds.top\r\n \r\n # compute scaling to keep all pixels\r\n if I==0:\r\n sx=width /(x2-x1)\r\n sy=height/(y2-y1)\r\n \r\n x1,y1,x2,y2=sx*x1,sy*y1,sx*x2,sy*y2\r\n tile={\"name\": name, \"size\" : (width,height), \"bounds\" : (x1,y1,x2,y2)}\r\n print(\"Converting tile...\",tile,I,\"/\",len(images))\r\n tiles.append(tile) \r\n \r\n # avoid creation multiple times\r\n if not os.path.isfile(os.path.join(dst_directory,name,\"visus.idx\")):\r\n data=Image.open(filename)\r\n data=ImageOps.flip(data)\r\n CreateIdx(url=os.path.join(dst_directory,name,\"visus.idx\"), rmtree=True, dim=2,data=numpy.asarray(data))\r\n \r\n # create midx\r\n X1=min([tile[\"bounds\"][0] for tile in tiles])\r\n Y1=min([tile[\"bounds\"][1] for tile in tiles])\r\n midx_filename=os.path.join(dst_directory,\"visus.midx\")\r\n with open(midx_filename,\"wt\") as file:\r\n file.writelines([\r\n \"\\n\",\r\n \"\\toutput=voronoi()\\n\",\r\n *[\"\\t\\n\".format(tile[\"name\"],tile[\"name\"],tile[\"bounds\"][0]-X1,tile[\"bounds\"][1]-Y1) for tile in tiles],\r\n \"\\n\"\r\n ])\r\n \r\n # to see automatically computed idx file\r\n db=LoadDataset(midx_filename)\r\n print(db.getDatasetBody().toString())\r\n idx_filename=os.path.join(dst_directory,\"visus.idx\")\r\n MidxToIdx([\"--midx\", midx_filename, \"--field\",\"output=voronoi()\", \"--tile-size\",\"4*1024\", \"--idx\", idx_filename])\r\n \r\n\r\nif __name__==\"__main__\": \r\n #Create dask cluster\r\n from crown_maps import start_cluster\r\n client = start_cluster.start(cpus=20,mem_size=\"40GB\")\r\n client.wait_for_workers(1)\r\n \r\n #Pool of rasterized predictions\r\n rgb_list = glob.glob(\"/orange/ewhite/NeonData/**/Mosaic/*image.tif\",recursive=True) \r\n annotation_dir = \"/orange/idtrees-collab/rasterized/\"\r\n outdir = \"/orange/idtrees-collab/OpenVisus/\"\r\n annotation_list = glob.glob(annotation_dir + \"*.tif\")\r\n \r\n annotation_names = 
[os.path.basename(x) for x in annotation_list]\r\n rgb_list = [x for x in rgb_list if match_name(x) in annotation_names]\r\n \r\n df = pd.DataFrame({\"path\":rgb_list})\r\n df[\"site\"] = df.path.apply(lambda x: get_site(x))\r\n df[\"year\"] = df.path.apply(lambda x: get_year(x))\r\n \r\n #just run\r\n #df = df[df.site.isin([\"ABBY\"])]\r\n \r\n #order by site using only the most recent year\r\n site_lists = df.groupby('site').apply(lambda x: x[x.year==x.year.max()]).reset_index(drop=True).groupby('site').path.apply(list).values\r\n \r\n ####Scatter and run in parallel\r\n futures = []\r\n for site in site_lists:\r\n site = np.sort(site)\r\n siteID = get_site(site[0])\r\n site_dir = \"{}/{}\".format(outdir, siteID)\r\n \r\n try:\r\n shutil.rmtree(site_dir)\r\n os.mkdir(site_dir) \r\n except:\r\n os.mkdir(site_dir) \r\n \r\n #run(rgb_images=site[0:20], dst_directory=site_dir, annotation_dir=annotation_dir)\r\n future = dask.delayed(run)(rgb_images=site[0:20], dst_directory=site_dir, annotation_dir=annotation_dir)\r\n futures.append(future)\r\n \r\n persisted_values = dask.persist(*futures)\r\n distributed.wait(persisted_values)\r\n for pv in persisted_values:\r\n try:\r\n print(pv)\r\n except Exception as e:\r\n print(e)\r\n continue ","sub_path":"App/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":5641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"60829183","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/svpino/dev/tensorflow-object-detection-sagemaker/todl/tensorflow-object-detection/research/object_detection/builders/losses_builder.py\n# Compiled at: 2020-04-05 19:50:57\n# Size of source mod 2**32: 9187 bytes\n\"\"\"A function to build localization and classification losses from config.\"\"\"\nimport functools\nimport object_detection.core as sampler\nfrom object_detection.core import losses\nfrom object_detection.protos import losses_pb2\nfrom object_detection.utils import ops\n\ndef build(loss_config):\n \"\"\"Build losses based on the config.\n\n Builds classification, localization losses and optionally a hard example miner\n based on the config.\n\n Args:\n loss_config: A losses_pb2.Loss object.\n\n Returns:\n classification_loss: Classification loss object.\n localization_loss: Localization loss object.\n classification_weight: Classification loss weight.\n localization_weight: Localization loss weight.\n hard_example_miner: Hard example miner object.\n random_example_sampler: BalancedPositiveNegativeSampler object.\n\n Raises:\n ValueError: If hard_example_miner is used with sigmoid_focal_loss.\n ValueError: If random_example_sampler is getting non-positive value as\n desired positive example fraction.\n \"\"\"\n classification_loss = _build_classification_loss(loss_config.classification_loss)\n localization_loss = _build_localization_loss(loss_config.localization_loss)\n classification_weight = loss_config.classification_weight\n localization_weight = loss_config.localization_weight\n hard_example_miner = None\n if loss_config.HasField('hard_example_miner'):\n if loss_config.classification_loss.WhichOneof('classification_loss') == 'weighted_sigmoid_focal':\n raise ValueError('HardExampleMiner should not be used with sigmoid focal loss')\n hard_example_miner = build_hard_example_miner(loss_config.hard_example_miner, classification_weight, localization_weight)\n else:\n 
random_example_sampler = None\n if loss_config.HasField('random_example_sampler'):\n if loss_config.random_example_sampler.positive_sample_fraction <= 0:\n raise ValueError('RandomExampleSampler should not use non-positivevalue as positive sample fraction.')\n random_example_sampler = sampler.BalancedPositiveNegativeSampler(positive_fraction=(loss_config.random_example_sampler.positive_sample_fraction))\n elif loss_config.expected_loss_weights == loss_config.NONE:\n expected_loss_weights_fn = None\n else:\n if loss_config.expected_loss_weights == loss_config.EXPECTED_SAMPLING:\n expected_loss_weights_fn = functools.partial((ops.expected_classification_loss_by_expected_sampling),\n min_num_negative_samples=(loss_config.min_num_negative_samples),\n desired_negative_sampling_ratio=(loss_config.desired_negative_sampling_ratio))\n else:\n if loss_config.expected_loss_weights == loss_config.REWEIGHTING_UNMATCHED_ANCHORS:\n expected_loss_weights_fn = functools.partial((ops.expected_classification_loss_by_reweighting_unmatched_anchors),\n min_num_negative_samples=(loss_config.min_num_negative_samples),\n desired_negative_sampling_ratio=(loss_config.desired_negative_sampling_ratio))\n else:\n raise ValueError('Not a valid value for expected_classification_loss.')\n return (\n classification_loss, localization_loss, classification_weight,\n localization_weight, hard_example_miner, random_example_sampler,\n expected_loss_weights_fn)\n\n\ndef build_hard_example_miner(config, classification_weight, localization_weight):\n \"\"\"Builds hard example miner based on the config.\n\n Args:\n config: A losses_pb2.HardExampleMiner object.\n classification_weight: Classification loss weight.\n localization_weight: Localization loss weight.\n\n Returns:\n Hard example miner.\n\n \"\"\"\n loss_type = None\n if config.loss_type == losses_pb2.HardExampleMiner.BOTH:\n loss_type = 'both'\n if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION:\n loss_type = 'cls'\n if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION:\n loss_type = 'loc'\n max_negatives_per_positive = None\n num_hard_examples = None\n if config.max_negatives_per_positive > 0:\n max_negatives_per_positive = config.max_negatives_per_positive\n if config.num_hard_examples > 0:\n num_hard_examples = config.num_hard_examples\n hard_example_miner = losses.HardExampleMiner(num_hard_examples=num_hard_examples,\n iou_threshold=(config.iou_threshold),\n loss_type=loss_type,\n cls_loss_weight=classification_weight,\n loc_loss_weight=localization_weight,\n max_negatives_per_positive=max_negatives_per_positive,\n min_negatives_per_image=(config.min_negatives_per_image))\n return hard_example_miner\n\n\ndef build_faster_rcnn_classification_loss(loss_config):\n \"\"\"Builds a classification loss for Faster RCNN based on the loss config.\n\n Args:\n loss_config: A losses_pb2.ClassificationLoss object.\n\n Returns:\n Loss based on the config.\n\n Raises:\n ValueError: On invalid loss_config.\n \"\"\"\n if not isinstance(loss_config, losses_pb2.ClassificationLoss):\n raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')\n loss_type = loss_config.WhichOneof('classification_loss')\n if loss_type == 'weighted_sigmoid':\n return losses.WeightedSigmoidClassificationLoss()\n if loss_type == 'weighted_softmax':\n config = loss_config.weighted_softmax\n return losses.WeightedSoftmaxClassificationLoss(logit_scale=(config.logit_scale))\n if loss_type == 'weighted_logits_softmax':\n config = loss_config.weighted_logits_softmax\n return 
losses.WeightedSoftmaxClassificationAgainstLogitsLoss(logit_scale=(config.logit_scale))\n if loss_type == 'weighted_sigmoid_focal':\n config = loss_config.weighted_sigmoid_focal\n alpha = None\n if config.HasField('alpha'):\n alpha = config.alpha\n return losses.SigmoidFocalClassificationLoss(gamma=(config.gamma),\n alpha=alpha)\n config = loss_config.weighted_softmax\n return losses.WeightedSoftmaxClassificationLoss(logit_scale=(config.logit_scale))\n\n\ndef _build_localization_loss(loss_config):\n \"\"\"Builds a localization loss based on the loss config.\n\n Args:\n loss_config: A losses_pb2.LocalizationLoss object.\n\n Returns:\n Loss based on the config.\n\n Raises:\n ValueError: On invalid loss_config.\n \"\"\"\n if not isinstance(loss_config, losses_pb2.LocalizationLoss):\n raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')\n loss_type = loss_config.WhichOneof('localization_loss')\n if loss_type == 'weighted_l2':\n return losses.WeightedL2LocalizationLoss()\n if loss_type == 'weighted_smooth_l1':\n return losses.WeightedSmoothL1LocalizationLoss(loss_config.weighted_smooth_l1.delta)\n if loss_type == 'weighted_iou':\n return losses.WeightedIOULocalizationLoss()\n raise ValueError('Empty loss config.')\n\n\ndef _build_classification_loss(loss_config):\n \"\"\"Builds a classification loss based on the loss config.\n\n Args:\n loss_config: A losses_pb2.ClassificationLoss object.\n\n Returns:\n Loss based on the config.\n\n Raises:\n ValueError: On invalid loss_config.\n \"\"\"\n if not isinstance(loss_config, losses_pb2.ClassificationLoss):\n raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')\n loss_type = loss_config.WhichOneof('classification_loss')\n if loss_type == 'weighted_sigmoid':\n return losses.WeightedSigmoidClassificationLoss()\n if loss_type == 'weighted_sigmoid_focal':\n config = loss_config.weighted_sigmoid_focal\n alpha = None\n if config.HasField('alpha'):\n alpha = config.alpha\n return losses.SigmoidFocalClassificationLoss(gamma=(config.gamma),\n alpha=alpha)\n if loss_type == 'weighted_softmax':\n config = loss_config.weighted_softmax\n return losses.WeightedSoftmaxClassificationLoss(logit_scale=(config.logit_scale))\n if loss_type == 'weighted_logits_softmax':\n config = loss_config.weighted_logits_softmax\n return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(logit_scale=(config.logit_scale))\n if loss_type == 'bootstrapped_sigmoid':\n config = loss_config.bootstrapped_sigmoid\n return losses.BootstrappedSigmoidClassificationLoss(alpha=(config.alpha),\n bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))\n raise ValueError('Empty loss config.')","sub_path":"pycfiles/todl-0.1.1.tar/losses_builder.cpython-37.py","file_name":"losses_builder.cpython-37.py","file_ext":"py","file_size_in_byte":8896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"559358222","text":"from mgmt import connection\nimport json\nimport requests\n\n__all__ = [\n 'AddSubnetLib'\n]\n\n\nclass AddSubnetLib():\n def __init__(self):\n self.f = requests.Session()\n\n def run(self, mgmt_ip, sid, subnet, host_suffix, mask_length):\n '''\n Function used to Add new Network(s) to CheckPoint.\n :param mgmt_ip: IP address of the CheckPoint Management Server.\n :param sid: Session ID provided from the Login Action.\n :param subnet: Network address of the subnet that will be added to CheckPoint.\n :param host_suffix: used to create the hostname (first half) after combining it with the Network 
address (i.e: Network_, MaliciousNet_, N_..etc)))\n :param mask_length: mask length of the subnet that will be added to CheckPoint.\n :return: the http return status code of the api request to Add a new subnet.\n '''\n t = connection(mgmt_ip)\n url = t.create_api_url(\"add-network\")\n print(\"API url used in this action: \" + url)\n hostname = host_suffix+subnet+\"_\"+mask_length\n request_headers = {'Content-Type':'application/json', 'X-chkp-sid' : sid}\n my_data = {\"name\": hostname, \"subnet\": subnet, \"mask-length\":mask_length}\n r = self.f.post(url, data=json.dumps(my_data), headers=request_headers, verify=False)\n try:\n '''\n #Un-hash the following 2 lines to see the full response of the API request\n #a = json.loads(r.content) \n #print(a)'''\n return_code = r.status_code\n if return_code == 200:\n print(\"Subnet add for {}/{} is OK \".format(subnet, mask_length))\n return r.status_code\n else:\n print(\"Subnet {}/{} add is NOK \".format(subnet, mask_length))\n a = json.loads(r.content)\n print(a)\n return return_code\n '''\n #Un-hash the following 2 lines, if you want to use embedded discard to discard the changes in case of errors instead of using the Stackstorm DiscardSession action in the workflow.\n y = DiscardSessionLib()\n print(y.run(mgmt_ip, sid))\n return return_code, a['errors']'''\n\n except Exception as e:\n print(\"Error is :\",e)\n '''\n #Un-hash the following 2 lines, if you want to use embedded discard to discard the changes in case of errors instead of using the Stackstorm DiscardSession action in the workflow.\n y = DiscardSessionLib()\n print(y.run(mgmt_ip, sid))\n return return_code, a['errors']'''\n","sub_path":"actions/lib/AddSubnet.py","file_name":"AddSubnet.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"404423314","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom collections import defaultdict\nfrom dateutil.relativedelta import relativedelta\n\nfrom odoo import api, fields, models, SUPERUSER_ID, _\nfrom odoo.osv import expression\nfrom odoo.addons.stock.models.stock_rule import ProcurementException\n\n\nclass StockRule(models.Model):\n _inherit = 'stock.rule'\n\n @api.model\n def _run_manufacture(self, procurements):\n productions_values_by_company = defaultdict(list)\n errors = []\n for procurement, rule in procurements:\n bom = rule._get_matching_bom(procurement.product_id,\n procurement.company_id, procurement.values)\n if not bom:\n msg = _('There is no Bill of Material of type manufacture or kit found for the product %s. 
Please define a Bill of Material for this product.') % (\n procurement.product_id.display_name,)\n errors.append((procurement, msg))\n\n productions_values_by_company[procurement.company_id.id].append(\n rule._prepare_mo_vals(*procurement, bom))\n\n if errors:\n raise ProcurementException(errors)\n\n for company_id, productions_values in productions_values_by_company.items():\n # creck if already exists someone mrp production order with the same production\n productions_values_check = []\n for p in productions_values:\n print(\">>>>>>>>>>>>>>>>>>>>> p:\", p)\n current_qty = p['product_qty']\n mrp_production_ids = self.env['mrp.production'].search([\n ('product_id', '=', p['product_id']),\n ('origin', '=', p['origin']),\n ('state', 'not in', ('cancel', 'done', 'draft'))], order='id')\n if mrp_production_ids:\n mrp_qty = sum(m.product_qty for m in mrp_production_ids)\n diff_qty = current_qty-mrp_qty\n if diff_qty > 0:\n p['product_qty'] = diff_qty\n productions_values_check.append(p)\n else:\n break\n else:\n productions_values_check.append(p)\n\n productions_values = productions_values_check\n print(\">>>>>>>>>>>>>>>>>>>>> productions_values_check:\", productions_values_check)\n\n # create the MO as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)\n productions = self.env['mrp.production'].with_user(\n SUPERUSER_ID).sudo().with_company(company_id).create(productions_values)\n self.env['stock.move'].sudo().create(productions._get_moves_raw_values())\n self.env['stock.move'].sudo().create(productions._get_moves_finished_values())\n productions._create_workorder()\n productions.filtered(lambda p: p.move_raw_ids).action_confirm()\n\n for production in productions:\n origin_production = production.move_dest_ids and production.move_dest_ids[\n 0].raw_material_production_id or False\n orderpoint = production.orderpoint_id\n if orderpoint:\n production.message_post_with_view('mail.message_origin_link',\n values={'self': production,\n 'origin': orderpoint},\n subtype_id=self.env.ref('mail.mt_note').id)\n if origin_production:\n production.message_post_with_view('mail.message_origin_link',\n values={'self': production,\n 'origin': origin_production},\n subtype_id=self.env.ref('mail.mt_note').id)\n return True\n # res = super(StockRule, self)._run_manufacture(procurements)\n # print res\n","sub_path":"requiez/models/stock_rule.py","file_name":"stock_rule.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"119179400","text":"#!/usr/bin/env python3\n# Copyright (c) 2014 The Caroline authors. All rights reserved.\n# Use of this source file is governed by a MIT license that can be found in the\n# LICENSE file.\n# Author: Aleksandr Derbenev \n\nimport json\nfrom optparse import OptionParser\nimport os\nimport shutil\nimport random\nimport math\n\nimport bpy\n\nclass Bounds(object):\n def __init__(self, camera):\n self.angle_x = camera.angle_x\n self.angle_y = camera.angle_y\n self.angle = camera.angle\n self.clip_near = camera.clip_start\n self.clip_far = camera.clip_end\n\ndef random_mesh_origin(angle_x, angle_y, clip_near, clip_far):\n \"\"\"Create random origin point for mesh. 
Assume that object will be visible if\n it's origin is in the truncated pyramid.\"\"\"\n # Get z coordinate first because x and y ranges depends from it.\n z = random.uniform(clip_near + 2, clip_far - 2)\n x = random.uniform(-math.tan(angle_x / 2) * z,\n math.tan(angle_x / 2) * z / 2)\n y = random.uniform(-math.tan(angle_y / 2) * z,\n math.tan(angle_y / 2) * z / 2)\n return [x, y, -z]\n\ndef random_mesh_rotation():\n \"\"\"Generate random angles for initial mesh rotation.\"\"\"\n return [random.random(), random.random(), random.random()];\n\ndef random_mesh_constructor():\n \"\"\"Returs lambda for creating a random mesh with given origin point and\n rotation.\"\"\"\n def dimension(max_size):\n \"\"\"Return random value for dimension.\"\"\"\n return random.uniform(max_size / 10, max_size)\n\n return random.choice((\n lambda origin, rotation, max_size:\n bpy.ops.mesh.primitive_cone_add(radius1 = dimension(max_size) / 2,\n radius2 = dimension(max_size) / 2, depth = dimension(max_size),\n end_fill_type='TRIFAN',\n location=origin, rotation=rotation),\n lambda origin, rotation, max_size:\n bpy.ops.mesh.primitive_cube_add(radius = dimension(max_size) / 2,\n location = origin, rotation = rotation),\n lambda origin, rotation, max_size:\n bpy.ops.mesh.primitive_cylinder_add(radius = dimension(max_size) / 2,\n depth = dimension(max_size),\n location = origin, rotation = rotation),\n lambda origin, rotation, max_size:\n bpy.ops.mesh.primitive_ico_sphere_add(size = dimension(max_size) / 2,\n location = origin, rotation = rotation),\n lambda origin, rotation, max_size:\n bpy.ops.mesh.primitive_monkey_add(radius = dimension(max_size) / 2,\n location = origin, rotation = rotation),\n lambda origin, rotation, max_size:\n bpy.ops.mesh.primitive_plane_add(radius = dimension(max_size) / 2,\n location = origin, rotation = rotation),\n lambda origin, rotation, max_size:\n bpy.ops.mesh.primitive_torus_add(\n major_radius = random.uniform(max_size / 4, max_size) / 2,\n minor_radius = dimension(max_size / 5) / 2,\n location = origin, rotation = rotation),\n lambda origin, rotation, max_size:\n bpy.ops.mesh.primitive_uv_sphere_add(size = dimension(max_size) / 2,\n location = origin, rotation = rotation)\n ))\n\ndef override_context(options):\n \"\"\"Copy bpy.context and replace some values by provided in dictionary.\"\"\"\n context = bpy.context.copy()\n for key in options.keys():\n context[key] = options[key]\n return context\n\ndef parse_options():\n \"\"\"Configure and run command line arguments parser.\"\"\"\n parser = OptionParser()\n parser.add_option('-f', '--fixtures', dest = 'count', type = 'int',\n default = 1, action = 'store',\n help = 'Number of test fixtures to generate.')\n parser.add_option('-m', '--meshes', dest = 'meshes', type = 'int',\n default = 1, action = 'store',\n help = 'Maximum number of meshes in the scene.')\n parser.add_option('-o', '--output', dest = 'output', type = 'string',\n default = 'fixture', action = 'store',\n help = 'Name of the output directory. 
You can use %n for fixture number.'),\n parser.add_option('-d', '--distance', dest = 'distance', type = 'float',\n default = 0.08, action = 'store',\n help = 'Distantion between cameras in meters.')\n (options, args) = parser.parse_args()\n if options.count < 1:\n parser.error('Count of fixtures must be positive integer number.')\n if options.meshes < 1:\n parser.error('Maximum count of meshes must be positive integer number.')\n if options.count > 1 and '%n' not in options.output:\n options.output += '.%n'\n if options.distance <= 0:\n parser.error('Distance must be greater that 0.')\n options.output = options.output.replace('%n', '{number}')\n return (options, args);\n\ndef get_camera(): \n camera = None\n for cam in bpy.data.cameras:\n if cam.name in bpy.context.scene.objects.keys():\n camera = cam\n break\n camera_object = bpy.data.objects[camera.name]\n return (camera, camera_object)\n\n\ndef cleanup_scene():\n \"\"\"Cleanup current scene\"\"\"\n for item in bpy.data.meshes:\n if item.name in bpy.context.scene.objects.keys():\n bpy.context.scene.objects.unlink(bpy.data.objects[item.name])\n\ndef generate_mesh(bounds):\n \"\"\"Count location and rotation for mesh, get and call constructor.\"\"\"\n origin = random_mesh_origin(bounds.angle_x, bounds.angle_y,\n bounds.clip_near, bounds.clip_far)\n rotation = random_mesh_rotation()\n max_size = - origin[2] * math.tan(bounds.angle / 2)\n random_mesh_constructor()(origin=origin, rotation=rotation,\n max_size=max_size)\n\ndef generate_meshes(count, bounds):\n \"\"\"Generate meshes on the scene.\"\"\"\n for i in range(count):\n generate_mesh(bounds = bounds)\n\ndef setup_light(bounds):\n \"\"\"Find and set lamp to the appropriate location.\"\"\"\n lamp = bpy.data.lamps[0]\n if not lamp:\n lamp = bpy.data.lamps.new(name='lamp', type='SUN')\n lamp_object = bpy.data.objects[lamp.name]\n if lamp.name not in bpy.context.scene.objects:\n bpy.context.scene.objects.link(lamp_object)\n\n lamp_location = [\n 0.0,\n math.tan(bounds.angle) * bounds.clip_far,\n -(bounds.clip_far - bounds.clip_near) / 2\n ]\n lamp.type = 'SUN'\n lamp_object.location = lamp_location\n lamp_object.rotation_mode = 'XYZ'\n lamp_object.rotation_euler = [0, - math.pi / 2, 0]\n\ndef prepare_dir(name):\n \"\"\"Remove fixture directory if it exists. Create new one.\"\"\"\n if os.path.exists(name):\n shutil.rmtree(name)\n \n os.mkdir(name)\n\ndef save_models(name):\n \"\"\"Save each mesh to the Standford ply file. 
Return scene dictionary for\n config.\"\"\"\n scene = {}\n for mesh in bpy.data.meshes:\n if mesh.name in bpy.context.scene.objects.keys():\n mesh_ply = mesh.name + '.ply'\n bpy.ops.export_mesh.ply(override_context({'selected_objects': mesh}),\n filepath=os.path.join(name, mesh_ply), check_existing=False,\n use_mesh_modifiers=False, use_normals=False, use_uv_coords=False,\n use_colors=False)\n scene[mesh.name] = mesh_ply\n return scene\n\ndef render_frames(name, distance, camera, camera_object, bounds):\n \"\"\"Setup camera and render two frames.\"\"\"\n cameras = []\n for i, location in enumerate([\n [-distance / 2, 0, 0],\n [distance / 2, 0, 0]\n ]):\n render = bpy.context.scene.render\n width = render.resolution_x\n height = render.resolution_y\n cam_name = 'camera_' + str(i)\n render.filepath = os.path.join(name, cam_name + '.png')\n camera_object.location = location\n camera_object.rotation_mode = 'XYZ'\n camera_object.rotation_euler = [0, 0, 0]\n bpy.ops.render.render(animation=False, write_still=True)\n cam = {\n 'focus': camera.lens / 1000.0,\n 'pixel_size': camera.sensor_width / width / 1000.0,\n 'width': width,\n 'height': height,\n 'position': camera_object.location[:],\n 'rotation': camera_object.rotation_euler[:],\n 'type': 'image',\n 'source': camera.name + '.png'\n }\n cameras.append(cam)\n return cameras\n\ndef save_config(name, config):\n \"\"\"Generate config.json for given fixture.\"\"\"\n config_file = open(os.path.join(name, 'config.json'), 'w')\n json.dump(config, config_file, skipkeys=True, ensure_ascii=False,\n allow_nan=False, indent=2, sort_keys=True)\n config_file.write('\\n')\n\ndef save_fixture(name, distance, camera, camera_object, bounds):\n prepare_dir(name = name)\n\n config = {}\n config['scene'] = save_models(name = name)\n\n config['cameras'] = render_frames(name = name, distance = distance,\n camera = camera, camera_object = camera_object, bounds = bounds)\n\n save_config(name = name, config = config)\n\ndef generate_fixture(name, meshes, distance):\n \"\"\"Generate and save a fixture.\"\"\"\n \n name = os.path.normpath(name)\n \n (camera, camera_object) = get_camera()\n bounds = Bounds(camera)\n\n cleanup_scene()\n\n generate_meshes(count = meshes, bounds = bounds)\n setup_light(bounds = bounds)\n\n save_fixture(name = name, distance = distance, camera=camera,\n camera_object=camera_object, bounds=bounds)\n\ndef generate_fixtures(count, output, meshes, distance):\n \"\"\"Generate all fixtures.\"\"\"\n for i in range(count):\n generate_fixture(name = output.format(number = i),\n meshes = random.randrange(meshes) + 1, distance = distance)\n\ndef main():\n \"\"\"Entry point.\"\"\"\n (options, args) = parse_options()\n generate_fixtures(count = options.count, output = options.output,\n meshes = options.meshes, distance = options.distance)\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/fixture-generator.py","file_name":"fixture-generator.py","file_ext":"py","file_size_in_byte":9298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"53280433","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask,render_template\nfrom flask.ext.bootstrap import Bootstrap, WebCDN, ConditionalCDN, BOOTSTRAP_VERSION, JQUERY_VERSION, HTML5SHIV_VERSION, RESPONDJS_VERSION\nfrom flask.ext.moment import Moment\nfrom datetime import datetime\nimport sys\n\n\n#py2 unicode\nreload(sys)\nsys.setdefaultencoding('utf-8')\napp = Flask(__name__)\nbootstrap = Bootstrap(app)\nmoment = Moment(app)\n\n\ndef 
change_cdn_domestic(tar_app):\n \"\"\"\n 中国特色 flask-bootstrap\n 源码来自 新浪微博 @蛋-壳-酱 万分感谢!\n :param tar_app:\n :return:\n \"\"\"\n static = tar_app.extensions['bootstrap']['cdns']['static']\n local = tar_app.extensions['bootstrap']['cdns']['local']\n\n def change_one(tar_lib, tar_ver, fallback):\n tar_js = ConditionalCDN('BOOTSTRAP_SERVE_LOCAL', fallback,\n WebCDN('//cdn.bootcss.com/' + tar_lib + '/' + tar_ver + '/'))\n tar_app.extensions['bootstrap']['cdns'][tar_lib] = tar_js\n\n libs = {'jquery': {'ver': JQUERY_VERSION, 'fallback': local},\n 'bootstrap': {'ver': BOOTSTRAP_VERSION, 'fallback': local},\n 'html5shiv': {'ver': HTML5SHIV_VERSION, 'fallback': static},\n 'respond.js': {'ver': RESPONDJS_VERSION, 'fallback': static}}\n for lib, par in libs.items():\n change_one(lib, par['ver'], par['fallback'])\n\nchange_cdn_domestic(app)\n\n@app.route('/')\ndef index():\n return render_template('index.html', current_time=datetime.utcnow())\n\n@app.route('/user/')\ndef hello_world(name):\n return render_template('user.html', name=name)\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n@app.errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n\nif __name__ == '__main__':\n app.run(\n debug=True\n )\n","sub_path":"flasky-bolg.py","file_name":"flasky-bolg.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"49063578","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom movies.models import Movie, Person, Genre, Language, Watcher, Country, Rating, Review, Notification, MovieList\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, Http404\nfrom django.core import files\nfrom django.db.models import Q\nimport sys\nimport json\nimport requests\nimport datetime\nimport tempfile\nfrom movies.pagination import Pagination\n\ndef process_movies(w, ms, settings):\n if settings.get('owned', False):\n w_owned = w.owned.all()\n for m in w_owned:\n pass\n if settings.get('watched', False):\n w_ratings = Rating.objects.filter(watcher=w)\n for m in w_ratings:\n pass\n if settings.get('user', False):\n u_ratings = Rating.objects.filter(watcher=settings.get('user', False))\n for m in u_ratings:\n pass\n for m in ms:\n if settings.get('owned', False):\n m.is_owned = False\n for o in w_owned:\n if o.id == m.id:\n m.is_owned = True\n break\n if settings.get('watched', False):\n m.is_watched = False\n m.my_rating = 0\n for o in w_ratings:\n if o.movie_id == m.id:\n m.is_watched = True\n m.my_rating = o.rating\n break\n if settings.get('user', False):\n m.user_watched = False\n m.user_rating = 0\n for o in u_ratings:\n if o.movie_id == m.id:\n m.user_watched = True\n m.user_rating = o.rating\n break\n\n@login_required\ndef home(request):\n movies = Movie.objects.order_by('-date_added', '-id').prefetch_related('directors', 'actors', 'genres')[:10]\n movie_count = Movie.objects.all().count()\n ratings = Rating.objects.order_by('-date_added', '-id').select_related()[:10]\n rating_count = Rating.objects.all().count()\n reviews = Review.objects.order_by('-date_added', '-id').select_related()[:10]\n review_count = Review.objects.all().count()\n lists = MovieList.objects.order_by('-date_updated', '-id')[:10]\n list_count = MovieList.objects.all().count()\n process_movies(request.user.watcher, movies, {\n 'watched': True,\n 'owned': True,\n })\n return render(request, 'home.html', {\n 
'movies': movies,\n 'ratings': ratings,\n 'reviews': reviews,\n 'lists': lists,\n 'menu_home': True,\n 'show_watched': True,\n 'show_owned': True,\n 'movie_pagination': Pagination(1, (movie_count+9)/10),\n 'rating_pagination': Pagination(1, (rating_count+9)/10),\n 'review_pagination': Pagination(1, (review_count+9)/10),\n 'list_pagination': Pagination(1, (list_count+9)/10),\n 'counter_offset': 0,\n })\n\n@login_required\ndef profile(request):\n w = request.user.watcher\n ratings = Rating.objects.filter(watcher=w).order_by('-date_added', '-id').select_related()[:10]\n rating_count = Rating.objects.filter(watcher=w).count()\n reviews = Review.objects.filter(watcher=w).order_by('-date_added', '-id').select_related()[:10]\n review_count = Review.objects.filter(watcher=w).count()\n return render(request, 'profile/profile.html', {\n 'ratings': ratings,\n 'reviews': reviews,\n 'menu_profile': True,\n 'rating_pagination': Pagination(1, (rating_count+9)/10),\n 'review_pagination': Pagination(1, (review_count+9)/10),\n 'counter_offset': 0,\n 'hide_user': True,\n })\n\n@login_required\ndef profile_waiting(request):\n w = request.user.watcher\n ms = Movie.objects.filter(owners=w).exclude(watched_by=w).order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(w, ms, {'watched': True})\n return render(request, 'profile/index.html', {\n 'movies': ms,\n 'menu_profile': True,\n 'submenu_waiting': True,\n 'show_watched': True,\n })\n\n@login_required\ndef profile_owned(request):\n ms = request.user.watcher.owned.all().order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms, {'owned': True})\n return render(request, 'profile/index.html', {\n 'movies': ms,\n 'menu_profile': True,\n 'submenu_owned': True,\n 'show_owned': True,\n })\n\n@login_required\ndef profile_watched(request):\n ms = request.user.watcher.watched.all().order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms, {'watched': True})\n return render(request, 'profile/index.html', {\n 'movies': ms,\n 'menu_profile': True,\n 'submenu_watched': True,\n 'show_watched': True,\n })\n\n@login_required\ndef profile_lists(request):\n ls = MovieList.objects.filter(watcher=request.user.watcher).order_by('-date_updated')\n return render(request, 'profile/lists.html', {\n 'lists': ls,\n 'menu_profile': True,\n 'submenu_lists': True,\n })\n\n@login_required\ndef profile_settings(request):\n return render(request, 'profile/settings.html', {\n 'menu_profile': True,\n 'submenu_settings': True,\n })\n\n@login_required\ndef profile_notifications(request):\n ns = Notification.objects.filter(watcher=request.user.watcher).order_by('-date_added')\n rs = Notification.objects.filter(target=request.user.watcher).order_by('-date_added')\n return render(request, 'profile/notifications.html', {\n 'all_notifications': ns,\n 'reverse_notifications': rs,\n 'menu_profile': True,\n 'submenu_notifications': True,\n })\n\n@login_required\ndef users(request):\n ws = Watcher.objects.exclude(id=request.user.watcher.id)\n return render(request, 'users/index.html', {\n 'watchers': ws,\n 'menu_users': True,\n 'hide_submenu': True,\n })\n\n@login_required\ndef user(request, id):\n w = get_object_or_404(Watcher, id=id)\n if w == request.user.watcher:\n return redirect(profile)\n ratings = Rating.objects.filter(watcher=w).order_by('-date_added', '-id').select_related()[:10]\n rating_count = Rating.objects.filter(watcher=w).count()\n reviews = 
Review.objects.filter(watcher=w).order_by('-date_added', '-id').select_related()[:10]\n review_count = Review.objects.filter(watcher=w).count()\n return render(request, 'users/profile.html', {\n 'ratings': ratings,\n 'reviews': reviews,\n 'menu_users': True,\n 'menu_users_value': w.get_name(),\n 'watcher': w,\n 'rating_pagination': Pagination(1, (rating_count+9)/10),\n 'review_pagination': Pagination(1, (review_count+9)/10),\n 'counter_offset': 0,\n 'hide_user': True,\n })\n\n@login_required\ndef user_borrow(request, id):\n w = get_object_or_404(Watcher, id=id)\n if w==request.user.watcher:\n return redirect(profile)\n ms = Movie.objects.filter(owners=w).exclude(owners=request.user.watcher).prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms, {\n 'owned': True,\n 'user': w,\n })\n return render(request, 'users/user.html', {\n 'movies': ms,\n 'menu_users': True,\n 'submenu_borrow': True,\n 'menu_users_value': w.get_name(),\n 'watcher': w,\n 'show_rated': True,\n 'show_owned': True,\n });\n\n@login_required\ndef user_owned(request, id):\n w = get_object_or_404(Watcher, id=id)\n if w==request.user.watcher:\n return redirect(profile)\n ms = w.owned.all().order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms, {'watched': True})\n return render(request, 'users/user.html', {\n 'movies': ms,\n 'menu_users': True,\n 'submenu_owned': True,\n 'menu_users_value': w.get_name(),\n 'watcher': w,\n 'show_watched': True,\n })\n\n@login_required\ndef user_watched(request, id):\n w = get_object_or_404(Watcher, id=id)\n if w==request.user.watcher:\n return redirect(profile)\n ms = w.watched.all().order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms, {\n 'watched': True,\n 'user': w,\n })\n return render(request, 'users/user.html', {\n 'movies': ms,\n 'menu_users': True,\n 'submenu_watched': True,\n 'menu_users_value': w.get_name(),\n 'watcher': w,\n 'show_rated': True,\n 'show_watched': True,\n })\n\n@login_required\ndef user_lists(request, id):\n w = get_object_or_404(Watcher, id=id)\n if w==request.user.watcher:\n return redirect(profile)\n ls = MovieList.objects.filter(watcher=w).order_by('-date_updated')\n return render(request, 'users/lists.html', {\n 'lists': ls,\n 'menu_users': True,\n 'submenu_lists': True,\n 'menu_users_value': w.get_name(),\n 'watcher': w,\n })\n\n@login_required\ndef show_list(request, id):\n l = get_object_or_404(MovieList, id=id)\n ms = l.movies.all().prefetch_related('directors', 'actors', 'genres')\n is_own = l.watcher == request.user.watcher\n if is_own:\n process_movies(request.user.watcher, ms, {'watched': True})\n return render(request, 'profile/list.html', {\n 'list': l,\n 'movies': ms,\n 'menu_profile': True,\n 'submenu_lists': True,\n 'show_watched': True,\n })\n else:\n process_movies(request.user.watcher, ms, {\n 'watched': True,\n 'user': l.watcher,\n })\n return render(request, 'users/list.html', {\n 'list': l,\n 'movies': ms,\n 'menu_users': True,\n 'submenu_lists': True,\n 'watcher': l.watcher,\n 'show_watched': True,\n 'show_rated': True,\n })\n\n@login_required\ndef remove_list(request, id):\n l = get_object_or_404(MovieList, id=id)\n if l.watcher != request.user.watcher:\n raise Http404()\n l.delete()\n return redirect(profile_lists)\n\n@login_required\ndef add_list(request):\n if request.method != \"POST\":\n raise Http404()\n title = request.POST['title']\n description = request.POST['description']\n if not 
title or not description:\n return redirect(profile_lists)\n MovieList.objects.create(title=title, description=description, watcher=request.user.watcher)\n return redirect(profile_lists)\n\n@login_required\ndef show_list_for_movie(request, id):\n m = get_object_or_404(Movie, id=id)\n ls = MovieList.objects.filter(watcher=request.user.watcher)\n for l in ls:\n l.has_movie = (l.movies.filter(id=m.id).count() > 0)\n return render(request, 'partial/lists_movie.html', {\n 'movie': m,\n 'lists': ls,\n })\n\n@login_required\ndef add_movie_to_list(request, list_id, movie_id):\n try:\n m = get_object_or_404(Movie, id=movie_id)\n l = get_object_or_404(MovieList, id=list_id)\n except:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n l.movies.add(m)\n l.date_updated = datetime.datetime.now()\n l.save()\n return HttpResponse(json.dumps({'status': True}), content_type='application/json')\n\n@login_required\ndef remove_movie_from_list(request, list_id, movie_id):\n try:\n m = get_object_or_404(Movie, id=movie_id)\n l = get_object_or_404(MovieList, id=list_id)\n except:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n l.movies.remove(m)\n l.date_updated = datetime.datetime.now()\n l.save()\n return HttpResponse(json.dumps({'status': True}), content_type='application/json')\n\n@login_required\ndef movie(request, id):\n m = get_object_or_404(Movie, id=id)\n if request.user.watcher.owned.filter(id=id).count() > 0:\n m.is_owned = True\n else:\n m.is_owned = False\n try:\n r = Rating.objects.get(watcher=request.user.watcher, movie=m)\n m.is_watched = True\n m.my_rating = r.rating\n except:\n m.is_watched = False\n m.my_rating = 0\n ratings = Rating.objects.filter(movie=m).exclude(watcher=request.user.watcher)\n reviews = Review.objects.filter(movie=m).exclude(watcher=request.user.watcher)\n try:\n my_review = Review.objects.get(watcher=request.user.watcher, movie=m)\n except:\n my_review = False\n rating_dict = dict([(o.watcher_id, o) for o in ratings])\n for r in reviews:\n r.rated = rating_dict.get(r.watcher_id, False)\n watchers = Watcher.objects.all().exclude(id=request.user.watcher.id)\n owners_dict = dict([(o.id, o.id) for o in m.owners.all()])\n reviewers_dict = dict([(r.watcher_id, r.watcher_id) for r in m.reviewed_by.all()])\n for w in watchers:\n w.owns = (w.id in owners_dict)\n w.reviewed = (w.id in reviewers_dict)\n ls = m.movielist_set.all().order_by('-date_updated', '-id')\n for l in ls:\n if l.watcher == request.user.watcher:\n l.is_own = True\n else:\n l.is_own = False\n return render(request, 'movies/movie.html', {\n 'movie': m,\n 'ratings': ratings,\n 'reviews': reviews,\n 'lists': ls,\n 'watchers': watchers,\n 'my_review': my_review,\n 'menu_movies': True,\n 'hide_submenu': True,\n 'menu_movies_value': m.title,\n })\n\n@login_required\ndef movie_review(request, id):\n m = get_object_or_404(Movie, id=id)\n if request.method == \"POST\":\n if not 'review' in request.POST or not request.POST['review']:\n return redirect(movie, id)\n try:\n review = Review.objects.get(watcher=request.user.watcher, movie=m)\n review.review = request.POST['review']\n review.save()\n except:\n review = Review.objects.create(watcher=request.user.watcher, movie=m, review=request.POST['review'])\n return redirect(movie, id)\n try:\n review = Review.objects.get(watcher=request.user.watcher, movie=m)\n except:\n review = False\n return render(request, 'movies/review.html', {\n 'movie': m,\n 'review': review,\n 'menu_movies': True,\n 
'hide_submenu': True,\n 'menu_movies_value': \"Review (%s)\" % (m.title),\n })\n\n@login_required\ndef movies(request):\n ms = Movie.objects.all().order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms,{\n 'watched': True,\n 'owned': True,\n })\n return render(request, 'movies/index.html', {\n 'movies': ms,\n 'menu_movies': True,\n 'submenu_all': True,\n 'show_watched': True,\n 'show_owned': True,\n })\n\n@login_required\ndef years(request):\n ys = Movie.objects.all().order_by('year').values_list('year', flat=True).distinct()\n return render(request, 'movies/years.html', {\n 'years': ys,\n 'menu_movies': True,\n 'submenu_years': True,\n 'show_submenu_search': True,\n })\n\n@login_required\ndef by_year(request, y):\n ms = Movie.objects.filter(year=y).order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms, {\n 'watched': True,\n 'owned': True,\n })\n return render(request, 'movies/index.html', {\n 'movies': ms,\n 'menu_movies': True,\n 'submenu_years': True,\n 'menu_movies_value': \"By Year (%s)\" % (y),\n 'show_watched': True,\n 'show_owned': True,\n })\n\n@login_required\ndef genres(request):\n gs = Genre.objects.all().order_by('name')\n return render(request, 'movies/genres.html', {\n 'genres': gs,\n 'menu_movies': True,\n 'submenu_genres': True,\n 'show_submenu_search': True,\n })\n\n@login_required\ndef by_genre(request, id):\n g = get_object_or_404(Genre, id=id)\n ms = Movie.objects.filter(genres=g).order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms, {\n 'watched': True,\n 'owned': True,\n })\n return render(request, 'movies/index.html', {\n 'movies': ms,\n 'menu_movies': True,\n 'submenu_genres': True,\n 'menu_movies_value': \"By Genre (%s)\" % (g.name),\n 'show_watched': True,\n 'show_owned': True,\n })\n\n@login_required\ndef languages(request):\n ls = Language.objects.all().order_by('name')\n return render(request, 'movies/languages.html', {\n 'languages': ls,\n 'menu_movies': True,\n 'submenu_languages': True,\n 'show_submenu_search': True,\n })\n\n@login_required\ndef by_language(request, id):\n l = get_object_or_404(Language, id=id)\n ms = Movie.objects.filter(language=l).order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms, {\n 'watched': True,\n 'owned': True,\n })\n return render(request, 'movies/index.html', {\n 'movies': ms,\n 'menu_movies': True,\n 'submenu_languages': True,\n 'menu_movies_value': \"By Language (%s)\" % (l.name),\n 'show_watched': True,\n 'show_owned': True,\n })\n\n@login_required\ndef countries(request):\n cs = Country.objects.all().order_by('name')\n return render(request, 'movies/countries.html', {\n 'countries': cs,\n 'menu_movies': True,\n 'submenu_countries': True,\n 'show_submenu_search': True,\n })\n\n@login_required\ndef by_country(request, id):\n c = get_object_or_404(Country, id=id)\n ms = Movie.objects.filter(country=c).order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, ms, {\n 'watched': True,\n 'owned': True,\n })\n return render(request, 'movies/index.html', {\n 'movies': ms,\n 'menu_movies': True,\n 'submenu_countries': True,\n 'menu_movies_value': \"By Country (%s)\" % (c.name),\n 'show_watched': True,\n 'show_owned': True,\n })\n\n@login_required\ndef by_query(request, query):\n ms = Movie.objects.filter(Q(title__icontains=query) | Q(subtitle__icontains=query) | 
Q(imdbID=query)).order_by('year').prefetch_related('directors', 'actors', 'genres')\n ps = Person.objects.filter(name__icontains=query)\n if ms.count() == 1 and ps.count() == 0:\n return redirect(movie, ms[0].id)\n if ms.count() == 0 and ps.count() == 1:\n return redirect(by_person, ps[0].id)\n process_movies(request.user.watcher, ms, {\n 'watched': True,\n 'owned': True,\n })\n return render(request, 'movies/query.html', {\n 'movies': ms,\n 'persons': ps,\n 'menu_movies': True,\n 'submenu_query': True,\n 'menu_movies_value': \"By Query (%s)\" % (query),\n 'show_watched': True,\n 'show_owned': True,\n }) \n\n@login_required\ndef persons(request):\n ps = Person.objects.all().order_by('name')\n return render(request, 'movies/persons.html', {\n 'persons': ps,\n 'menu_movies': True,\n 'submenu_persons': True,\n 'show_submenu_search': True,\n })\n\n@login_required\ndef by_person(request, id):\n p = get_object_or_404(Person, id=id)\n directed = p.directed.all().order_by('year').prefetch_related('directors', 'actors', 'genres')\n written = p.written.all().order_by('year').prefetch_related('directors', 'actors', 'genres')\n acted_in = p.acted_in.all().order_by('year').prefetch_related('directors', 'actors', 'genres')\n process_movies(request.user.watcher, directed, {\n 'watched': True,\n 'owned': True,\n })\n process_movies(request.user.watcher, written, {\n 'watched': True,\n 'owned': True,\n })\n process_movies(request.user.watcher, acted_in, {\n 'watched': True,\n 'owned': True,\n })\n return render(request, 'movies/person.html', {\n 'person': p,\n 'directed': directed,\n 'acted_in': acted_in,\n 'written': written,\n 'menu_movies': True,\n 'submenu_persons': True,\n 'menu_movies_value': \"By Person (%s)\" % (p.name),\n 'show_watched': True,\n 'show_owned': True,\n })\n\n@login_required\ndef pagination_movie(request, page):\n if request.method != \"POST\":\n raise Http404()\n try:\n page = int(page)\n except:\n raise Http404()\n m_count = Movie.objects.all().count()\n if page*10-10 > m_count:\n raise Http404()\n ms = Movie.objects.order_by('-date_added', '-id').prefetch_related('directors', 'actors', 'genres')[page*10-10:page*10]\n process_movies(request.user.watcher, ms, {\n 'watched': True,\n 'owned': True,\n })\n return render(request, 'partial/movie_table.html', {\n 'movies': ms,\n 'show_watched': True,\n 'show_owned': True,\n 'pagination': Pagination(page, (m_count+9)/10),\n 'counter_offset': page*10-10,\n })\n\n@login_required\ndef pagination_rating(request, page):\n if request.method != \"POST\":\n raise Http404()\n try:\n page = int(page)\n except:\n raise Http404()\n r_count = Rating.objects.all().count()\n if page*10-10 > r_count:\n raise Http404()\n rs = Rating.objects.order_by('-date_added', '-id').select_related()[page*10-10:page*10]\n return render(request, 'partial/rating_table.html', {\n 'ratings': rs,\n 'pagination': Pagination(page, (r_count+9)/10),\n 'counter_offset': page*10-10,\n })\n\n@login_required\ndef pagination_review(request, page):\n if request.method != \"POST\":\n raise Http404()\n try:\n page = int(page)\n except:\n raise Http404()\n r_count = Review.objects.all().count()\n if page*10-10 > r_count:\n raise Http404()\n rs = Review.objects.order_by('-date_added', '-id').select_related()[page*10-10:page*10]\n return render(request, 'partial/review_table.html', {\n 'reviews': rs,\n 'pagination': Pagination(page, (r_count+9)/10),\n 'counter_offset': page*10-10,\n })\n\n@login_required\ndef pagination_list(request, page):\n if request.method != \"POST\":\n raise 
Http404()\n try:\n page = int(page)\n except:\n raise Http404()\n l_count = MovieList.objects.all().count()\n if page*10-10 > l_count:\n raise Http404()\n ls = MovieList.objects.order_by('-date_updated', '-id')[page*10-10:page*10]\n return render(request, 'partial/list_table.html', {\n 'lists': ls,\n 'pagination': Pagination(page, (l_count+9)/10),\n 'counter_offset': page*10-10,\n })\n\n@login_required\ndef pagination_user_movie(request, id, page):\n w = get_object_or_404(Watcher, id=id)\n if request.method != \"POST\":\n raise Http404()\n try:\n page = int(page)\n except:\n raise Http404()\n m_count = w.watched.all().count()\n if page*10-10 > m_count:\n raise Http404()\n ms = w.watched.order_by('-date_added', '-id').prefetch_related('directors', 'actors', 'genres')[page*10-10:page*10]\n process_movies(request.user.watcher, ms, {\n 'watched': True,\n 'owned': True,\n 'user': w,\n })\n return render(request, 'partial/movie_table.html', {\n 'movies': ms,\n 'show_watched': True,\n 'show_owned': True,\n 'show_rated': True,\n 'pagination': Pagination(page, (m_count+9)/10),\n 'counter_offset': page*10-10,\n })\n\n@login_required\ndef pagination_user_rating(request, id, page):\n w = get_object_or_404(Watcher, id=id)\n if request.method != \"POST\":\n raise Http404()\n try:\n page = int(page)\n except:\n raise Http404()\n r_count = Rating.objects.filter(watcher=w).count()\n if page*10-10 > r_count:\n raise Http404()\n rs = Rating.objects.filter(watcher=w).order_by('-date_added', '-id').select_related()[page*10-10:page*10]\n return render(request, 'partial/rating_table.html', {\n 'ratings': rs,\n 'pagination': Pagination(page, (r_count+9)/10),\n 'counter_offset': page*10-10,\n 'hide_user': True,\n })\n\n@login_required\ndef pagination_user_review(request, id, page):\n w = get_object_or_404(Watcher, id=id)\n if request.method != \"POST\":\n raise Http404()\n try:\n page = int(page)\n except:\n raise Http404()\n r_count = Review.objects.filter(watcher=w).count()\n if page*10-10 > r_count:\n raise Http404()\n rs = Review.objects.filter(watcher=w).order_by('-date_added', '-id').select_related()[page*10-10:page*10]\n return render(request, 'partial/review_table.html', {\n 'reviews': rs,\n 'pagination': Pagination(page, (r_count+9)/10),\n 'counter_offset': page*10-10,\n 'hide_user': True,\n })\n\n@login_required\ndef pagination_profile_movie(request, page):\n return pagination_user_movie(request, request.user.watcher.id, page)\n\n@login_required\ndef pagination_profile_rating(request, page):\n return pagination_user_rating(request, request.user.watcher.id, page)\n\n@login_required\ndef pagination_profile_review(request, page):\n return pagniation_user_rating(request, request.user.watcher.id, page)\n\n@login_required\ndef notification_read(request, id):\n if request.method != \"POST\":\n raise Http404()\n notif = get_object_or_404(Notification, id=id, watcher=request.user.watcher)\n notif.read = True\n notif.save()\n return HttpResponse(json.dumps({'status': True}), content_type='application/json') \n\n@login_required\ndef movie_suggest(request, movie_id, watcher_id):\n if request.method != \"POST\":\n raise Http404()\n movie = get_object_or_404(Movie, id=movie_id)\n watcher = get_object_or_404(Watcher, id=watcher_id)\n if watcher == request.user.watcher:\n raise Http404()\n Notification.create_suggestion(watcher, request.user.watcher, movie) \n return HttpResponse(json.dumps({'status': True}), content_type='application/json')\n\n@login_required\ndef movie_ask_borrow(request, movie_id, watcher_id):\n if 
request.method != \"POST\":\n raise Http404()\n movie = get_object_or_404(Movie, id=movie_id)\n watcher = get_object_or_404(Watcher, id=watcher_id)\n if watcher == request.user.watcher:\n raise Http404()\n Notification.create_ask_borrow(watcher, request.user.watcher, movie) \n return HttpResponse(json.dumps({'status': True}), content_type='application/json')\n\n@login_required\ndef movie_ask_review(request, movie_id, watcher_id):\n if request.method != \"POST\":\n raise Http404()\n movie = get_object_or_404(Movie, id=movie_id)\n watcher = get_object_or_404(Watcher, id=watcher_id)\n if watcher == request.user.watcher:\n raise Http404()\n Notification.create_ask_review(watcher, request.user.watcher, movie) \n return HttpResponse(json.dumps({'status': True}), content_type='application/json')\n\n@login_required\ndef user_ask_suggest(request, id):\n if request.method != \"POST\":\n raise Http404()\n w = get_object_or_404(Watcher, id=id)\n if request.user.watcher == w:\n raise Http404()\n Notification.create_ask_suggestion(w, request.user.watcher)\n return HttpResponse(json.dumps({'status': True}), content_type='application/json')\n\n@login_required\ndef profile_settings_mail(request):\n if request.method != \"POST\" or not 'value' in request.POST:\n raise Http404()\n receive = False\n if request.POST['value'] == 'true':\n receive = True\n request.user.watcher.settings_mail = receive\n request.user.watcher.save()\n return HttpResponse(json.dumps({'status': True}), content_type='application/json')\n\n@login_required\ndef own(request, id):\n try:\n m = Movie.objects.get(id=id)\n except:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n request.user.watcher.owned.add(m)\n return HttpResponse(json.dumps({'status': True}), content_type='application/json')\n \n@login_required\ndef unown(request, id):\n try:\n m = Movie.objects.get(id=id)\n except:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n request.user.watcher.owned.remove(m)\n return HttpResponse(json.dumps({'status': True}), content_type='application/json')\n \n@login_required\ndef watch(request, id):\n if request.method != 'POST' or not 'rating' in request.POST:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n try:\n m = Movie.objects.get(id=id)\n except:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n rating = request.POST['rating']\n if Rating.objects.filter(watcher=request.user.watcher, movie=m).count() > 0:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n try:\n Rating.objects.create(watcher=request.user.watcher, movie=m, rating=rating)\n except:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n rating = int(rating)\n raters = Rating.objects.filter(movie=m).count()\n if raters == 0:\n m.avg_rating = rating\n else:\n m.avg_rating = (m.avg_rating*(raters-1)+rating)/raters;\n m.save()\n return HttpResponse(json.dumps({'status': True, 'rating': m.avg_rating}), content_type='application/json')\n\n@login_required\ndef rate(request, id):\n if request.method != 'POST' or not 'rating' in request.POST:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n try:\n m = Movie.objects.get(id=id)\n ratingObj = Rating.objects.get(watcher=request.user.watcher, movie=m)\n except:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n rating = 
request.POST['rating']\n try:\n oldRating = ratingObj.rating\n ratingObj.rating = rating\n ratingObj.save()\n except:\n return HttpResponse(json.dumps({'status': False}), content_type='application/json')\n rating = int(rating)\n raters = Rating.objects.filter(movie=m).count()\n m.avg_rating = (m.avg_rating*(raters)+rating-oldRating)/raters;\n m.save()\n return HttpResponse(json.dumps({'status': True, 'rating': m.avg_rating}), content_type='application/json')\n\n@login_required\ndef add(request):\n if request.method == \"POST\":\n title = request.POST['title']\n subtitle = request.POST.get('subtitle', None)\n yr = request.POST['year']\n genres = []\n for g in request.POST['genres'].split(','):\n g = g.strip()\n try:\n g = Genre.objects.get(name=g)\n except:\n g = Genre.objects.create(name=g)\n genres.append(g)\n runtime = request.POST['runtime']\n plot = request.POST['plot']\n languages = []\n for l in request.POST['languages'].split(','):\n l = l.strip()\n try:\n l = Language.objects.get(name=l)\n except:\n l = Language.objects.create(name=l)\n languages.append(l)\n countries = []\n for c in request.POST['country'].split(','):\n c = c.strip()\n try:\n c = Country.objects.get(name=c)\n except:\n c = Country.objects.create(name=c)\n countries.append(c)\n imdbID = request.POST['imdbID']\n if Movie.objects.filter(imdbID=imdbID).count() > 0:\n return redirect(movie, get_object_or_404(Movie, imdbID=imdbID).id)\n directors = []\n for d in request.POST['directors'].split(','):\n d = d.strip()\n try:\n d = Person.objects.get(name=d)\n except:\n d = Person.objects.create(name=d)\n directors.append(d)\n writers = []\n for w in request.POST['writers'].split(','):\n w = w.strip()\n try:\n w = Person.objects.get(name=w)\n except:\n w = Person.objects.create(name=w)\n writers.append(w)\n actors = []\n for a in request.POST['actors'].split(','):\n a = a.strip()\n try:\n a = Person.objects.get(name=a)\n except:\n a = Person.objects.create(name=a)\n actors.append(a)\n owned = 'owned' in request.POST\n m = Movie.objects.create(title=title, year=yr, runtime=runtime,\n plot=plot, imdbID=imdbID, subtitle=subtitle)\n if 'url' in request.POST and request.POST['url']:\n url = request.POST['url']\n req = requests.get(url, stream=True)\n if req.status_code == requests.codes.ok:\n file_name = \"%s.jpg\" % (m.imdbID)\n lf = tempfile.NamedTemporaryFile()\n for block in req.iter_content(1024*8):\n if not block:\n break\n lf.write(block)\n m.poster.save(file_name, files.File(lf))\n m.save()\n for g in genres:\n m.genres.add(g)\n for l in languages:\n m.language.add(l)\n for c in countries:\n m.country.add(c)\n for d in directors:\n m.directors.add(d)\n for w in writers:\n m.writers.add(w)\n for a in actors:\n m.actors.add(a)\n if owned:\n request.user.watcher.owned.add(m)\n return redirect(movie, m.id)\n return render(request, 'add.html', {\n 'menu_add': True,\n })\n","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":33506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"5214668","text":"import arrow\nfrom keg.db import db\nfrom keg_elements.db.mixins import DefaultColsMixin, MethodsMixin\nfrom sqlalchemy.dialects.postgresql import insert as pgsql_insert\nfrom sqlalchemy.inspection import inspect\n\n\nclass EntityMixin(DefaultColsMixin, MethodsMixin):\n @classmethod\n def upsert(cls, values=None, on_conflict_do='update', index_elements=None, **kwargs):\n if index_elements is None:\n index_elements = 
cls.__upsert_index_elements__\n\n if values is None:\n values = kwargs\n\n primary_key_col = inspect(cls).primary_key[0]\n stmt = pgsql_insert(cls.__table__).returning(primary_key_col).values(**values)\n\n assert on_conflict_do in ('nothing', 'update')\n if on_conflict_do == 'update':\n touching_timestamps = {}\n if hasattr(cls, 'updated_utc'):\n # on_conflict_do_update won't take into account columns onupdate attributes\n touching_timestamps.update({'updated_utc': arrow.utcnow()})\n stmt = stmt.on_conflict_do_update(\n index_elements=index_elements,\n set_={**values, **touching_timestamps}\n )\n else:\n stmt = stmt.on_conflict_do_nothing(index_elements=index_elements)\n\n result = db.session.execute(stmt)\n\n return result.scalar()\n\n\ndef tc_relation(kwargs, rel_col_name, rel_ent_cls):\n id_col_name = f'{rel_col_name}_id'\n rel_name_dunder = f'{rel_col_name}__'\n\n if id_col_name not in kwargs and rel_col_name not in kwargs:\n rel_keys = [key for key in kwargs.keys() if key.startswith(rel_name_dunder)]\n rel_kwargs = {key.replace(rel_name_dunder, '', 1): kwargs.pop(key) for key in rel_keys}\n rel_ent_inst = rel_ent_cls.testing_create(**rel_kwargs)\n kwargs[id_col_name] = rel_ent_inst.id\n\n return kwargs\n","sub_path":"{{cookiecutter.src_dirname}}/{{cookiecutter.project_pymod}}/libs/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"301455407","text":"from flask import Flask, render_template\nfrom data import db_session\nfrom data.users import User\nfrom data.jobs import Jobs\nimport os\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\n\n\n@app.route(\"/\")\ndef main():\n db_session.global_init(\"db/blogs.db\")\n db_sess = db_session.create_session()\n data = []\n for job in db_sess.query(Jobs).all():\n for user in db_sess.query(User).filter(User.id == job.team_leader):\n full_name = f\"{user.surname} {user.name}\"\n data.append([job.job, full_name, job.work_size, job.collaborators, job.is_finished])\n return render_template('journal.html', data=data)\n\n\nif __name__ == '__main__':\n app.run(port=8080, host='127.0.0.1')\n","sub_path":"WEB. 
Знакомство с flask-sqlalchemy/Журнал работ/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"111662331","text":"#БСБО-05-19 Салынь Даниил Леонидович\nwb_tree = []\n\n\ndef fill_wb_tree():\n global wb_tree\n global i\n black = []\n white = []\n black.extend([white, white, white])\n white.extend([black, black])\n wb_tree.append(black)\n\n\nfill_wb_tree()\n","sub_path":"lr3/21.5.py","file_name":"21.5.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"162398798","text":"import time\nfrom queue import Empty, Full\n\nimport cv2\nfrom imutils import resize\n\nfrom pipert.core.message import Message\nfrom pipert.core.message_handlers import RedisHandler\nfrom pipert.core.message import message_decode, message_encode\nfrom pipert.core.routine import Routine\n\n\nclass Listen2Stream(Routine):\n\n def __init__(self, stream_address, queue, fps=30., *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.stream_address = stream_address\n self.isFile = str(stream_address).endswith(\"mp4\")\n self.stream = None\n # self.stream = cv2.VideoCapture(self.stream_address)\n self.queue = queue\n self.fps = fps\n self.updated_config = {}\n\n def begin_capture(self):\n self.stream = cv2.VideoCapture(self.stream_address)\n if self.isFile:\n self.fps = self.stream.get(cv2.CAP_PROP_FPS)\n self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n self.logger.info(\"Starting video capture on %s\", self.stream_address)\n\n def change_stream(self):\n if self.stream_address == self.updated_config['stream_address']:\n return\n self.stream_address = self.updated_config['stream_address']\n self.fps = self.updated_config['FPS']\n self.isFile = str(self.stream_address).endswith(\"mp4\")\n self.logger.info(\"Changing source stream address to %s\",\n self.updated_config['stream_address'])\n self.begin_capture()\n\n def grab_frame(self):\n grabbed, frame = self.stream.read()\n msg = Message(frame, self.stream_address)\n msg.record_entry(self.component_name, self.logger)\n return grabbed, msg\n\n def main_logic(self, *args, **kwargs):\n if self.updated_config:\n self.change_stream()\n self.updated_config = {}\n\n start = time.time()\n grabbed, msg = self.grab_frame()\n if grabbed:\n frame = msg.get_payload()\n frame = resize(frame, 640, 480)\n # if the stream is from a webcam, flip the frame\n if self.stream_address == 0:\n frame = cv2.flip(frame, 1)\n try:\n self.queue.get(block=False)\n except Empty:\n pass\n finally:\n msg.update_payload(frame)\n self.queue.put(msg)\n if self.isFile:\n wait = time.time() - start\n time.sleep(max(1 / self.fps - wait, 0))\n # self.queue.put(frame, block=False)\n time.sleep(0)\n return True\n\n def setup(self, *args, **kwargs):\n self.begin_capture()\n\n def cleanup(self, *args, **kwargs):\n self.stream.release()\n del self.stream\n\n\n# TODO: add Error handling to connection\nclass Message2Redis(Routine):\n\n def __init__(self, out_key, url, queue, maxlen, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.out_key = out_key\n self.url = url\n self.queue = queue\n self.maxlen = maxlen\n self.msg_handler = None\n\n def main_logic(self, *args, **kwargs):\n try:\n msg = self.queue.get(block=False)\n msg.record_exit(self.component_name, self.logger)\n encoded_msg = message_encode(msg)\n self.msg_handler.send(encoded_msg, 
self.out_key)\n time.sleep(0)\n return True\n except Empty:\n time.sleep(0) # yield the control of the thread\n return False\n\n def setup(self, *args, **kwargs):\n self.msg_handler = RedisHandler(self.url, self.maxlen)\n self.msg_handler.connect()\n\n def cleanup(self, *args, **kwargs):\n self.msg_handler.close()\n\n\nclass MessageFromRedis(Routine):\n\n def __init__(self, in_key, url, queue, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.in_key = in_key\n self.url = url\n self.queue = queue\n self.msg_handler = None\n self.flip = False\n self.negative = False\n\n def main_logic(self, *args, **kwargs):\n encoded_msg = self.msg_handler.receive(self.in_key)\n if encoded_msg:\n msg = message_decode(encoded_msg)\n msg.record_entry(self.component_name, self.logger)\n try:\n self.queue.put(msg, block=False)\n return True\n except Full:\n try:\n self.queue.get(block=False)\n except Empty:\n pass\n finally:\n self.queue.put(msg, block=False)\n return True\n else:\n time.sleep(0)\n return False\n\n def setup(self, *args, **kwargs):\n self.msg_handler = RedisHandler(self.url)\n self.msg_handler.connect()\n\n def cleanup(self, *args, **kwargs):\n self.msg_handler.close()\n\n\nclass DisplayCV2(Routine):\n def __init__(self, queue, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.queue = queue\n self.negative = False\n\n def main_logic(self, *args, **kwargs):\n try:\n msg = self.queue.get(block=False)\n frame = msg.get_payload()\n if self.negative:\n frame = 255 - frame\n cv2.imshow('Display', frame)\n cv2.waitKey(1)\n except Empty:\n time.sleep(0)\n\n def setup(self, *args, **kwargs):\n pass\n\n def cleanup(self, *args, **kwargs):\n cv2.destroyAllWindows()\n\n\nclass DisplayFlask(Routine):\n def __init__(self, in_key, queue, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.in_key = in_key\n self.queue = queue\n self.negative = False\n\n def main_logic(self, *args, **kwargs):\n try:\n frame = self.queue.get(block=False)\n if self.negative:\n frame = 255 - frame\n cv2.imshow('Display', frame)\n cv2.waitKey(1)\n return True\n except Empty:\n time.sleep(0)\n return False\n\n def setup(self, *args, **kwargs):\n pass\n\n def cleanup(self, *args, **kwargs):\n cv2.destroyAllWindows()\n","sub_path":"pipert/core/mini_logics.py","file_name":"mini_logics.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"525894511","text":"from path_finder.utility.Graph import Graph\nfrom django.db.models import Q\nfrom path_finder.models import Routes, Airports\n\n\nclass PathSearcher():\n \"\"\"\n Queries routes starting at specified origin on a BFS basis until a path\n to the request destination is found.\n \"\"\"\n\n def __init__(self):\n self.valid_path_exists = True\n self.graph = None\n self.errors_list = []\n\n def build_graph(self, origin_str, destination_str):\n self.verify_origin_and_destination(origin_str, destination_str)\n self.graph = Graph(origin_str)\n traversed = [] # Routes that have already been traversed\n next_level_of_paths = self.find_initial_routes_from_origin_string(\n origin_str, )\n if (len(self.errors()) is not 0):\n return\n while next_level_of_paths != []:\n for path in next_level_of_paths:\n traversed.append(path.id)\n self.graph.add_edge(path.origin.IATA, path.destination.IATA)\n if (path.destination.IATA == destination_str):\n return\n\n temp = self.find_routes_from_origins(\n next_level_of_paths,\n traversed)\n next_level_of_paths = temp\n\n # If entire 
function finishes then there is no solution\n self.log_error(\"No Route\")\n\n def find_shortest_path(self, origin, destination):\n \"\"\"\n A BFS that finds the shortest path from origin to destination in the\n in-memory graph.\n \"\"\"\n\n graph = self.graph.graph\n queue = []\n queue.append([origin])\n\n while queue:\n path = queue.pop(0)\n last_node = path[-1]\n if last_node == destination:\n return path\n\n for adjacent in graph.get(last_node):\n if adjacent not in path:\n new_path = list(path)\n new_path.append(adjacent)\n queue.append(new_path)\n\n def find_routes_from_origins(self, origins, traversed):\n \"\"\"\n Queries database for flights origination from an airport in the\n origins list\n\n Parameters:\n origins [str] - List of IATA codes of origin airport.\n traversed [int] - List of Routes ids that have already been queried.\n\n Return Value:\n [Route] - List of Routes objects from datbase\n \"\"\"\n routes = []\n for location in origins:\n new_routes = Routes.objects.filter(\n Q(origin=location.destination) & ~Q(id__in=traversed))\n routes += [route for route in new_routes]\n return routes\n\n def find_initial_routes_from_origin_string(self, origin_str):\n return Routes.objects.filter(Q(origin=origin_str))\n\n def verify_origin_and_destination(self, origin_str, destination_str):\n if not Airports.objects.filter(Q(IATA=origin_str)):\n self.log_error(\"Invalid Origin\")\n if not Airports.objects.filter(Q(IATA=destination_str)):\n self.log_error(\"Invalid Destination\")\n\n def log_error(self, error_msg):\n self.errors_list.append(error_msg)\n\n def errors(self):\n return self.errors_list\n","sub_path":"path_finder/utility/PathSearch.py","file_name":"PathSearch.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"155212893","text":"def str_replace(string):\r\n \"\"\"\r\n 把字符串中的每个空格替换成\"%20\"\r\n \"\"\"\r\n # 从后面开始复制和替换\r\n str_len1 = len(string)\r\n space_num = 0\r\n\r\n for i in range(str_len1):\r\n if string[i] == ' ':\r\n space_num += 1\r\n\r\n str_len2 = str_len1 + 2 * space_num\r\n str_list = [''] * (str_len2)\r\n p = str_len1 - 1\r\n q = str_len2 - 1 \r\n\r\n while p != q:\r\n if string[p] != ' ':\r\n str_list[q] = string[p]\r\n p -= 1\r\n q -= 1\r\n else:\r\n str_list[q-2:q+1] = ['%', '2', '0']\r\n p -= 1\r\n q -= 3\r\n\r\n return string[0:p+1] + ''.join(str_list)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n string = \"We are happy\"\r\n print(str_replace(string))\r\n\r\n print(string.replace(' ', '%20'))","sub_path":"interview/string/05-str_replace.py","file_name":"05-str_replace.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"491477066","text":"#-*- coding: utf-8 -*-\n\nfrom pathlib import Path\nimport sqlite3, os, copy, glob, time, threading, datetime, shutil, logging, json, csv\nfrom itertools import chain\nfrom PIL import Image, ImageSequence\nimport my_utils as mutl\n\nLIST_DELIMITER = ';'\nsqlite3.dbapi2.converters['DATETIME'] = sqlite3.dbapi2.converters['TIMESTAMP']\n\nclass DataBaseManager():\n \"\"\"\n GUIメインからのデータベース操作をSQLを意識せずに済むようにラッピングする。\n \"\"\"\n logger = logging.getLogger('MyDBApp') \n\n DATABASE_NAME = 'database.sqlite'\n\n FILE_ENCODING = 'utf-8'\n\n DATA_DIR = 'data'\n TAG_IMAGE_DIR = 'tag_image'\n BACKUP_DIR = 'backup'\n FILE_NUMBER_OF_DIGITS = 5\n\n SUPPORTED_EXT = [\"jpg\", \"jpeg\", \"png\", \"bmp\", \"gif\", \"zip\"]\n\n TEMPLATE_MAIN_COLUMN = {\n 
'Title': 'text primary key',\n 'InitialCharacter': 'text',\n 'Updated': 'datetime',\n 'FileNum': 'Integer',\n 'Size': 'Real',\n 'Link':'text',\n 'IsFavorite': 'Integer',\n 'favorite': 'text',\n 'chapter': 'text'\n }\n\n TEMPLATE_TAGS_COLUMN = {\n 'Name': 'text primary key',\n 'InitialCharacter': 'text',\n 'Link': 'text',\n 'Image': 'text',\n 'IsFavorite': 'Integer'\n }\n\n db_root = ''\n data_dir = ''\n tag_dir = ''\n\n record_num = 0\n\n # ファイル複製スレッド\n file_op_progress = {\n 'task_num':0,\n 'task_index':0,\n 'title':'',\n 'file_num':0,\n 'done_file':0,\n 'all_size':0,\n 'done_size':0,\n 'speed':0,\n 'remaining_time':0.0\n }\n is_cancel = False\n\n # cursorのスレッド以外で発行されたSQL文を溜め込む\n sql_tasks = []\n\n dbm_thread_id = 0 \n\n file_op_tasks = []\n file_op_thread = threading.Thread() # ファイル操作用のスレッド\n\n def __init__(self):\n self.logger.debug('get instance')\n\n def __del__(self):\n self.logger.debug('delete instance')\n self.close()\n\n # データベース操作\n def close(self):\n\n if self.file_op_thread.is_alive():\n self.file_op_thread.join()\n\n self.sql_tasks = []\n\n if self.db_root != '':\n self.cursor.close()\n self.connection.close()\n self.db_root == ''\n\n \n def database_is_exist(self, db_root):\n db_path = os.path.join(db_root, self.DATABASE_NAME)\n return os.path.exists(db_path)\n\n def connect_database(self, db_root):\n\n db_path = os.path.join(db_root, self.DATABASE_NAME)\n if not os.path.exists(db_path):\n return False\n\n self.db_root = db_root\n self.data_dir = os.path.join(self.db_root, self.DATA_DIR)\n self.tag_dir = os.path.join(self.db_root, self.TAG_IMAGE_DIR)\n\n self.logger.debug('open database')\n\n self.connection = sqlite3.connect(db_path, detect_types = sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n self.cursor = self.connection.cursor()\n\n return self.get_free_space() / 1024 #MiBで帰ってくる\n \n def create_database(self, db_root, db_option={}):\n \"\"\"\n 概要:\n 新規データベースを作成する。\n 引数:\n db_root: データベースのルートディレクトリ\n additional_tags: ユーザが任意で追加していくタグのdict\n - key:タグ名、value:tuple(sqlite内でのデータ型, 最大登録数)\n 返り値:\n db_rootにdatabase.sqliteが存在する場合Falseリターン\n メモ:\n \"\"\"\n\n # すでにデータベースがある場合はFalseリターン\n db_path = os.path.join(db_root, self.DATABASE_NAME)\n if os.path.exists(db_path):\n return False\n\n # データベースとデータ置き場の作成\n self.db_root = db_root\n self.connection = sqlite3.connect(db_path, detect_types = sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n self.cursor = self.connection.cursor()\n \n self.data_dir = os.path.join(self.db_root, self.DATA_DIR)\n if not os.path.exists(self.data_dir):\n os.mkdir(self.data_dir)\n \n self.tag_dir = os.path.join(self.db_root, self.TAG_IMAGE_DIR)\n if not os.path.exists(self.tag_dir):\n os.mkdir(self.tag_dir)\n\n # Mainテーブルを作成する。\n table_name = 'MainTable'\n table_columns = copy.deepcopy(self.TEMPLATE_MAIN_COLUMN)\n table_columns.update(db_option)\n tmp_str = ','.join(['{} {}'.format(key, value) for key,value in table_columns.items()]) \n sql = 'create table if not exists {} ({})'.format(table_name, tmp_str)\n \n self.logger.debug(sql)\n self.cursor.execute(sql)\n\n # 追加タグ記録用のテーブルを作成する\n for key in db_option.keys():\n table_name = key\n tmp_str = ','.join(['{} {}'.format(key, value) for key,value in self.TEMPLATE_TAGS_COLUMN.items()]) \n sql = 'create table if not exists {} ({})'.format(table_name, tmp_str)\n self.logger.debug(sql)\n self.cursor.execute(sql)\n\n self.connection.commit()\n\n return True\n\n def copy_database(self, dst_path):\n\n if self.file_op_thread.is_alive():\n return False\n\n self.file_op_tasks = []\n data_list = 
self.get_items(['Title', 'FileNum', 'Size'])\n\n dst_data_path = os.path.join(dst_path, self.DATA_DIR)\n\n all_size = 0\n for row in data_list:\n\n title = row[0]\n filenum = row[1]\n src_size = row[2]\n all_size += src_size\n\n src_path = os.path.join(self.data_dir, title)\n new_path = os.path.join(dst_data_path, title)\n \n # ファイルコピーのタスクを作成する -簡易版\n self.file_op_tasks.append({\n 'title':title,\n 'src_path':src_path,\n 'src_size':src_size,\n 'dst_path':new_path,\n 'file_num':filenum\n })\n\n os.mkdir(dst_data_path)\n \n dst_tag_path = os.path.join(dst_path, self.TAG_IMAGE_DIR)\n shutil.copytree(self.tag_dir, dst_tag_path)\n\n src_db_path = os.path.join(self.db_root, self.DATABASE_NAME)\n dst_db_path = os.path.join(dst_path, self.DATABASE_NAME)\n shutil.copyfile(src_db_path, dst_db_path)\n\n self.file_op_thread = threading.Thread(target=self._copy_database,daemon=False)\n self.file_op_thread.start()\n\n return True\n\n def _copy_database(self):\n\n # TODO: このやりかただと進捗表示が細かくできない\n\n self.file_op_progress['task_num'] = len(self.file_op_tasks)\n\n self.file_op_progress['done_size'] = 0\n self.file_op_progress['all_size'] = 0\n for ftask in self.file_op_tasks:\n self.file_op_progress['all_size'] += ftask['src_size']\n\n start = time.time()\n\n for t_idx, ftask in enumerate(self.file_op_tasks):\n\n # 初期設定\n title = ftask['title']\n trg_path = ftask['dst_path']\n src_path = ftask['src_path']\n\n self.file_op_progress['task_index'] = t_idx+1\n self.file_op_progress['title'] = title\n self.file_op_progress['file_num'] = ftask['file_num']\n\n shutil.copytree(src_path, trg_path)\n\n # 処理速度、残り時間予測\n self.file_op_progress['done_size'] += ftask['src_size']\n process_time = time.time() - start\n if process_time != 0:\n self.file_op_progress['speed'] = self.file_op_progress['done_size']/process_time # [MiB/s]\n\n if self.file_op_progress['speed'] != 0:\n self.file_op_progress['remaining_time'] = (self.file_op_progress['all_size'] - self.file_op_progress['done_size']) / self.file_op_progress['speed']\n\n\n def get_free_space(self):\n \"\"\"\n 接続中のデータベースのディスク空き容量を取得する\n \"\"\"\n try:\n disk_info = shutil.disk_usage(self.db_root)\n return disk_info.free / (1024*1024)\n except:\n return 0\n \n\n # MainTable操作\n def get_db_info(self):\n size_list = self.get_items('Size', convert=True)\n num = len(size_list)\n if num == 0:\n sum_size = 0\n else:\n sum_size = sum(size_list)\n return num, sum_size\n\n def insert_records(self, records, move_file=False):\n \"\"\"\n 概要:\n レコードを登録する。\n 登録のみでファイルの移動はせず、クラス内のfile_op_tasksに追加される。\n 別途start_file_operationでコピータスクを実行する。\n 本関数を小分けで使用してタスクを貯めてからまとめて実行もできるはず...\n 引数:\n records: 下記の構造を持つ辞書のリスト\n {\n path:登録ファイル群のフォルダのパス(必須)\n Title:登録タイトル(レコードの主キー、必須)\n values_dict:{その他のカラム内容の辞書、任意}\n }\n move_file:コピーではなくファイルを移動する。\n 返り値:\n err_info: 'title: エラー内容'のstringのリスト\n メモ:\n ・err_infoは辞書のほうが使い勝手がよいかも\n ・未実装のエラーハンドリング\n - values_dictの不正なカラム\n - pathとtitleがない場合\n ・上書きができるようにすると色々と幅が広がる\n - 上書きモードと追加モードなどなど\n \"\"\"\n\n op_mode = 'move' if move_file else 'copy'\n title_list = self.get_items('Title', convert=True)\n\n self.file_op_tasks = []\n \n err_info = []\n for info in records:\n\n # 情報の取り出し\n title = info['Title']\n src_path = info['path']\n values_dict = copy.deepcopy(info['values_dict'])\n \n # 同名タイトル\n if title in title_list:\n err_info.append('{} : 同名タイトルがあります。'.format(title))\n break\n\n # パスの正当性確認\n if not os.path.exists(src_path):\n err_info.append('{} : 該当するフォルダがありません。'.format(title))\n break\n\n # ファイル一覧取得\n file_list, file_size = mutl.search_files_deep(src_path, 
self.SUPPORTED_EXT)\n file_size /= (1024 * 1024)\n #file_list = sorted(list(chain.from_iterable([glob.glob(os.path.join(src_path, \"*.\" + ext)) for ext in self.SUPPORTED_EXT])))\n\n # ファイルがない場合\n if len(file_list) == 0:\n err_info.append('{} : 該当するファイルがありません。'.format(title))\n break\n\n # レコードを作成する\n try:\n values_dict['Title'] = title\n values_dict['FileNum'] = 0\n values_dict['Size'] = 0.0\n values_dict['Link'] = ''\n values_dict['IsFavorite'] = 0\n values_dict['favorite'] = []\n values_dict['chapter'] = []\n sql_tmp, sql_values = self._convert_dict4sql(values_dict)\n sql = 'insert into MainTable {}'.format(sql_tmp)\n self.logger.debug('{} {}'.format(sql, sql_values))\n self.cursor.execute(sql, sql_values)\n except:\n err_info.append((title, 'SQL Insert Error'))\n break\n\n # ファイルコピーのタスクを作成する\n self.file_op_tasks.append({\n 'operation':op_mode,\n 'title':title,\n 'src_list':file_list,\n 'src_size':file_size,\n 'dst_path':os.path.join(self.data_dir, title),\n 'init_num':0,\n 'init_size':0.0\n })\n\n self.connection.commit()\n\n return err_info\n\n def add_files(self, title, file_list):\n\n if len(file_list) == 0:\n return False\n\n self.file_op_tasks = []\n\n file_size = 0\n for fl in file_list:\n file_size += os.path.getsize(fl)\n file_size /= (1024 * 1024)\n\n init_info = self.get_items(['FileNum','Size'], title=title)\n\n # ファイルコピーのタスクを作成する\n self.file_op_tasks.append({\n 'operation':'copy',\n 'title':title,\n 'src_list':sorted(file_list, key=mutl.numericalSort),\n 'src_size':file_size,\n 'dst_path':os.path.join(self.data_dir, title),\n 'init_num':init_info[0][0],\n 'init_size':init_info[0][1]\n })\n \n return True\n\n def delete_record(self, titles):\n \"\"\"\n 概要:\n 指定されたレコードと該当するファイルを削除する。\n 引数:\n titles: 削除するレコードのtitleのリスト\n 返り値:\n False:前のスレッドが生きている場合にFalseを返す。\n メモ:\n ファイル削除は別スレッドだが、shutil.rmtreeでの一括消去のため進捗は確認できない。\n copyのタスクと同じ形式でまとめたいところ。\n \"\"\"\n\n if self.file_op_thread.is_alive():\n return False\n\n for title in titles:\n sql = 'DELETE FROM MainTable WHERE Title=\"{}\"'.format(title)\n self.logger.debug(sql)\n self.cursor.execute(sql)\n\n self.file_op_thread = threading.Thread(target=self._delete_titles, args=([titles]),daemon=False)\n self.file_op_thread.start()\n\n self.connection.commit()\n\n return True\n\n def delete_files(self, title,file_idx):\n \"\"\"\n 概要:\n 引数:\n 返り値:\n メモ:\n \"\"\"\n if len(file_idx) == 0:\n return False\n\n self.file_op_tasks = []\n\n init_info = self.get_items(['FileNum','Size','favorite','chapter'], title=title, convert=True)\n\n init_favorite = []\n for i in init_info['favorite'][0]:\n if i.isdecimal():\n init_favorite.append(int(i))\n init_chapter = []\n for i in init_info['chapter'][0]:\n if i.isdecimal():\n init_chapter.append(int(i))\n\n # ファイルコピーのタスクを作成する\n self.file_op_tasks.append({\n 'operation':'delete',\n 'title':title,\n 'src_list':sorted(file_idx),\n 'src_size':0, # TODO: 現状、使っていない\n 'dst_path':os.path.join(self.data_dir, title),\n 'init_num':init_info['FileNum'][0][0],\n 'init_size':init_info['Size'][0][0],\n 'init_favorite': copy.deepcopy(init_favorite),\n 'init_chapter':copy.deepcopy(init_chapter)\n })\n\n return True\n\n def start_file_operation(self):\n \"\"\"\n 概要:\n insert_recordsで発生したfile_op_tasksを別スレッドで実行する。\n レコード内容を更新するsqlをsql_tasksとして溜め込むため、コピー完了後にresolve_sql_tasksの実行が必要。\n スレッドの生存は、file_op_is_aliveで確認できる。\n コピーの進捗状況は、get_file_op_progressで取得できる。\n 引数:\n 返り値:\n False:前のスレッドが生きている場合にFalseを返す。\n メモ:\n ・sqlが、cursorを取得したスレッドでしか実行できないので、タスクとしてためている。\n - sqlの実施だけメインスレッドに帰ってきて実行とかできないだろうか...\n ・キャンセル操作への対応\n \"\"\"\n\n if 
self.file_op_thread.is_alive():\n return False\n\n self.file_op_thread = threading.Thread(target=self._run_file_operation,daemon=False)\n self.file_op_thread.start()\n\n return True\n\n def sort_files(self, title,ref_index, insert_index):\n\n if len(insert_index) == 0:\n return\n\n title_dir, file_list, _ = self.get_file_list(title)\n init_info = self.get_items(['favorite','chapter'], title=title, convert=True)\n\n init_favorite = set([])\n for i in init_info['favorite'][0]:\n if i.isdecimal():\n init_favorite.add(int(i))\n init_chapter = set([])\n for i in init_info['chapter'][0]:\n if i.isdecimal():\n init_chapter.add(int(i))\n\n # 対象範囲のインデックスをすべて取得\n tmp_set = set(insert_index) | set([ref_index])\n trg_idx = set(range(min(tmp_set),max(tmp_set)+1))\n # 入れ替え対象のインデックスを除く, リスト化して指定位置に挿入\n new_order = sorted(trg_idx ^ insert_index)\n ref_pos = new_order.index(ref_index)\n new_order[ref_pos:ref_pos] = sorted(insert_index)\n\n # ソート1 - 元ファイルをいったん別名にして避難しつつ、ファイル名変更タスクの作成\n old_order = sorted(trg_idx)\n sort_task = []\n new_favorite = set([])\n new_chapter = set([])\n for i, old_idx in enumerate(new_order):\n\n new_idx = old_order[i]\n if old_idx == new_idx:\n continue\n\n if old_idx in init_favorite:\n init_favorite.remove(old_idx)\n new_favorite.add(new_idx)\n if old_idx in init_chapter:\n init_chapter.remove(old_idx)\n new_chapter.add(new_idx)\n\n fname = file_list[old_idx]\n old_idx_str, ext = os.path.splitext(fname) # インデックスと拡張子に分解\n\n old_path = os.path.join(title_dir, fname)\n tmp_path = os.path.join(title_dir, '_' + fname)\n os.rename(old_path, tmp_path)\n #print('{} -> {}'.format(old_path, tmp_path))\n\n new_idx_str = str(new_idx).zfill(self.FILE_NUMBER_OF_DIGITS)\n new_path = os.path.join(title_dir, new_idx_str + ext)\n\n sort_task.append((tmp_path, new_path))\n\n if ext == '.zip':\n old_thum_path = os.path.join(title_dir, '__thumbnail__', old_idx_str + '.png')\n tmp_thum_path = os.path.join(title_dir, '__thumbnail__', '_' + old_idx_str + '.png')\n os.rename(old_thum_path, tmp_thum_path)\n #print('{} -> {}'.format(old_thum_path, tmp_thum_path))\n \n new_thum_path = os.path.join(title_dir, '__thumbnail__', new_idx_str + '.png')\n sort_task.append((tmp_thum_path, new_thum_path))\n\n # ソート2 - ファイル名変更の実施\n for stask in sort_task:\n os.rename(stask[0], stask[1])\n #print('{} -> {}'.format(stask[0], stask[1]))\n\n new_favorite |= init_favorite\n new_chapter |= init_chapter\n\n # データベースの情報変更\n values_dict = {}\n values_dict['favorite'] = sorted(new_favorite)\n values_dict['chapter'] = sorted(new_chapter)\n #print(values_dict)\n\n self.update_record(title, values_dict=values_dict)\n\n def update_record(self, title, values_dict):\n \"\"\"\n 登録済みのデータのアップデート\n \"\"\"\n data_list = []\n\n for key, value in values_dict.items():\n \n if key is 'Title':\n b_path = os.path.join(self.data_dir, title)\n n_path = os.path.join(self.data_dir, value)\n os.rename(b_path, n_path)\n\n if type(value) is list:\n tmp_val = LIST_DELIMITER.join([str(i) for i in value])\n else:\n tmp_val = value\n data_list.append('{0}=\"{1}\"'.format(key,tmp_val))\n\n sql_set_data = ','.join(data_list)\n sql = 'UPDATE MainTable SET {0} WHERE Title=\"{1}\"'.format(sql_set_data, title)\n \n # cursorのスレッドのみ実行可能なため、それ以外はタスクとして溜め込む\n try:\n self.logger.debug(sql)\n self.cursor.execute(sql)\n self.connection.commit()\n except:\n self.logger.debug('before SQL STACKED')\n self.sql_tasks.append(sql) \n\n def title_is_exist(self, title):\n sql = 'SELECT COUNT(*) FROM MainTable WHERE Title=\"{}\"'.format(title)\n self.cursor.execute(sql)\n 
result = self.cursor.fetchall()\n return result[0][0] != 0\n\n def resolve_sql_tasks(self):\n \"\"\"\n 未実行のSQL文を実行する\n \"\"\"\n for sql in self.sql_tasks:\n self.logger.debug(sql)\n self.cursor.execute(sql)\n\n self.sql_tasks = []\n self.connection.commit()\n\n\n def _run_file_operation(self):\n\n # TODO: 中断操作対応\n\n self.file_op_progress['task_num'] = len(self.file_op_tasks)\n\n self.file_op_progress['done_size'] = 0\n self.file_op_progress['all_size'] = 0\n for ftask in self.file_op_tasks:\n self.file_op_progress['all_size'] += ftask['src_size']\n\n start = time.time()\n\n for t_idx, ftask in enumerate(self.file_op_tasks):\n\n # 初期設定\n op_mode = ftask['operation']\n title = ftask['title']\n trg_path = ftask['dst_path']\n src_list = ftask['src_list']\n idx_offset = ftask['init_num']\n \n values_dict = {}\n values_dict['FileNum'] = ftask['init_num']\n values_dict['Size'] = ftask['init_size']\n\n self.file_op_progress['task_index'] = t_idx+1\n self.file_op_progress['title'] = title\n self.file_op_progress['file_num'] = len(src_list)\n self.file_op_progress['done_file'] = 0\n\n if op_mode in ['move', 'copy']:\n\n if not os.path.exists(trg_path):\n os.mkdir(trg_path)\n\n for i, file_path in enumerate(src_list):\n _, ext = os.path.splitext(file_path)\n new_name = str(i + idx_offset).zfill(self.FILE_NUMBER_OF_DIGITS)\n new_path = os.path.join(trg_path, new_name)\n if ext == '.gif':\n thum_dir = os.path.join(trg_path, '__thumbnail__')\n if not os.path.exists(thum_dir):\n os.mkdir(thum_dir)\n self._gif_to_zip(file_path, new_path, thum_dir)\n fsize = os.path.getsize(new_path + '.zip') / (1024*1024)\n values_dict['Size'] += fsize \n else:\n # TODO: 上書き操作は仮で禁止にしている\n new_path = new_path + ext\n if not os.path.exists(new_path):\n if op_mode == 'move':\n shutil.move(file_path, new_path)\n elif op_mode == 'copy':\n shutil.copyfile(file_path, new_path)\n fsize = os.path.getsize(new_path) / (1024*1024)\n values_dict['Size'] += fsize\n values_dict['FileNum'] += 1\n self.file_op_progress['done_file'] += 1\n\n # 処理速度、残り時間予測\n self.file_op_progress['done_size'] += fsize\n process_time = time.time() - start\n if process_time != 0:\n self.file_op_progress['speed'] = self.file_op_progress['done_size']/process_time # [MiB/s]\n\n if self.file_op_progress['speed'] != 0:\n self.file_op_progress['remaining_time'] = (self.file_op_progress['all_size'] - self.file_op_progress['done_size']) / self.file_op_progress['speed']\n\n values_dict['Updated'] = datetime.datetime.now()\n\n elif op_mode == 'delete':\n\n # deleteの場合はindexのリストが渡されている想定\n # renameも兼ねるため、結局trg_path内を全探索する\n\n title_dir, file_list, _ = self.get_file_list(title)\n \n new_favorite = []\n new_chapter = []\n count_new_idx = 0\n for i, fname in enumerate(file_list):\n\n idx_str, ext = os.path.splitext(fname) # インデックスと拡張子に分解\n idx = int(idx_str)\n\n fpath = os.path.join(title_dir, fname)\n\n if ext == '.zip':\n thum_name = idx_str + '.png'\n thum_path = os.path.join(title_dir, '__thumbnail__', thum_name)\n\n if idx in src_list:\n fsize = os.path.getsize(fpath) / (1024*1024)\n os.remove(fpath)\n if ext == '.zip':\n os.remove(thum_path)\n values_dict['FileNum'] -= 1\n values_dict['Size'] -= fsize\n self.file_op_progress['done_file'] += 1\n else:\n if idx != count_new_idx:\n new_idx_str = str(count_new_idx).zfill(self.FILE_NUMBER_OF_DIGITS)\n new_path = os.path.join(title_dir, new_idx_str + ext)\n os.rename(fpath, new_path)\n if ext == '.zip':\n new_thum_path = os.path.join(title_dir, '__thumbnail__', new_idx_str + '.png')\n os.rename(thum_path, new_thum_path)\n 
if idx in ftask['init_favorite']:\n new_favorite.append(count_new_idx)\n if idx in ftask['init_chapter']:\n new_chapter.append(count_new_idx)\n count_new_idx += 1\n \n values_dict['favorite'] = sorted(new_favorite)\n values_dict['chapter'] = sorted(new_chapter)\n\n # データベース情報更新\n self.update_record(title, values_dict=values_dict)\n self.logger.debug('Copy Operation Finished')\n\n def _delete_titles(self, titles):\n\n for title in titles:\n del_path = os.path.join(self.data_dir, title)\n if os.path.exists(del_path):\n shutil.rmtree(del_path)\n\n self.logger.debug('Delete Operation Finished')\n\n\n def file_op_is_alive(self):\n return self.file_op_thread.is_alive()\n\n def get_file_op_progress(self):\n return copy.deepcopy(self.file_op_progress)\n\n def get_items(self, col_name, title=None, convert=False):\n \"\"\"\n col_namesのリストに対するSELECT結果(tuple)を返す。\n covertをTrueにすると、tupleから変換して返す。\n  ・col_nameがstrならlist\n  ・col_nameがlistならcol_nameをkeyとしたdict\n \"\"\"\n\n if type(col_name) is list:\n sql = 'SELECT {} FROM MainTable'.format(','.join(col_name))\n elif type(col_name) is str:\n sql = 'SELECT {} FROM MainTable'.format(col_name)\n else:\n return []\n\n if not title is None:\n sql += ' WHERE Title=\"{}\"'.format(title)\n\n self.logger.debug(sql)\n self.cursor.execute(sql)\n\n result = self.cursor.fetchall()\n\n if convert:\n if type(col_name) is list:\n tmp_dict = {}\n for cl in col_name:\n tmp_dict[cl] = []\n for row in result:\n for i, cl in enumerate(col_name):\n tmp_dict[cl].append(self._convert_list(row[i]))\n return tmp_dict\n elif type(col_name) is str:\n tmp_list = []\n for row in result:\n tmp_list += self._convert_list(row[0])\n return tmp_list\n else:\n return result\n\n def get_titles(self, filter_option={}, init_chars=[], enable_or=False, sort_option=('Updated',True)):\n\n col_names = ['Title', 'FileNum', 'IsFavorite', 'InitialCharacter'] + list(filter_option.keys())\n\n sort_order = 'DESC' if sort_option[1] else 'ASC'\n sql = 'SELECT {} FROM MainTable ORDER BY {} {}'.format(','.join(col_names), sort_option[0], sort_order)\n ret = self.cursor.execute(sql)\n \n title_list = []\n for row in ret.fetchall():\n\n if len(init_chars) != 0:\n if not row[3] in init_chars:\n continue\n\n if len(filter_option.keys()) == 0:\n flag = True\n else: \n flag = False if enable_or else True\n for i in range(4, len(col_names)):\n # 値取得\n ref_val = self._convert_list(row[i])\n key = col_names[i]\n f_val = filter_option[key]\n for fv in f_val:\n if enable_or:\n flag |= (fv in ref_val)\n else:\n flag &= (fv in ref_val)\n if flag:\n title_list.append((row[0],row[1],row[2]))\n\n return title_list\n\n def get_file_list(self, title):\n\n file_dir = os.path.join(self.data_dir, title)\n\n if not os.path.exists(file_dir):\n return '', []\n\n #登録時に拡張子はフィルタされるのでファイルすべてを取得しているが、早くなるかは不明\n #tmp_list = sorted(glob.glob(os.path.join(file_dir, \"*\")))\n #file_list = [os.path.basename(r) for r in tmp_list]\n\n tmp_list, file_size = mutl.search_files(file_dir, self.SUPPORTED_EXT)\n #tmp_list = sorted(list(chain.from_iterable([glob.glob(os.path.join(file_dir, \"*.\" + ext)) for ext in self.SUPPORTED_EXT])))\n \n file_list = [os.path.basename(r) for r in tmp_list]\n\n return file_dir, file_list, file_size\n\n\n # 追加項目操作\n def get_additional_table(self):\n \"\"\"\n ユーザ作成のタグ一覧の取得\n (= MainTable以外のテーブル名の一覧)\n \"\"\"\n table_list = []\n self.cursor.execute(\"select * from sqlite_master where type='table'\")\n for x in self.cursor.fetchall():\n if not x[1] == 'MainTable':\n table_list.append(x[1])\n return table_list\n\n def 
add_tag(self, table, values_dict):\n \"\"\"\n 新規タグを追加する\n tagはMainTable内に存在するタグ種別名\n valuesは追加するタグ内容のリスト[Name, InitialCharacter]\n Nameが重複した場合はFalseリターン\n \"\"\"\n if not 'Name' in values_dict.keys():\n return False\n elif self.tag_is_exist(table, values_dict['Name']):\n return False\n\n try:\n sql_tmp, sql_values = self._convert_dict4sql(values_dict)\n sql = 'insert into {} {}'.format(table, sql_tmp)\n #sql = 'insert into {}(Name, InitialCharacter, Link, IsFavorite, Image) VALUES(?,?,?,?,?)'.format(table)\n self.logger.debug('{} {}'.format(sql, sql_values))\n self.cursor.execute(sql, sql_values)\n self.connection.commit()\n\n return True\n except:\n return False\n\n def update_tag(self, table, tag_name, values_dict):\n \"\"\"\n タグの情報を変更する。\n \"\"\"\n if 'Name' in values_dict.keys():\n if self.tag_is_exist(table, values_dict['Name']):\n return False\n\n data_list = []\n\n for key, value in values_dict.items():\n\n if type(value) is list:\n tmp_val = LIST_DELIMITER.join([str(i) for i in value])\n else:\n tmp_val = value\n\n if key == 'Image':\n # valueが空欄の場合は削除指令\n \n im_path = self.get_tag_image(table,tag_name)\n try:\n os.remove(im_path)\n except:\n self.logger.debug('Failed to remove file. {}'.format(im_path))\n finally:\n if value == '':\n tmp_val = ''\n else:\n tmp_val = self._asign_tag_image(table, value, tag_name)\n\n data_list.append('{0}=\"{1}\"'.format(key,tmp_val))\n\n sql_set_data = ','.join(data_list)\n sql = 'UPDATE {0} SET {1} WHERE Name=\"{2}\"'.format(table, sql_set_data, tag_name)\n\n try:\n self.logger.debug(sql)\n self.cursor.execute(sql)\n self.connection.commit()\n except:\n return False\n\n # タグ名を変更した場合 - MainTableにも反映する\n if 'Name' in values_dict.keys():\n old_name = tag_name\n new_name = values_dict['Name']\n update_list = [] # tuple(Title, NewList)\n\n self._update_tag_image(table, old_name, new_name)\n\n sql = 'SELECT Title,{} FROM MainTable'.format(table)\n ret = self.cursor.execute(sql)\n\n for row in ret.fetchall():\n title = row[0]\n tag_list = self._convert_list(row[1])\n if old_name in tag_list:\n new_list = [new_name if t == old_name else t for t in tag_list]\n update_list.append((title,{table:copy.deepcopy(new_list)}))\n \n for ul in update_list:\n self.update_record(title=ul[0],values_dict=ul[1])\n\n return True\n\n def delete_tags(self, table, names):\n\n for name in names:\n sql = 'DELETE FROM {} WHERE Name=\"{}\"'.format(table, name)\n\n im_path = self.get_tag_image(table, name)\n if im_path != '':\n try:\n os.remove(im_path)\n except:\n self.logger.debug('Failed to remove file. 
{}'.format(im_path))\n\n self.logger.debug(sql)\n self.cursor.execute(sql)\n self.connection.commit()\n\n # MainTableも変更しに行く\n update_list = [] # tuple(Title, NewList)\n sql = 'SELECT Title,{} FROM MainTable'.format(table)\n ret = self.cursor.execute(sql)\n\n for row in ret.fetchall():\n title = row[0]\n tmp_list = self._convert_list(row[1])\n\n if name in tmp_list:\n tmp_list.remove(name)\n update_list.append((title,{table:copy.deepcopy(tmp_list)}))\n for ul in update_list:\n self.update_record(title=ul[0],values_dict=ul[1])\n\n return\n\n def tag_is_exist(self, table, name):\n sql = 'SELECT COUNT(*) FROM {} WHERE Name=\"{}\"'.format(table, name)\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n return result[0][0] != 0\n\n def get_tag_image(self, table, name):\n sql = 'SELECT Image FROM {} WHERE Name=\"{}\"'.format(table, name)\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n if result[0][0] == '':\n return ''\n else:\n im_name = '{}{}'.format(name, result[0][0])\n return os.path.join(self.tag_dir, table, im_name)\n\n\n def _asign_tag_image(self, table, src_path, tag_name):\n\n tmp_path = os.path.join(self.tag_dir, table)\n if not os.path.exists(tmp_path):\n os.mkdir(tmp_path)\n\n _, ext = os.path.splitext(src_path)\n new_name = '{}{}'.format(tag_name, ext)\n\n try:\n shutil.copyfile(src_path, os.path.join(tmp_path, new_name))\n except:\n self.logger.debug('Failed to copy file. {}'.format(src_path))\n\n return ext\n\n def _update_tag_image(self, table, old_name, new_name):\n\n sql = 'SELECT Image FROM {} WHERE Name=\"{}\"'.format(table, new_name)\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n if result[0][0] == '':\n return\n\n ext = result[0][0]\n\n old_im_path = os.path.join(self.tag_dir, table, '{}{}'.format(old_name, ext))\n new_im_path = os.path.join(self.tag_dir, table, '{}{}'.format(new_name, ext))\n\n try:\n os.rename(old_im_path, new_im_path)\n except:\n self.logger.debug('Failed to change name. 
{} -> {}'.format(old_im_path, new_im_path))\n\n\n def get_tag_list(self, tag):\n sql = 'SELECT * FROM {}'.format(tag)\n self.cursor.execute(sql)\n return self.cursor.fetchall()\n\n def get_tag_items_with_num(self, tag_table, init_chars=[]):\n \"\"\"\n タグ内容に該当する項目数をカウントして返す。\n リスト項目があるためfor文ループをしている\n \"\"\"\n sql = 'SELECT Name,InitialCharacter,IsFavorite FROM {}'.format(tag_table)\n\n tag_info = {}\n self.cursor.execute(sql)\n for row in self.cursor.fetchall():\n if len(init_chars) != 0:\n if row[1] in init_chars:\n tag_info[row[0]] = [0, row[2]]\n else:\n tag_info[row[0]] = [0, row[2]]\n\n sql = 'SELECT {} FROM MainTable'.format(tag_table)\n self.cursor.execute(sql)\n for row in self.cursor.fetchall():\n item_list = self._convert_list(row[0])\n for il in item_list:\n if il in tag_info.keys():\n tag_info[il][0] += 1\n\n return tag_info\n \n def get_tag_items(self, table, col_name, name=None, convert=False):\n \"\"\"\n get_itemのタグ用のテーブル版\n \"\"\"\n\n if type(col_name) is list:\n sql = 'SELECT {} FROM {}'.format(','.join(col_name), table)\n elif type(col_name) is str:\n sql = 'SELECT {} FROM {}'.format(col_name, table)\n else:\n return []\n\n if not name is None:\n sql += ' WHERE Name=\"{}\"'.format(name)\n\n self.logger.debug(sql)\n ret = self.cursor.execute(sql)\n\n if convert:\n if type(col_name) is list:\n tmp_dict = {}\n for cl in col_name:\n tmp_dict[cl] = []\n for row in ret.fetchall():\n for i, cl in enumerate(col_name):\n tmp_dict[cl].append(self._convert_list(row[i]))\n return tmp_dict\n elif type(col_name) is str:\n tmp_list = []\n for row in ret.fetchall():\n tmp_list += self._convert_list(row[0])\n return tmp_list\n else:\n return ret.fetchall()\n\n\n def tag_backup(self):\n \n date_str = datetime.datetime.now().strftime('%Y%m%d%H%M')\n save_dir = os.path.join(self.db_root, self.BACKUP_DIR, date_str)\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n\n table_list = self.get_additional_table()\n for table in table_list:\n fname = os.path.join(save_dir, table + '.csv')\n with open(fname, \"w\", encoding=self.FILE_ENCODING) as write_file:\n writer = csv.writer(write_file, lineterminator='\\n')\n sql = 'SELECT Name,InitialCharacter,Link,IsFavorite FROM {}'.format(table)\n for row in self.cursor.execute(sql):\n writer.writerow(list(row))\n #write_file.write(write_txt)\n\n def add_tag_batch(self, table, path):\n \"\"\"\n CSVファイルを使ってタグを追加する.\n [Name, InitialCharacter, Link, IsFavoriteの順]\n \"\"\"\n success_tags = []\n\n if not os.path.exists(path):\n return success_tags\n\n with open(path, \"r\", encoding=self.FILE_ENCODING) as read_file:\n reader = csv.reader(read_file)\n\n for row in reader:\n\n num = len(row)\n if num < 2:\n continue\n elif (row[0] == '') | (row[1] == ''):\n continue\n \n values_dict = {\n 'Name':row[0],\n 'InitialCharacter':row[1],\n 'Link':'' if num < 3 else row[2],\n 'IsFavorite':'' if num < 4 else row[3],\n 'Image':''\n }\n\n if self.add_tag(table, values_dict):\n success_tags.append(row[0])\n\n return success_tags\n\n def show_all(self):\n sql = 'SELECT * FROM MainTable'\n ret = self.cursor.execute(sql)\n for row in ret.fetchall():\n print(row)\n\n def _convert_dict4sql(self, input_dict):\n \"\"\"\n 辞書形式のデータをSQLのVALUES文に対応させる。\n プレースホルダー型。\n \"\"\"\n \n key_list = []\n tmp_list = []\n values = []\n\n for key, value in input_dict.items():\n key_list.append(key)\n tmp_list.append('?')\n\n if type(value) is list:\n values.append(LIST_DELIMITER.join([str(i) for i in value]))\n else:\n values.append(value)\n\n key_list_str = 
','.join(key_list)\n tmp_list_str = ','.join(tmp_list)\n\n sql = '({0}) values ({1})'.format(key_list_str, tmp_list_str)\n sql_values = tuple(values)\n\n return sql, sql_values\n\n def _convert_list(self, src):\n if type(src) is str:\n tmp_list = src.split(LIST_DELIMITER)\n return tmp_list\n else:\n return list([src])\n\n def _gif_to_zip(self, src_gif, dst_dir, thum_dir):\n \n if not os.path.exists(dst_dir):\n os.mkdir(dst_dir)\n\n gif = Image.open(src_gif)\n for i, f in enumerate(ImageSequence.Iterator(gif)):\n if i == 0:\n thum_name = os.path.basename(dst_dir) + '.png'\n thum_path = '{}/{}'.format(thum_dir, thum_name)\n f.save(thum_path)\n \n img_name = str(i).zfill(3) + \".png\"\n img_path = '{}/{}'.format(dst_dir, img_name)\n #print(img_path)\n f.save(img_path)\n\n \"\"\"\n thum_name = os.path.basename(dst_dir)\n for f in frames:\n name = '{}/{}{}'.format(thum_dir, thum_name, '.gif')\n f.save(name)\n break\n \"\"\"\n shutil.make_archive(dst_dir, 'zip', dst_dir)\n shutil.rmtree(dst_dir)\n","sub_path":"databasemanager.py","file_name":"databasemanager.py","file_ext":"py","file_size_in_byte":43165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"551647723","text":"from flask import Flask\nfrom flask import request\nfrom flask import jsonify\nfrom flask import Response\nfrom flask import abort\nfrom flask_cors import CORS\nfrom weather import weather\nfrom api_errors import *\nimport datetime\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\ncors = CORS(app)\n\n@app.errorhandler(ApiError)\ndef handle_api_error(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n@app.route('/api/historical', methods=['GET'])\ndef historical():\n dates = weather.get_historical()\n date_dict_array = [{'DATE':date} for date in dates]\n return jsonify(date_dict_array), 200\n\n@app.route('/api/historical/', methods=['GET'])\ndef get_date(date):\n validate_date(date)\n date_info = weather.get_date(date)\n return jsonify(date_info), 200\n\n@app.route('/api/historical', methods=['POST'])\ndef add_date():\n content = request.get_json()\n if 'DATE' not in content or 'TMAX' not in content or 'TMIN' not in content:\n raise ApiError('A parameter is missing.', status_code=400)\n\n validate_date(content['DATE'])\n date_text = content['DATE']\n\n tmax = 0.0\n try:\n tmax = float(content['TMAX'])\n except ValueError:\n raise ApiError('TMAX cannot be converted to a float.', status_code=400)\n\n tmin = 0.0\n try:\n tmin = float(content['TMIN'])\n except ValueError:\n raise ApiError('TMIN cannot be converted to a float.', status_code=400)\n \n date = weather.add_date(date_text, tmax, tmin)\n return jsonify({'DATE':date['DATE']}), 201\n\n@app.route('/api/forecast/', methods=['GET'])\ndef get_forecast(start_date):\n validate_date(start_date)\n forecast = weather.get_forecast(start_date)\n return jsonify(forecast), 200\n\n@app.route('/api/historical/', methods=['DELETE'])\ndef delete_date(date):\n validate_date(date)\n weather.delete_date(date)\n return ('', 204)\n\ndef validate_date(date_text):\n if type(date_text) is not str:\n raise ApiError(\"Incorrect data type.\", 400)\n try:\n if date_text != datetime.datetime.strptime(date_text, r'%Y%m%d').strftime(r'%Y%m%d'):\n raise ValueError\n except ValueError:\n raise ApiError(\"Incorrect date format, should be YYYYMMDD.\", 400)\n\n# Start flask server when calling on the file\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=False, 
port=80)\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"170353737","text":"from datetime import timedelta\nfrom functools import update_wrapper\nfrom flask import Flask, json, render_template, request, redirect, jsonify, request, make_response\nfrom catboost import CatBoostRegressor\nfrom flask.globals import current_app\nfrom flask.helpers import make_response\nfrom flask_sqlalchemy import SQLAlchemy\nimport sqlite3\nfrom flask_cors import CORS\nfrom Python_algo.support import user_details\n\n\nimport pandas as pd\nimport numpy as np\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///burnout.db'\ndb = SQLAlchemy(app)\nCORS(app)\n\ndb_file = \"burnout.db\"\n\n\n# Entry class for Db stuff\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n emp_id = db.Column(db.String(25), unique=True, nullable=False)\n date = db.Column(db.String(20), nullable=False)\n Gender = db.Column(db.String(6), nullable=False)\n company = db.Column(db.String(10), nullable=False)\n WFH = db.Column(db.String(10), nullable=False)\n Designation = db.Column(db.Float, nullable=False)\n Resource_Allocation = db.Column(db.Float, nullable=False)\n\n def __repr__(self):\n return f\"User('{self.emp_id}', '{self.date}', '{self.Gender}', {self.Designation}, {self.Resource_Allocation})\"\n\n\nclass bout(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n emp_id = db.Column(db.String(25), nullable=False)\n Burn = db.Column(db.Float, nullable=False)\n\n def __repr__(self):\n return f\"User('{self.emp_id}', {self.fat})\"\n\n\ndef check_user_db(emp_id):\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n names = cur.execute(\n \"SELECT emp_id FROM User WHERE emp_id='{emp_id}'\".format(emp_id=emp_id))\n nam = names.fetchall()\n for i in nam:\n if(i[0] == emp_id):\n return True\n else:\n return False\n \n\n\nfrom_file = CatBoostRegressor()\nmodel = from_file.load_model(\"model.pkl\")\n\n\n@app.route('/')\ndef hello_world():\n return render_template(\"index.html\")\n\n@app.route('/error')\ndef error():\n return render_template(\"404.html\")\n\n\n@app.route('/login', methods=['POST'])\ndef signup():\n req = request.form \n\n emp1 = str(req.get('employee_id'))\n Men = float(req.get('mental_fatigue'))\n print(emp1, Men)\n respo = \"You are not a user, Please sign up\"\n\n\n details = user_details(emp1, Men)\n burnout = model.predict(details)\n \n if (check_user_db(emp1)):\n try:\n entry = bout(emp_id=emp1, Burn=burnout)\n db.session.add(entry)\n db.session.commit()\n\n return str(burnout)\n except Exception as e:\n return str(burnout)\n else:\n return respo\n\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n if request.method == 'POST':\n\n emp = request.form['Employee Id']\n date_emp = (request.form['date'])\n Gender = int(request.form['Gender'])\n Com = int(request.form['Company type'])\n WFH = int(request.form['WFH'])\n Des = float(request.form['Designation'])\n Res = float(request.form['Resorce Allocation'])\n \n if (not check_user_db(emp)):\n if(Gender == 0):\n G=\"Female\"\n else:\n G=\"Male\"\n if Com == 0:\n c = \"Service\"\n else:\n c = \"Product\"\n if WFH == 0:\n w = \"Not WFH\"\n else:\n w = \"WFH\"\n # try:\n entry = User(emp_id=emp, date=date_emp, Gender=G, company=c,\n WFH=w, Designation=Des, Resource_Allocation=Res)\n db.session.add(entry)\n db.session.commit()\n\n return redirect(\"/\")\n \n\n else:\n return 
\"You are already registered! Please login!!\"\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"609939523","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.utils.translation import gettext_lazy as __\nfrom django.shortcuts import redirect, reverse\nfrom django.shortcuts import render\nfrom django.db import transaction\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django import forms\n\nfrom bioresources.models.Resource import Collaboration\nfrom bioseq.models.Taxon import Taxon\n\n\n@login_required\ndef submission_start(request):\n return render(request, 'submission/submission_start.html', {})\n\nfrom django_select2.forms import ModelSelect2Widget\n\nclass TaxSelect(ModelSelect2Widget):\n def label_from_instance(self,obj):\n return str(obj.scientific_name())\n\nclass TaxChoiceField(forms.ChoiceField):\n def valid_value(self, value):\n return True\n def clean(self, value):\n value = super(self.__class__,self).clean(value)\n if value:\n return Taxon.objects.get(taxon_id=value)\n return None\n\ndef form_clean_data(form):\n cleaned_data = super(form.__class__, form).clean()\n qs = form._meta.model.objects.filter(name=cleaned_data[\"name\"])\n # if cleaned_data[\"ncbi_tax\"]:\n # form.data[\"ncbi_tax\"] = Taxon.objects.get(taxon_id=cleaned_data[\"ncbi_tax\"])\n if \"pk\" in form.data:\n if qs.exclude(id=form.data[\"pk\"]).exists():\n form._errors['name'] = form._errors.get('name', [])\n form._errors['name'].append(__(\"%s already exists\") % cleaned_data[\"name\"])\n else:\n if qs.exists():\n form._errors['name'] = form._errors.get('name', [])\n form._errors['name'].append(__(\"%s already exists\") % cleaned_data[\"name\"])\n\n\ndef submit_model(form_class, request):\n if request.method == 'POST':\n if \"pk\" in request.GET:\n obj = form_class._meta.model.objects.get(id=request.GET[\"pk\"])\n form = form_class(request.POST, instance=obj)\n else:\n form = form_class(request.POST)\n\n if form.is_valid():\n with transaction.atomic():\n obj = form.save()\n if not Collaboration.objects.filter(resource=obj, person=request.user.person).exists():\n Collaboration.objects.create(resource=obj, person=request.user.person,\n type=Collaboration.COLLABORATION_TYPES.owner)\n return HttpResponseRedirect(reverse(\"bioresources:\" + obj.type_name() + \"_view\", args=[obj.id]))\n else:\n if \"pk\" in request.GET:\n resource = form_class._meta.model.objects.get(id=request.GET[\"pk\"])\n form = form_class(instance=resource)\n else:\n form = form_class()\n\n data = {'form': form}\n if \"pk\" in request.GET:\n data[\"pk\"] = request.GET[\"pk\"]\n\n return render(request, 'submission/tool_submission.html', data)\n","sub_path":"bioresources/views/submission/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"399392188","text":"import random\nimport discord\nfrom discord.ext import commands\n\nfrom .inputs import cl, cf, chill, cfe, ur\nfrom .utils import COLOR\n\n\nclass Coffee(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command(aliases=['ask_out'])\n async def wannagrabacoffee(self, ctx, *, member: discord.Member):\n '''Wanna ask someone out on coffee'''\n 
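# build a teasing invitation embed addressed to the mentioned member, using a random line from cf\n        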
embed = discord.Embed(\n title=f'{member}, Someone wants to grab a coffee with you...*wink *wink',\n color=COLOR.DEFAULT)\n embed.add_field(name='This happened....', value=f'{random.choice(cf)}')\n embed.set_footer(text='not actually')\n await ctx.send(embed=embed)\n\n @commands.command(aliases=['brew'])\n async def coffee(self, ctx):\n '''A lovely coffee command (sip, sip)'''\n op = f'{random.choice(cfe)}'\n embed = discord.Embed(title='Coffee',\n description=op,\n color=COLOR.DEFAULT)\n embed.set_footer(\n text=f'Caffeiene Level-{random.choice(cl)}.{random.choice(chill)}')\n embed.set_image(url=random.choice(ur))\n await ctx.send(embed=embed)\n\n\ndef setup(client):\n client.add_cog(Coffee(client))\n","sub_path":"bot/cogs/coffee_cog.py","file_name":"coffee_cog.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"271681671","text":"\"\"\"\n Show how to use a sprite backed by a graphic.\n \n Sample Python/Pygame Programs\n Simpson College Computer Science\n http://programarcadegames.com/\n http://simpson.edu/computer-science/\n \n Explanation video: http://youtu.be/vRB_983kUMc\n\"\"\"\n \nimport pygame\n \n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nBROWN = (255, 64, 0)\nSILVER = (192, 192, 192)\n\n\ndef christmas_tree_ball(screen, x = 0, y = 0, size=5):\n pygame.draw.circle(screen, SILVER, (x, y), size)\n\n\ndef christmas_tree(screen, x=0, y=0):\n pygame.draw.rect(screen, BROWN, (40 + x, 30 + y, 20, 100))\n pygame.draw.polygon(screen, GREEN, [(0 + x, 110 + y), (100 + x, 110 + y), (50 + x, 50 + y)])\n pygame.draw.polygon(screen, GREEN, [(10 + x, 80 + y), (90 + x, 80 + y), (50 + x, 30 + y)])\n pygame.draw.polygon(screen, GREEN, [(20 + x, 50 + y), (80 + x, 50 + y), (50 + x, 0 + y)])\n christmas_tree_ball(screen, 30 + x, 70 + y)\n christmas_tree_ball(screen, 50 + x, 60 + y)\n christmas_tree_ball(screen, 58 + x, 25 + y)\n christmas_tree_ball(screen, 40 + x, 100 + y)\n christmas_tree_ball(screen, 80 + x, 75 + y)\n\n\ndef dog(screen, x=0, y=0):\n # The dogs body\n pygame.draw.rect(screen, BROWN, (30 + x, 20 + y, 70, 30))\n # The dogs head\n pygame.draw.rect(screen, BROWN, (10 + x, 10 + y, 20, 20))\n # The dogs legs\n pygame.draw.rect(screen, BROWN, (40 + x, 50 + y, 10, 30))\n pygame.draw.rect(screen, BROWN, (80 + x, 50 + y, 10, 30))\n # The dogs tail\n pygame.draw.polygon(screen, BROWN, [(100 + x, 20 + y), (100 + x, 25 + y), (130 + x, 5 + y), (130 + x, 0 + y)])\n # The dogs ears\n pygame.draw.polygon(screen, BROWN, [(17 + x, 10 + y), (27 + x, 10 + y), (22 + x, 0 + y)])\n # The dogs snout\n pygame.draw.rect(screen, BROWN, (0 + x, 20 + y, 15, 7))\n # The dogs eyes\n pygame.draw.circle(screen, BLACK, (22 + x, 15 + y), 5)\n pygame.draw.circle(screen, BLUE, (21 + x, 16 + y), 3)\n\n\npygame.init()\n \n# Set the width and height of the screen [width, height]\nsize = (700, 500)\nscreen = pygame.display.set_mode(size)\n \npygame.display.set_caption(\"My Game\")\n \n# Loop until the user clicks the close button.\ndone = False\n \n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\n# Game Controller\n# Current position\nx_coord = 10\ny_coord = 10\n\n# Count the joysticks the computer has\njoystick_count = pygame.joystick.get_count()\nif joystick_count == 0:\n # No joysticks!\n print(\"Error, I didn't find any joysticks!\")\nelse:\n # Use joystick #0 and initialise it.\n my_joystick = 
pygame.joystick.Joystick(0)\n my_joystick.init()\n \n# -------- Main Program Loop -----------\nwhile not done:\n # --- Main event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n \n # --- Game logic should go here\n pos = pygame.mouse.get_pos()\n mouse_x = pos[0]\n mouse_y = pos[1]\n\n button_clicked = pygame.MOUSEBUTTONDOWN\n\n\n # As long as there is a joystick:\n if joystick_count != 0:\n # This gets the position of the axis on the game controller.\n # It returns a number between -1.0 and +1.0\n horiz_axis_pos = my_joystick.get_axis(2)\n vert_axis_pos = my_joystick.get_axis(3)\n\n # Move x according to the axis. We multiply by 10 to speed up the movement.\n # Convert to an integer because we can't draw at pixel 3.5, just 3 or 4.\n if x_coord < 565 and x_coord > 0:\n x_coord = x_coord + int(horiz_axis_pos * 10)\n elif x_coord >=565 and horiz_axis_pos <= 0:\n x_coord = x_coord + int(horiz_axis_pos * 10)\n elif x_coord <= 0 and horiz_axis_pos >= 0:\n x_coord = x_coord + int(horiz_axis_pos * 10)\n if y_coord <= 420 and y_coord > 0:\n y_coord = y_coord + int(vert_axis_pos * 10)\n elif y_coord >= 420 and vert_axis_pos <= 0:\n y_coord = y_coord + int(vert_axis_pos * 10)\n elif y_coord <= 0 and vert_axis_pos >= 0:\n y_coord = y_coord + int(vert_axis_pos * 10)\n\n # --- Drawing code should go here\n \n # First, clear the screen to white. Don't put other drawing commands\n # above this, or they will be erased with this command.\n screen.fill(WHITE)\n christmas_tree(screen, mouse_x, mouse_y)\n dog(screen, x_coord, y_coord)\n \n # --- Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n \n # --- Limit to 60 frames per second\n clock.tick(60)\n \n# Close the window and quit.\n# If you forget this line, the program will 'hang'\n# on exit if running from IDLE.\npygame.quit()","sub_path":"10_controllers_and_graphics/lab_10_controllers_and_graphics.py","file_name":"lab_10_controllers_and_graphics.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"548472020","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2008 Zuza Software Foundation\n#\n# This file is part of Spelt.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see .\n\nfrom lxml import etree, objectify\nfrom StringIO import StringIO\n\nfrom spelt.models.xml_model import XMLModel\n\nclass TestXMLModel:\n \"\"\"Unit test for XMLModel class.\"\"\"\n\n xml = StringIO(\"\"\"\n \n 182\n 70.5\n \n \n \"\"\")\n\n def __init__(self):\n self.model = XMLModel(\n 'person',\n values=['height', 'weight', 'notes'],\n attribs=['sex', 'race'],\n elem=objectify.parse(TestXMLModel.xml).getroot()\n )\n\n def test_from_xml(self):\n \"\"\"\n Test that XMLModel.from_xml() works by checking that members are assigned\n according to the hard-coded values represented in xml.\n \"\"\"\n assert self.model.sex == 'male'\n assert self.model.race == 'chinese'\n assert self.model.height == '182'\n assert self.model.weight == '70.5'\n assert self.model.notes == ''\n\n def test_to_xml(self):\n \"\"\"\n Test that XMLModel.to_xml() works by comparing the source\n lxml.objectify.ObjectifiedElement used to create a XMLModel and the element\n returned by to_xml().\n \"\"\"\n toroot = self.model.elem\n\n assert self.model.sex == toroot.get('sex')\n assert self.model.race == toroot.get('race')\n assert float(self.model.height) == float(toroot.height)\n assert float(self.model.weight) == float(toroot.weight)\n assert self.model.notes == toroot.notes\n\nif __name__ == '__main__':\n test = TestXMLModel()\n test.test_from_xml()\n test.test_to_xml()\n","sub_path":"spelt/models/test_xml_model.py","file_name":"test_xml_model.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"47875305","text":"import pygame\nimport datetime\nimport maingui\npygame.init()\ndisplay_width = 800\ndisplay_height = 480\nblack = (255,255,255)\nsingle_click=True\nDisplay = pygame.display.set_mode((display_width,display_height))\nclock = pygame.time.Clock()\ndef close():\n maingui.main_window()\ndef wallpaper_window():\n single_click=True\n click = pygame.mouse.get_pressed()\n background = pygame.image.load(\"/home/pi/Desktop/package/snowboy/icon/wallpaper.jpg\").convert()\n font = pygame.font.Font(\"/home/pi/Desktop/package/snowboy/icon/COMIC.TTF\",50)\n currentime = datetime.datetime.time(datetime.datetime.now())\n Date=datetime.date.today().strftime(\"%A\")[:3]+\" /\"+datetime.date.today().strftime(\"%B\")[:3]+\" /\"+str(datetime.date.today().strftime(\"%d\"))\n time1= font.render(currentime.strftime(\"%I:%M %p\"), 1, black)\n date=font.render(Date, True, black)\n Display.blit(background,[0,0])\n while True: \n for event in pygame.event.get():\n \n if event.type == pygame.QUIT:\n close()\n elif event.type==pygame.MOUSEBUTTONDOWN:\n close()\n Display.blit(background,[0,0])\n Display.blit(time1,[280,10])\n Display.blit(date,[260,65])\n pygame.display.update()\n clock.tick(5)\nif __name__ == \"__main__\":\n wallpaper_window()\n","sub_path":"snowboy/wallpaper.py","file_name":"wallpaper.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"487484585","text":"import torch\r\nfrom torch import nn\r\n\r\n\r\nclass VGG(nn.Module):\r\n def __init__(self, mode):\r\n super(VGG, self).__init__()\r\n\r\n if mode == 'motion':\r\n self.block1 = nn.Sequential(\r\n nn.Conv1d(in_channels=20, out_channels=320, kernel_size=3, padding=1, padding_mode='reflect', groups=20),\r\n 
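# grouped conv: 16 filters per motion channel (groups=20), projected down to 32 channels by the conv below\r\n                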
nn.LeakyReLU(),\r\n nn.Conv1d(in_channels=320, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.BatchNorm1d(32),\r\n nn.LeakyReLU(),\r\n )\r\n if mode == 'music':\r\n self.block1 = nn.Sequential(\r\n nn.Conv1d(in_channels=46, out_channels=46*8, kernel_size=3, padding=1, padding_mode='reflect', groups=46),\r\n nn.LeakyReLU(),\r\n nn.Conv1d(in_channels=46*8, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.BatchNorm1d(32),\r\n nn.LeakyReLU(),\r\n )\r\n\r\n self.block2 = nn.Sequential(\r\n nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.BatchNorm1d(32),\r\n nn.LeakyReLU(),\r\n nn.AvgPool1d(kernel_size=2, stride=2)\r\n )\r\n self.block3 = nn.Sequential(\r\n nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.BatchNorm1d(32),\r\n nn.LeakyReLU(),\r\n nn.AvgPool1d(kernel_size=2, stride=2)\r\n )\r\n self.block4 = nn.Sequential(\r\n nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.BatchNorm1d(32),\r\n nn.LeakyReLU(),\r\n nn.AvgPool1d(kernel_size=2, stride=2)\r\n )\r\n self.block5 = nn.Sequential(\r\n nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=1, padding_mode='reflect'),\r\n nn.BatchNorm1d(32),\r\n nn.LeakyReLU(),\r\n nn.AvgPool1d(kernel_size=2, stride=2)\r\n )\r\n\r\n def forward(self, input):\r\n h1 = self.block1(input.transpose(1, 2))\r\n h2 = self.block2(h1)\r\n h3 = self.block3(h2)\r\n h4 = self.block4(h3)\r\n #h5 = self.block5(h4)\r\n\r\n return h4\r\n\r\n def features(self, input):\r\n h1 = self.block1(input.transpose(1, 2))\r\n h2 = self.block2(h1)\r\n h3 = self.block3(h2)\r\n h4 = self.block4(h3)\r\n #h5 = self.block5(h4)\r\n\r\n return [h1, h2, h3, h4]\r\n\r\n\r\nclass M2SNet(nn.Module):\r\n\r\n def __init__(self):\r\n super(M2SNet, self).__init__()\r\n\r\n self.music_encoder = VGG('music')\r\n self.motion_encoder = VGG('motion')\r\n\r\n self.music_fc = nn.Linear(32,16)\r\n self.motion_fc = nn.Linear(32,16)\r\n self.fues_out_fc = nn.Sequential(\r\n nn.Linear(32,8),\r\n nn.ReLU(),\r\n nn.Linear(8,1),\r\n nn.Sigmoid()\r\n )\r\n\r\n self.l2norm = nn.MSELoss()\r\n\r\n def forward(self, x, y):\r\n hx = self.music_encoder(x)\r\n hx = self.music_fc(hx.transpose(1,2))\r\n hy = self.motion_encoder(y)\r\n hy = self.motion_fc(hy.transpose(1,2))\r\n\r\n h_fuse = torch.cat([hx,hy],dim=2)\r\n out = self.fues_out_fc(h_fuse)\r\n\r\n return self.l2norm(hx, hy), out\r\n\r\n def features(self, x, y):\r\n x_features = self.music_encoder.features(x)\r\n y_features = self.motion_encoder.features(y)\r\n\r\n return x_features, y_features\r\n\r\nif __name__ == '__main__':\r\n\r\n from dataset import *\r\n\r\n import seaborn as sns\r\n import numpy as np\r\n import pandas as pd\r\n import matplotlib.pyplot as plt\r\n\r\n training_set = ConductorDataset(sample_length=1000, dataset_dir='..\\dataset\\\\', sample_limit=None,mode='low')\r\n train_loader = DataLoader(dataset=training_set, batch_size=1, shuffle=False, pin_memory=False)\r\n\r\n M2SNet = torch.load('..\\checkpoints/low_AMC_globalstep225000.pt')\r\n 
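# the loaded checkpoint must match the dataset mode ('low' above, 'high' kept below for reference)\r\n    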
# M2SNet = torch.load('..\\checkpoints/high_AMC_globalstep300000.pt')\r\n\r\n for step, (batch_x, batch_y) in enumerate(train_loader):\r\n var_x, var_y = batch_x.cuda(), batch_y.cuda()\r\n hxs,hys = M2SNet.features(var_x,var_y)\r\n\r\n var_x = var_x.transpose(1,2)\r\n var_y = var_y.transpose(1,2)\r\n print(step,var_x.size(),var_y.size())\r\n feature_maps = []\r\n\r\n\r\n feature_maps.append(var_y.cpu().detach().numpy()[0,:,:])\r\n for i in range(len(hys)):\r\n feature_maps.append(hys[i].cpu().detach().numpy()[0,:,:])\r\n\r\n feature_maps.append(var_x.cpu().detach().numpy()[0,:,:])\r\n for i in range(len(hxs)):\r\n feature_maps.append(hxs[i].cpu().detach().numpy()[0,:,:])\r\n\r\n for i in range(len(feature_maps)):\r\n # sns.heatmap(feature_maps[i], cmap=\"YlGnBu\", xticklabels=False, yticklabels=False, cbar=False)\r\n plt.matshow(feature_maps[i], cmap=plt.get_cmap('plasma'))\r\n plt.axis('off')\r\n plt.savefig('..\\\\EXP\\\\low\\\\'+'{}_{}.png'.format(step,i), bbox_inches='tight', transparent=True,pad_inches=0)\r\n plt.close()\r\n exit()\r\n","sub_path":"models/M2SNet.py","file_name":"M2SNet.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"115946087","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport json\nfrom urllib.parse import urlencode\nimport logging\n\nfrom wandoujia.items import WandoujiaItem\n\n\n# 把日志输出到文件内\nlogging.basicConfig(filename=\"wandoujia.log\",filemode=\"w\",level=logging.DEBUG,\n format=\"%(asctime)s %(message)s\",datefmt=\"%Y/%m/%d %I/%M/%S %p\")\nlogging.warning(\"warn message\")\nlogging.error(\"error message\")\n\n\nclass WandouSpider(scrapy.Spider):\n name = 'wandou'\n allowed_domains = ['www.wandoujia.com']\n start_urls = ['http://www.wandoujia.com/']\n\n def __init__(self):\n # 软件分类页面\n self.cate_url = \"https://www.wandoujia.com/category/app\"\n # 小分类,需主分类和子分类编号\n self.url = \"https://www.wandoujia.com/category/\"\n # ajax 请求url,需参数\n self.ajax_url = \"https://www.wandoujia.com/wdjweb/api/category/more?\"\n # 实例化分类标签\n self.wandou_category = Get_category()\n\n\n def start_requests(self):\n yield scrapy.Request(self.cate_url,callback=self.get_category)\n\n def get_category(self,response):\n cate_content = self.wandou_category.parse_category(response)\n for item in cate_content:\n child_cate = item[\"child_cate_codes\"]\n for cate in child_cate:\n cate_code = item[\"cate_code\"] # 5029\n cate_name = item[\"cate_name\"] # 影音播放\n child_cate_code = cate[\"child_cate_code\"] # 716\n child_cate_name = cate[\"child_cate_name\"] # 视频\n\n page = 1\n logging.debug(\"正在爬取:%s-%s 第 %s 页\" % (cate_name, child_cate_name, page))\n\n if page == 1:\n # 构造首页url\n category_url = '{}{}_{}'.format(self.url,cate_code,child_cate_code)\n else:\n params ={\n 'catId':cate_code,\n 'subCatId':child_cate_code,\n 'page':page,\n 'ctoken': 'kamD4KvHwl9PwHYkn3CsZomD'\n }\n category_url = self.ajax_url + urlencode(params)\n\n dict = {'page': page, 'cate_name': cate_name, 'cate_code': cate_code,\n 'child_cate_name': child_cate_name, 'child_cate_code': child_cate_code}\n\n yield scrapy.Request(category_url,callback=self.parse,meta=dict)\n\n def parse(self,response):\n if len(response.body) >= 100:\n page = response.meta['page']\n cate_name = response.meta['cate_name']\n cate_code = response.meta['cate_code']\n child_cate_name = response.meta['child_cate_name']\n child_cate_code = response.meta['child_cate_code']\n\n if page == 1:\n contents = response\n else:\n jsonresponse = 
json.loads(response.body_as_unicode())\n contents = jsonresponse['data']['content']\n contents = scrapy.Selector(text=contents,type=\"html\")\n\n contents = contents.css('.card')\n for content in contents:\n item = WandoujiaItem()\n item['cate_name'] = cate_name\n item['child_cate_name'] = child_cate_name\n item['app_name'] = self.clean_name(content.css('.name::text').extract_first())\n item['install'] = content.css('.install-count::text').extract_first()\n item['volume'] = content.css('.meta span:last-child::text').extract_first()\n item['comment'] = content.css('.comment::text').extract_first().strip()\n item['icon_url'] = self.get_icon_url(content.css('.icon-wrap a img'),page)\n yield item\n\n page += 1\n params = {\n 'catId':cate_code,\n 'subCatId':child_cate_code,\n 'page':page,\n 'ctoken': 'kamD4KvHwl9PwHYkn3CsZomD'\n }\n ajax_url = self.ajax_url + urlencode(params)\n\n dict = {'page': page, 'cate_name': cate_name, 'cate_code': cate_code,\n 'child_cate_name': child_cate_name, 'child_cate_code': child_cate_code}\n\n yield scrapy.Request(ajax_url, callback=self.parse, meta=dict)\n\n # 去除不能用于文件命名的特殊字符\n def clean_name(self,name):\n pattern = re.compile(r'[\\/\\\\\\:\\*\\?\\\"\\<\\>\\|]')\n name = re.sub(pattern, '', name)\n return name\n\n def get_icon_url(self,item,page):\n if page == 1:\n if item.css('::attr(\"src\")').extract_first().startswith('https'):\n url = item.css('::attr(\"src\")').extract_first()\n else:\n url = item.css('::attr(\"data-original\")').extract_first()\n else:\n url = item.css('::attr(\"data-original\")').extract_first()\n return url\n\n\n# 获得主分类和子分类的编号\nclass Get_category():\n def parse_category(self,response):\n category = response.css(\".parent-cate\")\n data = [{\n \"cate_name\": item.css(\".cate-link::text\").extract_first(),\n \"cate_code\": self.get_category_code(item),\n \"child_cate_codes\": self.get_child_category(item)\n } for item in category]\n return data\n\n # 提取主分类编号\n def get_category_code(self,item):\n cate_url = item.css('.cate-link::attr(\"href\")').extract_first()\n pattern = re.compile(r\".*/(\\d+)\")\n cate_code = re.search(pattern,cate_url)\n return cate_code.group(1)\n\n\n # 获取所有子分类标签数值编码\n def get_child_category(self,item):\n child_cate = item.css('.child-cate a')\n child_cate_url = [{\n \"child_cate_name\": child.css('::text').extract_first(),\n \"child_cate_code\": self.get_child_category_code(child)\n } for child in child_cate]\n return child_cate_url\n\n # 提取子分类的编号\n def get_child_category_code(self,child):\n child_cate_url = child.css('::attr(\"href\")').extract_first()\n pattern = re.compile(r'.*_(\\d+)')\n child_cate_code = re.search(pattern,child_cate_url)\n return child_cate_code.group(1)\n\n\n\n\n\n\n\n\n\n","sub_path":"Scrapy+阿布云多级页面爬取豌豆荚/wandoujia/wandoujia/spiders/wandou.py","file_name":"wandou.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"139456717","text":"from nltk.parse.corenlp import CoreNLPDependencyParser, CoreNLPParser\nfrom nltk.stem.wordnet import WordNetLemmatizer\nimport sqlite3,os\nfrom sqlite3 import Error\nimport sjuman2_spate505 as part1\n\n#--- Lambda function to return Noun and generate appropriate where clause for that root word\ndef returnNounReps(ner_tag,l):\n\tmovie=\"\"\n\tlocation=\"\"\n\tperson=\"\"\n\ttitle=\"\"\n\ttag=[ner_tag[i][1] for i in range(0,len(ner_tag)) if ner_tag[i][0]==l[0]]\n\tif len(tag)==0:\n\t\ttag='X'\n\telse:\n\t\ttag=tag[0]\n\tif tag=='PERSON':\n\t\tif 
person!=\"\":\n\t\t\tperson+=\" and \"\n\t\tperson+=\" lower(p.name) like '%\" + str(l[0]).lower() + \"%'\" \n\telif tag=='LOCATION':\n\t\tlocation=\" lower(p.pob) like '%\" + str(l[0]).lower() + \"%'\"\n\telif tag=='NATIONALITY':\n\t\tplace=nationality_mapping[l[0].upper()]\n\t\tlocation=\" lower(p.pob) like '%\" + str(place).lower() + \"%'\"\n\telif tag=='TITLE':\n\t\ttitle=l2[0].lower()\n\telse:\n\t\tif movie!=\"\":\n\t\t\tmovie+=\" and \"\n\t\tmovie+=\" lower(m.name) like '%\" + str(l[0]).lower() + \"%'\"\n\treturn person,location,title,movie\n\n\n#-- Generate the clauses to be used in the query\ndef getClause(parse,ner_tag):\n\tnationality_mapping={\"ITALIAN\": \"ITALY\", \"GERMAN\": \"GERMANY\", \"FRENCH\":\"FRANCE\", \"BRITISH\": \"UK\", \"AMERICAN\": \"USA\" }\n\tacts=['act','star']\n\tperson=\"\"\t\n\tlocation=\"\"\n\tmovie=\"\"\n\ttitle=\"\"\n\ttitle1=\"\"\n\tdirect='N'\n\tact='N'\n\toscar='N'\n\tosc=\"\"\n\tfrom_clause=\"\"\n\tyear=\"\"\n\tborn=\"\"\n\tmovie_mapping={'movie': 'film', 'film' : 'film', 'picture' : 'film'}\n\tfor l in list(parse.triples()):\n\t\t# Handling the subject and modifiers: Nouns associated with the VERB\n\t\tif 'nsubj' in l[1] or 'nmod' in l[1] or 'compound' in l[1]:\n\t\t\tif 'VB' in l[0][1]:\n\t\t\t\tlm=WordNetLemmatizer().lemmatize(l[0][0],'v')\n\t\t\t\tif lm==\"win\":\n\t\t\t\t\toscar='Y'\n\t\t\t\telif lm=='born':\n\t\t\t\t\tborn='Y'\n\t\t\t\telif lm in ['star','act']:\n\t\t\t\t\tact='Y'\n\t\t\t\telif lm in ['direct']:\n\t\t\t\t\tdirect='Y'\n\t\t\telif 'NNP' in l[0][1]:\n\t\t\t\tper,loc,title,mov=returnNounReps(ner_tag,l[0])\n\t\t\t\tif person!=\"\" and per!=\"\":\n\t\t\t\t\tperson+=\" and \"\n\t\t\t\tperson+=per\n\t\t\t\tif loc!=\"\" and person!=\"\":\n\t\t\t\t\tperson+=\" and \"\n\t\t\t\tperson+=loc\n\t\t\t\tif movie!=\"\" and mov!=\"\":\n\t\t\t\t\tmovie+=\" and \"\n\t\t\t\tmovie+=mov\n\t\t\tif 'NNP' in l[2][1]:\n\t\t\t\tper,loc, title, mov=returnNounReps(ner_tag,l[2])\n\t\t\t\tif person!=\"\" and per!=\"\":\n\t\t\t\t\tperson+=\" and \"\n\t\t\t\tperson+=per\n\t\t\t\tif loc!=\"\" and person!=\"\":\n\t\t\t\t\tperson+=\" and \"\n\t\t\t\tperson+=loc\n\t\t\t\tif movie!=\"\" and mov!=\"\":\n\t\t\t\t\tmovie+=\" and \"\n\t\t\t\tmovie+=mov\n\t\t\tif 'NN' in l[0][1]:\n\t\t\t\ttag=[ner_tag[i][1] for i in range(0,len(ner_tag)) if ner_tag[i][0].lower()==l[0][0].lower()]\n\t\t\t\tif len(tag)==0:\n\t\t\t\t\ttag='X'\n\t\t\t\telse:\n\t\t\t\t\ttag=tag[0]\n\t\t\t\tif tag=='TITLE':\n\t\t\t\t\t\n\t\t\t\t\tif l[0][0].lower() in ['actor','actress','act']:\n\t\t\t\t\t\tact='Y'\n\t\t\t\t\tif l[0][0].lower() in ['director','directors']:\n\t\t\t\t\t\tdirect='Y'\n\t\t# Handling adjectives: finding category for oscars\n\t\tif 'amod' in l[1] or ('nsubj' in l[1] and 'JJ' in l[2][1]) or ('dobj' in l[1]):\n\t\t\tif 'best' in l[2][0].lower():\n\t\t\t\toscar='Y'\n\t\t\t\tword=l[0][0].lower()\n\t\t\t\tif word in ['movie','picture','cinema']:\n\t\t\t\t\tword=\"film\"\n\t\t\t\ttitle1=\"best%\" + word\n\t\t\tif 'dobj' in l[1] and 'best' in title1:\n\t\t\t\ttitle1+=\"%\" +l[2][0].lower()\n\t\t\t\n\t\t# Handling case: by-direct and with-act\n\t\tif 'case' in l[1]:\n\t\t\t\n\t\t\tif l[2][1]=='IN':\n\t\t\t\tif l[2][0].lower()=='with':\n\t\t\t\t\tact='Y'\n\t\t\t\tif l[2][0].lower()=='by':\n\t\t\t\t\tdirect='Y'\n\t\t\tif l[2][1]=='CD' or l[0][1]=='CD':\n\t\t\t\tif l[2][1]=='CD':\n\t\t\t\t\td=l[2][0]\n\t\t\t\telse:\n\t\t\t\t\td=l[0][0]\n\t\t\t\tif born=='Y':\n\t\t\t\t\tif person!=\"\":\n\t\t\t\t\t\tperson+=\" and \"\n\t\t\t\t\tperson+=\" p.dob like '\" + d + \"%'\"\n\t\t\t\tif 
oscar=='Y':\n\n\t\t\t\t\tif osc!=\"\":\n\t\t\t\t\t\tosc+=\" and \"\n\t\t\t\t\tosc+=\" o.year=\" + d\n\t\t\t\telse:\n\t\t\t\t\tif movie!=\"\":\n\t\t\t\t\t\tmovie+=\" and \"\n\t\t\t\t\tmovie+=\" m.year=\" + d\n\t\n\t\n\tif title1!=\"\": \n\t\tif osc!=\"\":\n\t\t\tosc+=\" and \"\n\t\tosc+=\" lower(o.type) like '%\" + title1 + \"%'\"\n\tfrom_clause=\"\"\n\tif person!=\"\":\n\t\tfrom_clause+=\" person p \"\n\tif movie!=\"\":\n\t\tfrom_clause+=\", movie m \"\n\tif oscar=='Y':\n\t\tfrom_clause+=\", oscar o \"\n\tif act=='Y':\n\t\tfrom_clause+=\", actor a \"\n\tif direct=='Y':\n\t\tfrom_clause+=\", director d \"\n\t\n\treturn from_clause,person,movie,location,oscar,osc,born\n\n# generate the query + base conditions\t\ndef generate_query(dep_parse,ner_tag, parse):\n\tfrom_clause,person,movie,location,oscar,osc,born=getClause(dep_parse,ner_tag)\n\tselect_clause=\"\"\n\tif 'SBAR' not in parse:\n\t\tselect_clause=\"Select case when count(*)>=1 then 'Yes' else 'No' end \"\n\t# Setting base conditions for where clause\n\t\n\twh_query=person + movie + osc + location \n\tif person!=\"\":\n\t\t#wh_query=person \n\t\tif 'actor' in from_clause:\n\t\t\twh_query+=\" and p.id=a.actor_id\"\n\t\telif 'director' in from_clause:\n\t\t\twh_query+= \" and p.id=d.director_id\"\n\t\t\t\n\t\n\tif 'oscar' in from_clause and 'movie' in from_clause:\n\t\twh_query+=\" and m.id=o.movie_id \"\n\t\n\tif 'oscar' in from_clause and 'person' in from_clause:\n\t\twh_query+=\" o.person_id=p.id and \" + osc\n\t\t\n\tif 'movie' in from_clause and 'actor' in from_clause:\n\t\twh_query+=\" a.movie_id=m.id\"\n\t\n\tquery=select_clause + \" from \" + from_clause[1:] + \" where \" + wh_query\n\tprint(query)\n\treturn query\n\t\n\t\nqstn=\"Which album by Swift was released in 2012?\"\nparser = CoreNLPDependencyParser()\n\t\ndepparse = next(parser.raw_parse(qstn))\nfor l in list(depparse.triples()):\n\tprint(l)\nner_tagger = CoreNLPParser(url='http://localhost:9000', tagtype='ner')\nner_tag=ner_tagger.tag(qstn.split())\ngenerate_query(depparse,ner_tag,'S')\n\t\n","sub_path":"sjuman2_spate505/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":5283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"466934035","text":"\r\nfrom flask import Flask, render_template, Markup\r\nimport pandas as pd\r\n\r\npath = 'F:/task4/'\r\ndata = pd.read_json(f'{path}movie-sessions-full.json', encoding = 'UTF-8')\r\ndata = data.values.tolist()\r\n\r\napp = Flask('server')\r\n\r\n@app.route('/')\r\ndef homepage():\r\n full_temp = \"\"\r\n for _3d, cinema_name, distance, film_name, image, price, rating, startTime in data:\r\n full_temp += f\"\"\"\t
<div>\r\n \t\t<p>{film_name}</p>\r\n \t\t\t<img src=\"{image}\" alt=\"Oops!\">\r\n \t\t\t<div>\r\n \t\t\t\t<p>{cinema_name}: {rating} баллов</p>\r\n \t\t\t\t<p>Расстояние от девушки: {distance} км</p>\r\n \t\t\t\t\"\"\"\r\n full_temp += \"<p>Снято в 3D!</p>\" if _3d else \"\" \r\n full_temp += f\"\"\"<p>Время начала: {startTime}</p>\r\n \t\t\t\t<p>Цена: {price}</p>\r\n \t\t\t</div>\r\n \t\t</div>
\r\n \"\"\"\r\n \r\n full_temp = Markup(full_temp)\r\n \r\n return render_template('index.html', elements = full_temp)\r\n\r\napp.run(host = '0.0.0.0')\r\n","sub_path":"Task_4_site/Task4.py","file_name":"Task4.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"428206763","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\"\n序号:44\n题目:翻转单词顺序列\n\n题目描述:\n牛客最近来了一个新员工 Fish,\n每天早晨总是会拿着一本英文杂志,写些句子在本子上。\n同事 Cat 对 Fish 写的内容颇感兴趣,有一天他向 Fish 借来翻看,\n但却读不懂它的意思。例如,“student. a am I”。\n后来才意识到,这家伙原来把句子单词的顺序翻转了,\n正确的句子应该是 \"I am a student.\"。\nCat 对一一的翻转这些单词顺序可不在行,你能帮助他么?\n\n时间限制:1秒 空间限制:32768K\n本题知识点:字符串,知识迁移能力\n\"\"\"\n\nimport sys\nimport time\n\n\nclass Solution:\n @staticmethod\n def reverse_sentence(s):\n if s is None or len(s) <= 0:\n return ''\n\n # 本题默认用空格分开单词\n if s.find(' ') >= 0:\n # 如果有空格,通过 split 分开单词\n s_list = s.split(' ')\n # 翻转列表顺序\n s_list.reverse()\n # 加入空格合成新字符串\n return ' '.join(s_list)\n else:\n # 如果 s 中没有空格分开,那就直接输出 s\n return s\n\n\ndef main():\n solution = Solution()\n\n s = 'student. a am I' # 'I am a student.'\n # s = 'Hello.' # 'Hello.'\n # s = 'Hello. ' # ' Hello.'\n # s = '' # ''\n # s = ' ' # ' '\n # s = ' ' # ' '\n\n start = time.process_time()\n answer = solution.reverse_sentence(s)\n end = time.process_time()\n\n if answer is not None:\n print(answer)\n else:\n print('Answer is None.')\n\n print('Running Time: %.5f ms' % ((end - start) * 1000))\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"Online-Judge/NowCoder/Aim_at_Offer/source_code/Prob_44.py","file_name":"Prob_44.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"448905285","text":"import time\nimport logging\nimport os\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\nclass TripCubeWriter:\n def __init__(self, filename, options):\n self.output_corename = filename\n\n def write(self, detector):\n import getpass\n from pymchelper.shieldhit.detector.detector_type import SHDetType\n from pymchelper import __version__ as _pmcversion\n # TODO add printing information how to install pytrip if it's missing\n from pytrip import __version__ as _ptversion\n\n pixel_size_x = (detector.xmax - detector.xmin) / detector.nx\n pixel_size_z = (detector.zmax - detector.zmin) / detector.nz\n\n logging.debug(\"psx: {:.6f} [cm]\".format(pixel_size_x))\n logging.debug(\"psz: {:.6f} [cm]\".format(pixel_size_z))\n\n _patient_name = \"Anonymous\"\n _created_by = getpass.getuser()\n _creation_info = \"Created with pymchelper {:s}; using PyTRiP98 {:s}\".format(_pmcversion,\n _ptversion)\n\n if detector.dettyp == SHDetType.dose:\n\n from pytrip import dos\n\n cube = dos.DosCube()\n # Warning: PyTRiP cube dimensions are in [mm]\n cube.create_empty_cube(\n 1.0, detector.nx, detector.ny, detector.nz,\n pixel_size=pixel_size_x * 10.0,\n slice_distance=pixel_size_z * 10.0)\n\n # .dos dose cubes are usually in normalized integers,\n # where \"1000\" equals 100.0 % dose.\n # The next are also the defaults, but just to be clear\n # this is specifially set.\n cube.data_type = \"integer\"\n cube.num_bytes = 2\n cube.pydata_type = np.int16\n\n cube.cube = detector.data.reshape(detector.nx, detector.ny, detector.nz)\n\n if detector.tripdose >= 0.0 and detector.tripntot > 0:\n cube.cube = (cube.cube * detector.tripntot * 1.602e-10) / detector.tripdose * 1000.0\n else:\n cube.cube = (cube.cube / cube.cube.max()) * 
1200.0\n\n # Save proper meta information\n cube.patient_name = _patient_name\n cube.created_by = _created_by\n cube.creation_info = _creation_info\n\n cube.write(self.output_corename)\n\n elif detector.dettyp in (SHDetType.dlet, SHDetType.tlet, SHDetType.dletg, SHDetType.tletg):\n\n from pytrip import let\n\n cube = let.LETCube()\n # Warning: PyTRiP cube dimensions are in [mm]\n cube.create_empty_cube(\n 1.0, detector.nx, detector.ny, detector.nz,\n pixel_size=pixel_size_x * 10.0,\n slice_distance=pixel_size_z * 10.0)\n\n # .dosemlet.dos LET cubes are usually in 32 bit floats.\n cube.data_type = \"float\"\n cube.num_bytes = 4\n cube.pydata_type = np.float32\n\n # need to redo the cube, since by default np.float32 are allocated.\n # When https://github.com/pytrip/pytrip/issues/35 is fixed,\n # then this should not be needed.\n cube.cube = np.ones((cube.dimz, cube.dimy, cube.dimx), dtype=cube.pydata_type)\n\n cube.cube = detector.data.reshape(detector.nx, detector.ny, detector.nz)\n cube.cube *= 0.1 # MeV/cm -> keV/um\n # Save proper meta information\n\n cube.patient_name = _patient_name\n cube.created_by = _created_by\n cube.creation_info = _creation_info\n\n cube.write(self.output_corename)\n else:\n logger.error(\"Tripcube target is only allowed with dose- or LET-type detectors.\")\n raise Exception(\"Illegal detector for tripcube.\")\n\n\nclass TripDddWriter(object):\n\n _ddd_header_template = \"\"\"!filetype ddd\n!fileversion {fileversion:s}\n!filedate {filedate:s}\n!projectile {projectile:s}\n!material {material:s}\n!composition {composition:s}\n!density {density:f}\n!energy {energy:f}\n# z[g/cm**2] dE/dz[MeV/(g/cm**2)] FWHM1[g/cm**2] factor FWHM2[g/cm**2]\n!ddd\n\"\"\"\n\n def __init__(self, filename, options):\n\n import matplotlib\n matplotlib.use('Agg')\n self.ddd_filename = filename\n self.energy_MeV = options.energy\n self.ngauss = options.ngauss\n self.verbosity = options.verbose\n if not self.ddd_filename.endswith(\".ddd\"):\n self.ddd_filename += \".ddd\"\n self.outputdir = os.path.abspath(os.path.dirname(self.ddd_filename))\n\n def write(self, detector):\n from pymchelper.shieldhit.detector.detector_type import SHDetType\n\n if detector.dettyp == SHDetType.ddd:\n\n # extract data from detector data\n self._extract_data(detector)\n\n # in order to avoid fitting data to noisy region far behind Bragg peak tail,\n # find the range of z coordinate which containes (1-threshold) of the deposited energy\n threshold = 3e-3\n cum_dose = self._cumulative_dose()\n cum_dose_left = self._cumulative_dose_left(cum_dose)\n\n thr_ind = cum_dose_left.size - np.searchsorted(cum_dose_left[::-1], threshold) - 1\n z_fitting_cm_1d = self.z_data_cm_1d[:thr_ind]\n dose_fitting_MeV_g_1d = self.dose_data_MeV_g_1d[:thr_ind]\n\n r_fitting_cm_2d, z_fitting_cm_2d = np.meshgrid(self.r_data_cm_1d, z_fitting_cm_1d)\n dose_fitting_MeV_g_2d = self.dose_data_MeV_g_2d[0:thr_ind]\n\n logger.info(\"Plotting 1..\")\n if self.verbosity > 0:\n self._pre_fitting_plots(\n cum_dose_left=cum_dose_left,\n z_fitting_cm_1d=z_fitting_cm_1d,\n dose_fitting_MeV_g_1d=dose_fitting_MeV_g_1d,\n threshold=threshold,\n zmax_cm=z_fitting_cm_1d[-1])\n\n self._plot_2d_map(z_fitting_cm_2d, r_fitting_cm_2d, dose_fitting_MeV_g_2d, z_fitting_cm_1d)\n\n logger.info(\"Fitting...\")\n fwhm1_cm_data = np.zeros_like(z_fitting_cm_1d)\n fwhm2_cm_data = np.zeros_like(z_fitting_cm_1d)\n weight_data = np.zeros_like(z_fitting_cm_1d)\n dz0_MeV_cm_g_data = np.zeros_like(z_fitting_cm_1d)\n fwhm1_cm_error_data = np.zeros_like(z_fitting_cm_1d)\n 
fwhm2_cm_error_data = np.zeros_like(z_fitting_cm_1d)\n weight_error_data = np.zeros_like(z_fitting_cm_1d)\n dz0_MeV_cm_g_error_data = np.zeros_like(z_fitting_cm_1d)\n if self.ngauss in (1, 2):\n # for each depth fit a lateral beam with gaussian models\n for ind, z_cm in enumerate(z_fitting_cm_1d):\n\n dose_at_z = self.dose_data_MeV_g_2d[ind]\n\n # take into account only this position in r for which dose is positive\n r_fitting_cm = self.r_data_cm_1d[dose_at_z > 0]\n dose_fitting_1d_positive_MeV_g = dose_at_z[dose_at_z > 0]\n\n # perform the fit\n params, params_error = self._lateral_fit(r_fitting_cm,\n dose_fitting_1d_positive_MeV_g,\n z_cm,\n self.energy_MeV,\n self.ngauss)\n\n fwhm1_cm, factor, fwhm2_cm, dz0_MeV_cm_g = params\n fwhm1_cm_error, factor_error, fwhm2_cm_error, dz0_MeV_cm_g_error = params_error\n fwhm1_cm_data[ind] = fwhm1_cm\n dz0_MeV_cm_g_data[ind] = dz0_MeV_cm_g\n fwhm1_cm_error_data[ind] = fwhm1_cm_error\n dz0_MeV_cm_g_error_data[ind] = dz0_MeV_cm_g_error\n if self.ngauss == 2:\n fwhm2_cm_data[ind] = fwhm2_cm # set to 0 in case ngauss = 1\n weight_data[ind] = factor # set to 0 in case ngauss = 1\n fwhm2_cm_error_data[ind] = fwhm2_cm_error\n weight_error_data[ind] = factor_error\n\n logger.info(\"Plotting 2...\")\n if self.verbosity > 0 and self.ngauss in (1, 2):\n self._post_fitting_plots(z_fitting_cm_1d,\n dose_fitting_MeV_g_1d,\n dz0_MeV_cm_g_data,\n fwhm1_cm_data,\n fwhm2_cm_data,\n weight_data,\n dz0_MeV_cm_g_error_data,\n fwhm1_cm_error_data,\n weight_error_data,\n fwhm2_cm_error_data)\n self._plot_2d_map(\n z_fitting_cm_2d,\n r_fitting_cm_2d,\n dose_fitting_MeV_g_2d,\n z_fitting_cm_1d,\n fwhm1_cm_data,\n fwhm2_cm_data,\n weight_data,\n dz0_MeV_cm_g_data,\n suffix='_fwhm')\n\n logger.info(\"Writing \" + self.ddd_filename)\n\n # prepare header of DDD file\n header = self._ddd_header_template.format(\n fileversion='19980520',\n filedate=time.strftime('%c'), # Locale's appropriate date and time representation\n projectile='C',\n material='H20',\n composition='H20',\n density=1,\n energy=self.energy_MeV)\n\n # write the contents of the files\n with open(self.ddd_filename, 'w') as ddd_file:\n ddd_file.write(header)\n # TODO write to DDD gaussian amplitude, not the dose in central bin\n if self.ngauss == 2:\n for z_cm, dose, fwhm1_cm, weight, fwhm2_cm in zip(z_fitting_cm_1d, dose_fitting_MeV_g_1d,\n fwhm1_cm_data, weight_data, fwhm2_cm_data):\n ddd_file.write('{:g} {:g} {:g} {:g} {:g}\\n'.format(z_cm, dose, fwhm1_cm, weight, fwhm2_cm))\n elif self.ngauss == 1:\n for z_cm, dose, fwhm_cm in zip(z_fitting_cm_1d, dose_fitting_MeV_g_1d, fwhm1_cm_data):\n ddd_file.write('{:g} {:g} {:g}\\n'.format(z_cm, dose, fwhm_cm))\n elif self.ngauss == 0:\n for z_cm, dose in zip(z_fitting_cm_1d, dose_fitting_MeV_g_1d):\n ddd_file.write('{:g} {:g}\\n'.format(z_cm, dose))\n\n def _extract_data(self, detector):\n # 2D arrays of r,z and dose\n self.r_data_cm_2d = np.array(list(detector.x)).reshape(detector.nz, detector.nx)\n self.z_data_cm_2d = np.array(list(detector.z)).reshape(detector.nz, detector.nx)\n self.dose_data_MeV_g_2d = np.array(detector.v).reshape(detector.nz, detector.nx)\n\n self.dose_error_MeV_g_2d = np.array(detector.error).reshape(detector.nz, detector.nx)\n\n # 1D arrays of r,z and dose in the very central bin\n self.r_data_cm_1d = self.r_data_cm_2d[0] # middle points of the bins\n self.z_data_cm_1d = np.asarray(list(detector.z)[0:detector.nz * detector.nx:detector.nx])\n\n # np.savez(\"data\", r2d=self.r_data_cm_2d, z2d=self.z_data_cm_2d, d2d=self.dose_data_MeV_g_2d,\n # 
r1d=self.r_data_cm_1d, z1d=self.z_data_cm_1d, e2d=self.dose_error_MeV_g_2d)\n\n bin_depth_z_cm = self.z_data_cm_1d[1] - self.z_data_cm_1d[0]\n r_step_cm = self.r_data_cm_1d[1] - self.r_data_cm_1d[0]\n\n # i-th bin volume = dz * pi * (r_i_max^2 - r_i_min^2 )\n # r_i_max = r_i + dr / 2\n # r_i_min = r_i - dr / 2\n # r_i_max^2 - r_i_min^2 = (r_i_max - r_i_min)*(r_i_max + r_i_min) = dr * 2 * r_i thus\n # i-th bin volume = 2 * pi * dr * r_i * dz\n bin_volume_data_cm3_1d = 2.0 * np.pi * r_step_cm * self.r_data_cm_1d * bin_depth_z_cm\n # we assume density of 1 g/c3\n density_g_cm3 = 1.0\n total_bin_mass_g = density_g_cm3 * bin_depth_z_cm * np.pi * (self.r_data_cm_1d[-1] + r_step_cm/2.0)**2\n energy_in_bin_MeV_2d = self.dose_data_MeV_g_2d * bin_volume_data_cm3_1d * density_g_cm3\n total_energy_at_depth_MeV_1d = np.sum(energy_in_bin_MeV_2d, axis=1)\n self.dose_data_MeV_g_1d = total_energy_at_depth_MeV_1d / total_bin_mass_g\n\n def _cumulative_dose(self):\n cumsum = np.cumsum(self.dose_data_MeV_g_1d)\n cumsum /= np.sum(self.dose_data_MeV_g_1d)\n return cumsum\n\n def _cumulative_dose_left(self, cumsum):\n cum_dose_left = np.array(cumsum)\n cum_dose_left *= -1.0\n cum_dose_left += 1.0\n return cum_dose_left\n\n def _plot_2d_map(self,\n z_fitting_cm_2d,\n r_fitting_cm_2d,\n dose_fitting_MeV_g2d,\n z_fitting_cm_1d=None,\n fwhm1_cm=None,\n fwhm2_cm=None,\n weight=None,\n dz0_MeV_cm_g_data=None,\n suffix=''):\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n from matplotlib.colors import LogNorm\n\n prefix = os.path.join(self.outputdir, 'ddd_{:3.1f}MeV_'.format(self.energy_MeV))\n plt.pcolormesh(\n z_fitting_cm_2d, r_fitting_cm_2d, dose_fitting_MeV_g2d, norm=LogNorm(), cmap='gnuplot2', label='dose')\n cbar = plt.colorbar()\n cbar.set_label(\"dose [MeV/g]\", rotation=270, verticalalignment='bottom')\n if z_fitting_cm_1d is not None and np.any(fwhm1_cm):\n plt.plot(z_fitting_cm_1d, fwhm1_cm, color='g', label=\"fwhm1\")\n if z_fitting_cm_1d is not None and np.any(fwhm2_cm):\n plt.plot(z_fitting_cm_1d, fwhm2_cm, color='r', label=\"fwhm2\")\n\n # plot legend only if some of the FWHM 1-D overlays are present\n # adding legend to only pcolormesh plot will result in a warning about missing labels\n if z_fitting_cm_1d is not None and (np.any(fwhm1_cm) or np.any(fwhm2_cm)):\n plt.legend(loc=0)\n plt.xlabel(\"z [cm]\")\n plt.ylabel(\"r [cm]\")\n plt.xlim((z_fitting_cm_2d.min(), z_fitting_cm_2d.max()))\n if np.any(fwhm1_cm) and np.any(fwhm2_cm):\n plt.ylim((r_fitting_cm_2d.min(), max(max(fwhm1_cm), max(fwhm2_cm))))\n plt.clim((1e-8 * dose_fitting_MeV_g2d.max(), dose_fitting_MeV_g2d.max()))\n out_filename = prefix + 'dosemap' + suffix + '.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n if self.verbosity > 1:\n plt.yscale('log')\n out_filename = prefix + 'dosemap_log' + suffix + '.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n plt.close()\n\n if self.verbosity > 2 and (np.any(fwhm1_cm) or np.any(fwhm2_cm)):\n # TODO add plotting sum of 2 gausses\n sigma1_cm = fwhm1_cm / 2.354820045\n sigma2_cm = fwhm2_cm / 2.354820045\n gauss_amplitude_MeV_g = dz0_MeV_cm_g_data\n for z_cm, sigma1_at_z_cm, sigma2_at_z_cm, factor, amplitude_MeV_g in \\\n zip(z_fitting_cm_1d, sigma1_cm, sigma2_cm, weight, gauss_amplitude_MeV_g):\n dose_mc_MeV_g = self.dose_data_MeV_g_2d[self.z_data_cm_2d == z_cm]\n title = \"Z = {:4.3f} cm, sigma1 = {:4.3f} cm\".format(z_cm, sigma1_at_z_cm)\n plt.plot(self.r_data_cm_1d, dose_mc_MeV_g, 'k.', label=\"data\")\n if 
self.ngauss == 1:\n gauss_data_MeV_g = self.gauss_MeV_g(self.r_data_cm_1d, amplitude_MeV_g, sigma1_at_z_cm)\n plt.plot(self.r_data_cm_1d, gauss_data_MeV_g, label=\"fit\")\n elif self.ngauss == 2:\n gauss_data_MeV_g = self.gauss2_MeV_g(self.r_data_cm_1d, amplitude_MeV_g,\n sigma1_at_z_cm, factor, sigma2_at_z_cm)\n gauss_data_MeV_g_1st = self.gauss2_MeV_g_1st(self.r_data_cm_1d, amplitude_MeV_g,\n sigma1_at_z_cm, factor, sigma2_at_z_cm)\n gauss_data_MeV_g_2nd = self.gauss2_MeV_g_2nd(self.r_data_cm_1d, amplitude_MeV_g,\n sigma1_at_z_cm, factor, sigma2_at_z_cm)\n plt.plot(self.r_data_cm_1d, gauss_data_MeV_g, label=\"fit\")\n plt.plot(self.r_data_cm_1d, gauss_data_MeV_g_1st, label=\"fit 1st gauss\")\n plt.plot(self.r_data_cm_1d, gauss_data_MeV_g_2nd, label=\"fit 2nd gauss\")\n title += \", sigma2 = {:4.3f} cm, factor = {:4.6f}\".format(sigma2_at_z_cm, factor)\n logger.debug(\"Plotting at \" + title)\n plt.title(title)\n plt.legend(loc=0)\n plt.yscale('log')\n plt.xlabel(\"r [cm]\")\n plt.ylabel(\"dose [MeV/g]\")\n plt.ylim([dose_mc_MeV_g.min(), dose_mc_MeV_g.max()])\n if self.ngauss == 1:\n plt.ylim([dose_mc_MeV_g.min(), max(gauss_data_MeV_g.max(), dose_mc_MeV_g.max())])\n out_filename = prefix + \"fit_details_{:4.3f}_log\".format(z_cm) + suffix + '.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n\n plt.xscale('log')\n plt.xlim([0, self.r_data_cm_1d.max()])\n out_filename = prefix + \"fit_details_{:4.3f}_loglog\".format(z_cm) + suffix + '.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n\n plt.xscale('linear')\n plt.xlim([0, 5.0*sigma2_at_z_cm])\n plt.ylim([dose_mc_MeV_g[self.r_data_cm_1d < 5.0*sigma2_at_z_cm].min(), dose_mc_MeV_g.max()])\n out_filename = prefix + \"fit_details_{:4.3f}_small_log\".format(z_cm) + suffix + '.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n\n plt.close()\n\n plt.plot(self.r_data_cm_1d, dose_mc_MeV_g * self.r_data_cm_1d, 'k.', label=\"data\")\n plt.plot(self.r_data_cm_1d, gauss_data_MeV_g * self.r_data_cm_1d, label=\"fit\")\n plt.legend(loc=0)\n plt.ylabel(\"dose * r [MeV cm/g]\")\n plt.ylim([(dose_mc_MeV_g * self.r_data_cm_1d).min(), (dose_mc_MeV_g * self.r_data_cm_1d).max()])\n plt.yscale('log')\n out_filename = prefix + \"fit_details_{:4.3f}_r_log\".format(z_cm) + suffix + '.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n plt.xscale('log')\n out_filename = prefix + \"fit_details_{:4.3f}_r_loglog\".format(z_cm) + suffix + '.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n plt.close()\n\n def _pre_fitting_plots(self, cum_dose_left, z_fitting_cm_1d, dose_fitting_MeV_g_1d, threshold, zmax_cm):\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n prefix = os.path.join(self.outputdir, 'ddd_{:3.1f}MeV_'.format(self.energy_MeV))\n\n plt.plot(self.z_data_cm_1d, self.dose_data_MeV_g_1d, color='blue', label='dose')\n plt.axvspan(\n 0,\n zmax_cm,\n alpha=0.1,\n color='green',\n label=\"fitting area, covers {:g} % of dose\".format(100.0 * (1 - threshold)))\n plt.legend(loc=0)\n plt.xlabel('z [cm]')\n plt.ylabel('dose [a.u.]')\n if self.verbosity > 1:\n out_filename = prefix + 'dose_all.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n plt.yscale('log')\n out_filename = prefix + 'all_log.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n plt.close()\n\n if self.verbosity > 1:\n bp_max_z_pos_cm = self.z_data_cm_1d[self.dose_data_MeV_g_1d == self.dose_data_MeV_g_1d.max()]\n\n 
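# semilog plot of the dose fraction deposited beyond each depth, with the fitting window, threshold and Bragg-peak position marked\n            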
plt.semilogy(self.z_data_cm_1d, cum_dose_left, color='blue', label=\"cumulative missing dose\")\n plt.axvspan(\n 0,\n zmax_cm,\n alpha=0.1,\n color='green',\n label=\"fitting area, covers {:g} % of dose\".format(100.0 * (1 - threshold)))\n plt.axhline(threshold, color='black', label='threshold {:g}'.format(threshold))\n plt.axvline(bp_max_z_pos_cm, color='red', label='BP max')\n plt.legend(loc=0)\n plt.xlabel('z [cm]')\n plt.ylabel('fraction of total dose deposited behind z')\n out_filename = prefix + 'dose_frac.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n plt.close()\n\n if self.verbosity > 1:\n plt.plot(z_fitting_cm_1d, dose_fitting_MeV_g_1d, 'b', label='dose')\n plt.xlabel('z [cm]')\n plt.ylabel('dose [MeV/g]')\n plt.yscale('log')\n out_filename = prefix + 'dose_log.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n plt.close()\n\n def _post_fitting_plots(self, z_fitting_cm_1d,\n dose_fitting_MeV_g_1d,\n dz0_MeV_cm_g_data,\n fwhm1_cm_data,\n fwhm2_cm_data,\n weight_data,\n dz0_MeV_cm_g_error_data,\n fwhm1_cm_error_data,\n weight_error_data,\n fwhm2_cm_error_data):\n import matplotlib.pyplot as plt\n prefix = os.path.join(self.outputdir, 'ddd_{:3.1f}MeV_'.format(self.energy_MeV))\n\n # left Y axis dedicated to FWHM, right one to weight\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n lns1 = ax1.plot(z_fitting_cm_1d, fwhm1_cm_data, 'g', label='fwhm1')\n upper_fwhm1_line = fwhm1_cm_data + fwhm1_cm_error_data\n lower_fwhm1_line = fwhm1_cm_data - fwhm1_cm_error_data\n ax1.fill_between(z_fitting_cm_1d, lower_fwhm1_line, upper_fwhm1_line,\n where=upper_fwhm1_line >= lower_fwhm1_line,\n facecolor='green',\n alpha=0.1,\n interpolate=True)\n if self.ngauss == 2:\n lns2 = ax1.plot(z_fitting_cm_1d, fwhm2_cm_data, 'r', label='fwhm2')\n upper_fwhm2_line = fwhm2_cm_data + fwhm2_cm_error_data\n lower_fwhm2_line = fwhm2_cm_data - fwhm2_cm_error_data\n ax1.fill_between(z_fitting_cm_1d, lower_fwhm2_line, upper_fwhm2_line,\n where=upper_fwhm2_line >= lower_fwhm2_line,\n facecolor='red',\n alpha=0.1,\n interpolate=True)\n\n lns3 = ax2.plot(z_fitting_cm_1d, weight_data, 'b', label='weight')\n upper_weight_line = weight_data + weight_error_data\n lower_weight_line = weight_data - weight_error_data\n ax2.fill_between(z_fitting_cm_1d, lower_weight_line, upper_weight_line,\n where=upper_weight_line >= lower_weight_line,\n facecolor='blue',\n alpha=0.1,\n interpolate=True)\n ax2.set_ylabel('weight of FWHM1')\n ax2.set_ylim([0, 1])\n ax1.set_xlabel('z [cm]')\n ax1.set_ylabel('FWHM [cm]')\n\n # add by hand line plots and labels to legend\n line_objs = lns1\n if self.ngauss == 2:\n line_objs += lns2\n line_objs += lns3\n labels = [l.get_label() for l in line_objs]\n ax1.legend(line_objs, labels, loc=0)\n\n out_filename = prefix + 'fwhm.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n plt.close()\n\n r_step_cm = self.r_data_cm_1d[1] - self.r_data_cm_1d[0]\n r_max_cm = self.r_data_cm_1d[-1] + 0.5 * r_step_cm\n\n # beam model for single gaussian is following:\n # G(r, sigma) = 1 / (2pi sigma) * exp( - 0.5 r^2 / sigma^2)\n # D(z,r) = D(z,0) * G(r, sigma)\n # for double gaussian it is following:\n # D(z,r) = D(z,0) * ( w * G(r, sigma1) + (1-w) * G(r, sigma2))\n #\n # to get depth dose profile D(z) we need to calculate average dose in some volume at depth z\n # (calculating average dose in a subspace separated by two planes at z=z0 and z=z0+dz will lead to zero dose)\n # we cannot use simple arithmetic mean, as we are dealing with cylindrical 
scoring and bin mass depends on r\n #\n # let rmax be radius of biggest bin in cylindrical scoring\n # we calculate D(z) which will correspond to depth-dose profile measured with ion. chamber of radius rmax\n # it is basically energy E(z) deposited in slice of radius rmax and thickness dz divided by slice mass\n # D(z) = E(z) / m(z) = E(z) / (pi rmax^2 dz rho)\n # energy E(z) is the sum of energy in all cylyndrical shell in a slice and can be calculated as integral\n # thin shell has surface at radius r has surface: 2 pi r dr, thus\n # E(z) = \\int_0^rmax D(r,z) rho dz 2 pi r dr\n # finally:\n # D(z) = \\int_0^rmax D(r,z) rho dz 2 pi r dr / (pi rmax^2 dz rho) which leads to:\n #\n # D(z) = 2 / rmax^2 \\int_0^rmax D(r,z) r dr\n #\n # for single gaussian model this gives:\n #\n # D(z) = 2 / rmax^2 \\int_0^rmax D(z,0) * G(r, sigma) r dr = D(z,0) / rmax^2 \\int_0^rmax G(r, sigma) r dr\n # = D(z,0) / (2 pi sigma rmax^2) \\int_0^rmax exp( - 0.5 r^2 / sigma^2) r dr\n #\n # integral \\int exp( - 0.5 r^2 / sigma^2) r dr is easy to calculate:\n # https://www.wolframalpha.com/input/?i=%5Cint+exp(+-+0.5+r%5E2+%2F+sigma%5E2)+r+dr\n #\n # \\int exp( - 0.5 r^2 / sigma^2) r dr = -sigma^2 exp( - 0.5 r^2 / sigma^2)\n #\n # which leads to\n #\n # \\int_0^rmax exp( - 0.5 r^2 / sigma^2) r dr = sigma^2 ( 1 - exp( - 0.5 rmax^2 / sigma^2))\n #\n # this means depth-dose curve for single gaussian model can be expressed as:\n #\n # D(z) = D(z,0) / (2 pi sigma rmax^2) * sigma^2 ( 1 - exp( - 0.5 rmax^2 / sigma^2))\n #\n # or\n #\n # D(z) = sigma * D(z,0) * ( 1 - exp( - 0.5 rmax^2 / sigma^2)) / (2 pi rmax^2)\n #\n # double gaussian can be calculated in similar way and leads to:\n #\n # D(z) = D(z,0) / (2 pi rmax^2) * ( w * sigma1 * ( 1 - exp( - 0.5 rmax^2 / sigma1^2)) +\n # ( (1-w) * sigma2 * ( 1 - exp( - 0.5 rmax^2 / sigma2^2)))\n #\n\n if self.ngauss == 1:\n sigma1_cm = fwhm1_cm_data / 2.354820045\n # sigma * D(z,0) / (2 pi rmax^2)\n fit_dose_MeV_g = sigma1_cm * dz0_MeV_cm_g_data / (2.0 * np.pi * r_max_cm ** 2)\n # missing ( 1 - exp( - 0.5 rmax^2 / sigma^2))\n fit_dose_MeV_g *= (np.ones_like(sigma1_cm) - np.exp(-0.5 * r_max_cm / sigma1_cm**2))\n plt.plot(z_fitting_cm_1d, fit_dose_MeV_g, 'r', label='dose fit')\n if self.ngauss == 2:\n sigma1_cm = fwhm1_cm_data / 2.354820045\n sigma2_cm = fwhm2_cm_data / 2.354820045\n w = weight_data\n\n # ( w * sigma1 * ( 1 - exp( - 0.5 rmax^2 / sigma1^2))\n fit_dose_MeV_g = w * sigma1_cm * \\\n (np.ones_like(sigma1_cm) - np.exp(-0.5 * r_max_cm / sigma1_cm**2))\n\n # ( (1-w) * sigma2 * ( 1 - exp( - 0.5 rmax^2 / sigma2^2)))\n fit_dose_MeV_g += (np.ones_like(w) - w) * sigma2_cm * \\\n (np.ones_like(sigma2_cm) - np.exp(-0.5 * r_max_cm / sigma2_cm**2))\n\n # D(z,0) / (2 pi rmax^2)\n fit_dose_MeV_g *= dz0_MeV_cm_g_data / (2.0 * np.pi * r_max_cm ** 2)\n plt.plot(z_fitting_cm_1d, fit_dose_MeV_g, 'r', label='dose fit')\n\n plt.plot(z_fitting_cm_1d, dose_fitting_MeV_g_1d, 'b', label='dose MC')\n plt.xlabel('z [cm]')\n plt.ylabel('dose [MeV/g]')\n plt.legend(loc=0)\n out_filename = prefix + 'dose_fit.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n if self.verbosity > 1:\n plt.yscale('log')\n out_filename = prefix + 'dose_fit_log.png'\n logger.info('Saving ' + out_filename)\n plt.savefig(out_filename)\n plt.close()\n\n @classmethod\n def gauss_MeV_g(cls, x_cm, amp_MeV_cm_g, sigma_cm):\n return amp_MeV_cm_g / (2.0 * np.pi * sigma_cm) * np.exp(-x_cm ** 2 / (2.0 * sigma_cm ** 2))\n\n @classmethod\n def gauss_r_MeV_cm_g(cls, x_cm, amp_MeV_cm_g, sigma_cm):\n return 
cls.gauss_MeV_g(x_cm, amp_MeV_cm_g, sigma_cm) * x_cm\n\n @classmethod\n def gauss2_MeV_g(cls, x_cm, amp_MeV_cm_g, sigma1_cm, weight, sigma2_add_cm):\n return amp_MeV_cm_g / (2.0 * np.pi) * (\n (weight / sigma1_cm) * np.exp(-x_cm ** 2 / (2.0 * sigma1_cm ** 2))\n + ((1.0 - weight) / (sigma1_cm + sigma2_add_cm)) * np.exp(\n -x_cm ** 2 / (2.0 * (sigma1_cm + sigma2_add_cm) ** 2))\n )\n\n @classmethod\n def gauss2_MeV_g_1st(cls, x_cm, amp_MeV_cm_g, sigma1_cm, weight, sigma2_add_cm):\n return amp_MeV_cm_g / (2.0 * np.pi) * (weight / sigma1_cm) * np.exp(-x_cm ** 2 / (2.0 * sigma1_cm ** 2))\n\n @classmethod\n def gauss2_MeV_g_2nd(cls, x_cm, amp_MeV_cm_g, sigma1_cm, weight, sigma2_add_cm):\n return amp_MeV_cm_g / (2.0 * np.pi) * ((1.0 - weight) / (sigma1_cm + sigma2_add_cm)) * np.exp(\n -x_cm ** 2 / (2.0 * (sigma1_cm + sigma2_add_cm) ** 2))\n\n @classmethod\n def gauss2_r_MeV_cm_g(cls, x_cm, amp_MeV_cm_g, sigma1_cm, weight, sigma2_add_cm):\n return cls.gauss2_MeV_g(x_cm, amp_MeV_cm_g, sigma1_cm, weight, sigma2_add_cm) * x_cm\n\n @classmethod\n def _lateral_fit(cls, r_cm, dose_MeV_g, z_cm, energy_MeV, ngauss=2):\n variance = np.average(r_cm ** 2, weights=dose_MeV_g)\n\n starting_amp_MeV_g = dose_MeV_g.max()\n starting_sigma_cm = np.sqrt(variance)\n\n min_amp_MeV_g = 1e-10 * dose_MeV_g.max()\n min_sigma_cm = 1e-2 * starting_sigma_cm\n\n max_amp_MeV_g = 2.0 * dose_MeV_g.max()\n max_sigma_cm = 1e4 * starting_sigma_cm\n\n from scipy.optimize import curve_fit\n\n if ngauss == 1:\n popt, pcov = curve_fit(f=cls.gauss_r_MeV_cm_g,\n xdata=r_cm,\n ydata=dose_MeV_g * r_cm,\n p0=[starting_amp_MeV_g, starting_sigma_cm],\n bounds=([[min_amp_MeV_g, min_sigma_cm], [max_amp_MeV_g, max_sigma_cm]]),\n sigma=None)\n # TODO return also parameter errors\n perr = np.sqrt(np.diag(pcov))\n\n dz0_MeV_cm_g, sigma_cm = popt\n dz0_MeV_cm_g_error, sigma_cm_error = perr\n factor = 0.0\n factor_error = 0.0\n fwhm2_cm = 0.0\n fwhm2_cm_error = 0.0\n\n elif ngauss == 2:\n starting_weigth = 0.99\n starting_sigma2_add_cm = 0.1\n\n min_weigth = 0.55\n min_sigma2_add_cm = 1e-1\n\n max_weigth = 1.0 - 1e-12\n max_sigma2_add_cm = 20.0\n\n popt, pcov = curve_fit(f=cls.gauss2_r_MeV_cm_g,\n xdata=r_cm,\n ydata=dose_MeV_g * r_cm,\n p0=[starting_amp_MeV_g, starting_sigma_cm, starting_weigth, starting_sigma2_add_cm],\n bounds=([min_amp_MeV_g, min_sigma_cm, min_weigth, min_sigma2_add_cm],\n [max_amp_MeV_g, max_sigma_cm, max_weigth, max_sigma2_add_cm]),\n sigma=None)\n # TODO return also parameter errors\n perr = np.sqrt(np.diag(pcov))\n dz0_MeV_cm_g_error, sigma_cm_error, factor_error, sigma2_add_cm_error = perr\n\n dz0_MeV_cm_g, sigma_cm, factor, sigma2_add_cm = popt\n sigma2_cm = sigma_cm + sigma2_add_cm\n sigma2_cm_error = np.sqrt(sigma_cm_error**2 + sigma2_add_cm_error**2)\n fwhm2_cm = sigma2_cm * 2.354820045\n fwhm2_cm_error = sigma2_cm_error * 2.354820045\n\n fwhm1_cm = sigma_cm * 2.354820045\n\n fwhm1_cm_error = sigma_cm_error * 2.354820045\n\n params = fwhm1_cm, factor, fwhm2_cm, dz0_MeV_cm_g\n params_error = fwhm1_cm_error, factor_error, fwhm2_cm_error, dz0_MeV_cm_g_error\n\n return params, params_error\n","sub_path":"pymchelper/writers/trip98.py","file_name":"trip98.py","file_ext":"py","file_size_in_byte":32968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"509199509","text":"n = int(input())\r\ns = []\r\nt = []\r\nfor i in range(n):\r\n S, T = map(str, input().split())\r\n s.append(S)\r\n t.append(T)\r\n\r\nfor i in range(n):\r\n for j in range(i):\r\n if s[i] == s[j] and t[i] == 
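# A small stand-alone check (not part of pymchelper) of the closed-form radial
# integral used in the depth-dose reconstruction of trip98.py above:
#   int_0^rmax exp(-0.5 r^2 / sigma^2) r dr = sigma^2 * (1 - exp(-0.5 rmax^2 / sigma^2))
# As an observation only: the fit_dose_MeV_g expressions above use
# np.exp(-0.5 * r_max_cm / sigma_cm**2) without squaring r_max_cm, while the
# derivation in the comments calls for rmax^2, so that exponent may be worth
# double-checking against the original source. Values below are hypothetical.
import numpy as np
from scipy.integrate import quad

sigma_cm, r_max_cm = 0.4, 5.0
analytic = sigma_cm ** 2 * (1.0 - np.exp(-0.5 * r_max_cm ** 2 / sigma_cm ** 2))
numeric, _ = quad(lambda r: np.exp(-0.5 * r ** 2 / sigma_cm ** 2) * r, 0.0, r_max_cm)
assert abs(analytic - numeric) < 1e-9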
t[j]:\r\n print(\"Yes\")\r\n exit()\r\n\r\nprint(\"No\")","sub_path":"AtCoder/ABC/216/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"291006969","text":"from django.urls import path\nfrom django.views.generic.base import TemplateView\n\nfrom . import views\n\nurlpatterns = [\n path('counsellor/', views.counsellor_home, name='counsellor'),\n path('counsellor/update/', views.update, name='update'),\n path('recent/', views.recentChatroom, name='recent'),\n\n path('waiting_students/', views.available_chatroom, name='waiting_students'),\n\n path('studentCounselling/', views.studentCounselling, name='studentCounselling'),\n path('chatroom//', views.Chat, name='chatroom'),\n path('chatroom_refresh//', views.messagerefresh, name='chatroomrefresh'),\n\n path('student/', TemplateView.as_view(template_name='student.html'), name='student'),\n]\n","sub_path":"portal/chat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"520924099","text":"from selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\n\ntry:\n # Oldal betöltése\n driver.get(\"https://witty-hill-0acfceb03.azurestaticapps.net/mutant_teams.html\")\n time.sleep(2)\n\n # Függvény az elemek attributumainak és az elvárt, csoport szerinti attributumok összehasonlítására:\n\n\n def hero_team_finder(hero, teams):\n hero_element = driver.find_element_by_id(hero)\n assert hero_element.get_attribute('data-teams') == teams\n\n\n # Angel (aki mindegyik csoport tagja)\n hero_team_finder('angel', 'original force factor hellfire')\n\n # Beast (Aki original és factor)\n hero_team_finder('beast', 'original factor')\n\n # Cyclops\n hero_team_finder('cyclops', 'original force factor')\n\n # És a többi:\n hero_team_finder('emma-frost', 'hellfire')\n hero_team_finder('iceman', 'original factor')\n\n # És így tovább....\n\nfinally:\n driver.close()\n","sub_path":"testproject/mutants.py","file_name":"mutants.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"303814032","text":"from django.test import Client, TestCase\nfrom django.urls import reverse\n\nfrom http import HTTPStatus\n\n\nclass StaticViewsTests(TestCase):\n def setUp(self):\n self.guest_client = Client()\n self.url_template = {\n 'about:author': 'about/author.html',\n 'about:tech': 'about/tech.html'\n }\n\n def test_page_accessible(self):\n for adress in self.url_template.keys():\n with self.subTest(adress=adress):\n response = self.guest_client.get(reverse(adress))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_page_accessible(self):\n for adress, template in self.url_template.items():\n with self.subTest(adress=adress):\n response = self.guest_client.get(reverse(adress))\n self.assertTemplateUsed(response, template)\n","sub_path":"yatube/about/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"54644153","text":"import os\n\nDEBUG = \"DEBUG\" in os.environ\n\nSECRET_KEY = os.environ.get(\"SECRET_KEY\", \"development\")\n\n# API Auth\nAPI_TOKEN = os.environ.get(\"SECRET_KEY\", 
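# The nested loops in the AtCoder snippet above are O(n^2); an equivalent
# single-pass check keeps the (S, T) pairs in a set (sketch; `pairs` stands for
# the list of tuples read from input):
def has_duplicate_pair(pairs):
    seen = set()
    for pair in pairs:
        if pair in seen:
            return True
        seen.add(pair)
    return False

print("Yes" if has_duplicate_pair([("a", "x"), ("b", "y"), ("a", "x")]) else "No")  # Yes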
\"development\")\n\nENVIRONMENT = os.environ.get(\"ENVIRONMENT\", \"development\")\n\n# DB\nSQLALCHEMY_DATABASE_URI = os.environ.get(\"DATABASE_URL\", \"sqlite:////tmp/test.db\")\n\nif ENVIRONMENT == \"production\":\n SQLALCHEMY_POOL_SIZE = os.environ.get(\"SQLALCHEMY_POOL_SIZE\", 1)\n\n# Test Settings\nif ENVIRONMENT == 'test':\n CSRF_ENABLED = False\n SQLALCHEMY_DATABASE_URI = \"sqlite:////tmp/testing.db\"\n DATABASE_URL = \"sqlite:////tmp/testing.db\"\n TESTING = True\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"355309062","text":"from xml.etree.ElementTree import Element\nfrom .. import TReqz\n\n\nclass reqif_attribute_definition(TReqz.reqif_identifiable):\n\n def __init__(self, content: Element = None, id_dict=None):\n self.default_value: TReqz.reqif_attribute_value = None # element, optional\n self.type: TReqz.reqif_datatype_definition = None # local_ref, required\n self.is_editable: str = None # attribute, optional\n super(reqif_attribute_definition, self).__init__(content, id_dict)\n self.name:str = \"ATTRIBUTE-DEFINITION\"\n\n def decode(self, content: Element, id_dict: TReqz.reqif_id_dict = None):\n super().decode(content, id_dict)\n\n self.is_editable = TReqz.reqif_utils.unescapeAttribute(content.get(\"IS-EDITABLE\"))\n\n def encode(self):\n elem = super().encode()\n elem.tag = self.name\n TReqz.xml_utils.setElementAttribute(\n elem, \"IS-EDITABLE\", TReqz.reqif_utils.escapeAttribute(self.is_editable))\n return elem\n","sub_path":"TReqz/reqif_attribute_definition.py","file_name":"reqif_attribute_definition.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"194014176","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 30 11:49:22 2017\n\n@author: gibraanrahman\n\nScript to cluster t-SNE data for tumor 170410 from DBSCAN identified \nclusters and perform histogram analysis on clusters 1 and 5.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib.pylab import savefig\nfrom matplotlib.colors import ListedColormap\n\ndef cluster_map():\n \"\"\"Map 6 clusters and noise to colors on t-SNE map.\"\"\"\n data = pd.read_csv('170410_Tumor/1000it_tseed1_comp_clusterlab.csv')\n data['Color'] = range(data.shape[0])\n \n cmap = ListedColormap(['black', 'orange', 'blue', \n 'green', 'red', 'purple', 'grey'], 'indexed')\n \n plt.scatter(data['X0'], data['X1'], c=cmap(data['cluster']), s=15)\n plt.xlabel('t-SNE X1')\n plt.ylabel('t-SNE X2')\n\nstain_data = pd.read_csv('170410_Tumor/woPI_comp_lgcl.csv')\nstain_data = stain_data[stain_data['FSC-A'] > 50000]\n\ndrops = ['Alexa Fluor 700-A_Comp', 'FSC-A', 'SSC-A']\nstain_data = stain_data.drop(drops, axis=1)\n\nstain_data.index = range(stain_data.shape[0])\nstain_data.columns = ['CD31', 'CD45', 'HER2', 'CD309']\n\n# CD309 placed next to CD31 for comparison w/in cluster 5\nCD309 = list(stain_data['CD309'])\nCD45 = list(stain_data['CD45'])\nstain_data['CD45'] = CD309\nstain_data['CD309'] = CD45\nstain_data.columns = ['CD31', 'CD309', 'HER2', 'CD45']\nprint(stain_data.head())\n\ntsne_data = pd.read_csv('170410_Tumor/1000it_tseed1_comp_clusterlab.csv')\nclust1 = tsne_data['cluster'] == 1\nclust5 = tsne_data['cluster'] == 5\n\n# Don't think these are necessary\ntsne_data1 = tsne_data[clust1]\ntsne_data5 = 
tsne_data[clust5]\n\n# Stain intensities for clusters 1 & 5\nstain_data1 = stain_data[clust1]\nstain_data5 = stain_data[clust5]\n\nbinwidth = 0.1\n\n# Histogram analysis of cluster 1\nplt.figure(num=5, figsize=(15,4))\nfor i, col in enumerate(stain_data.columns):\n plt.subplot(1, 4, i+1)\n plt.xlim([-1, 5])\n plt.ylim([0, 0.12])\n # Weight intensities as fraction of cluster\n weights = np.ones_like(stain_data5[col])/len(stain_data5[col])\n num_bins = np.arange(stain_data5[col].min(), \n stain_data5[col].max() + binwidth, \n binwidth)\n plt.hist(stain_data5[col], \n bins=num_bins, \n weights=weights, \n color='purple')\n plt.xlabel(col)\n if col == 'CD31': # Plot y-axis on left-most subplot\n plt.ylabel('Fraction of Cluster')\n \nsavefig('170410_Tumor/Plots/Clust5')\n\n# Histogram analysis of cluster 5\nplt.figure(num=1, figsize=(15,4))\nfor i, col in enumerate(stain_data.columns):\n plt.subplot(1, 4, i+1)\n plt.xlim([-1, 5])\n plt.ylim([0, 0.12])\n # Weight intensities as fraction of cluster\n weights = np.ones_like(stain_data1[col])/len(stain_data1[col])\n num_bins = np.arange(stain_data1[col].min(), \n stain_data1[col].max() + binwidth, \n binwidth)\n plt.hist(stain_data1[col], \n bins = num_bins, \n weights=weights, \n color='orange')\n plt.xlabel(col)\n if col == 'CD31': # Plot y-axis on left-most subplot\n plt.ylabel('Fraction of Cluster')\n \nsavefig('170410_Tumor/Plots/Clust1')","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"534991559","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom plotnine import ggplot, aes, geom_point, labs\nfrom ._themes import theme_std\n\ndef scatter_pseudotime(adata, y, size = 1.5, alpha = 1, color = 'black'):\n \"\"\"Plots a scatter plot of pseudotime vs one or multiple variables\n\n Parameters\n --------------\n adata: AnnData\n The AnnData object being used for the analysis. Must be previously\n evaluated by `tl.pseudotime`.\n y: str or list\n If type(y) == str, y must be a variable annotated in adata.obs and\n will be used as the y-axis. If type(y) == list, then multiple variables\n will be plotted using a shared y-axis but different point colors.\n size: float\n Controls the point size of the plot.\n alpha: float\n A value between 0 and 1. Controls point transparency.\n color: str\n A supported color name. 
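# Referring to the t-SNE cluster analysis above: the per-cluster marker levels
# selected with boolean masks can also be summarised in one pass with pandas
# groupby (sketch; assumes stain_data and tsne_data share the same row order,
# which the boolean masking above already relies on):
cluster_medians = stain_data.groupby(tsne_data['cluster']).median()
print(cluster_medians.loc[[1, 5]])  # clusters 1 and 5, as in the histograms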
Controls the point color if type(y)==str.\n Ignored otherwise.\n \n Returns\n -------------\n A plotnine scatter plot of pseudotime.\n \"\"\"\n if type(y) == str:\n #-- Get data\n if y in adata.obs.columns:\n plot_df = pd.DataFrame({'x': adata.obs['pseudotime'], 'y': adata.obs[y]})\n elif y in adata.var_names:\n plot_df = pd.DataFrame({'x': adata.obs['pseudotime'], 'y': adata[:,y].X.flatten()})\n \n #-- Make plot\n if color in adata.obs.columns:\n time_scatter = (ggplot(plot_df, aes(x = 'x', y = 'y'))\n + geom_point(aes(color = color), size = size, alpha = alpha)\n + labs(x = 'Pseudotime', y = y)\n + theme_std)\n else:\n time_scatter = (ggplot(plot_df, aes(x = 'x', y = 'y'))\n + geom_point(size = size, alpha = alpha, color = color)\n + labs(x = 'Pseudotime', y = y)\n + theme_std)\n \n else:\n #-- Make multiple color plot\n plt_vars = y\n plt_vars.append('pseudotime')\n sannot = adata.obs.copy()[plt_vars]\n sannot['id'] = range(sannot.shape[0])\n \n plot_df = pd.melt(sannot, id_vars = ['id', 'pseudotime'], \n var_name = 'signature', value_name = 'score')\n \n time_scatter = (ggplot(plot_df, aes('pseudotime', 'score'))\n + geom_point(aes(color = 'signature'), alpha = alpha, size = size)\n + theme_std)\n\n return time_scatter\n","sub_path":"scycle/plots/_scatter_pseudotime.py","file_name":"_scatter_pseudotime.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"148758007","text":"from django.urls import reverse_lazy\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom .factory import (\n ActorFactory,\n CastFactory,\n CinemaAwardFactory,\n CinemaAwardMovieFactory,\n MovieFactory,\n UserFactory,\n)\n\n# factory data\n\n\ndef get_new_cinema_award_instance(i=1):\n cinema_award = CinemaAwardFactory(name=f'cinema-award-{i}')\n movie1 = get_new_movie_instance(i=i * 10 + 1)['movie']\n movie2 = get_new_movie_instance(i=i * 10 + 2)['movie']\n\n movie_award1 = CinemaAwardMovieFactory(\n cinema_award=cinema_award, movie=movie1, rank=1\n )\n movie_award2 = CinemaAwardMovieFactory(\n cinema_award=cinema_award, movie=movie2, rank=1\n )\n\n return {\n 'cinema_award': cinema_award,\n 'rank': [movie_award1, movie_award2],\n }\n\n\ndef get_new_cinema_award_data(i=1):\n data = get_new_cinema_award_instance(i=i)\n\n return {\n 'id': data['cinema_award'].id,\n 'name': data['cinema_award'].name,\n 'rank': [\n {\n 'movie_id': v.movie.id,\n 'movie_name': v.movie.name,\n 'rank': v.rank,\n }\n for v in data['rank']\n ],\n }\n\n\ndef get_new_movie_instance(i=1):\n movie = MovieFactory(name=f'movie-{i}')\n actor1 = ActorFactory(name=f'actor-{i}-1')\n actor2 = ActorFactory(name=f'actor-{i}-2')\n\n casting_actor_1 = CastFactory(\n movie=movie, actor=actor1, character_name=f'character-name-{i}-1'\n )\n casting_actor_2 = CastFactory(\n movie=movie, actor=actor2, character_name=f'character-name-{i}-2'\n )\n\n return {'movie': movie, 'casting': [casting_actor_1, casting_actor_2]}\n\n\ndef get_new_movie_data(i=1):\n data = get_new_movie_instance(i=i)\n\n return {\n 'id': data['movie'].id,\n 'name': data['movie'].name,\n 'casting': [\n {\n 'actor_id': v.actor.id,\n 'actor_name': v.actor.name,\n 'character_name': v.character_name,\n }\n for v in data['casting']\n ],\n }\n\n\n# test\n\n\nclass CinemaAPITestCase(APITestCase):\n URL_PREFIX_NAME = 'api:cinema'\n factory_class = MovieFactory\n\n @classmethod\n def setUpTestData(cls):\n cls.user = UserFactory()\n\n def test_get_list(self):\n data = 
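# A stand-alone sketch of the plotnine pattern used by scatter_pseudotime above,
# shown on a tiny hand-made DataFrame rather than an AnnData object:
import pandas as pd
from plotnine import ggplot, aes, geom_point, labs

demo = pd.DataFrame({'x': [0.0, 0.25, 0.5, 0.75, 1.0],
                     'y': [0.1, 0.3, 0.2, 0.6, 0.9]})
p = (ggplot(demo, aes(x='x', y='y'))
     + geom_point(size=1.5, alpha=1, color='black')
     + labs(x='Pseudotime', y='score'))
# p.save('demo.png') or print(p) to render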
get_new_cinema_award_data()\n\n self.client.force_authenticate(self.user)\n response = self.client.get(\n reverse_lazy(f'{self.URL_PREFIX_NAME}-list')\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n result = response.json()['results'][0]\n assert result['name'] == data['name']\n assert result['rank'] == data['rank']\n\n def test_get_detail(self):\n data = get_new_cinema_award_data(i=1)\n\n self.client.force_authenticate(self.user)\n response = self.client.get(\n reverse_lazy(\n f'{self.URL_PREFIX_NAME}-detail', kwargs={'pk': data['id']}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n result = response.json()\n assert result['name'] == data['name']\n assert result['rank'] == data['rank']\n\n def test_get_cinema_movies_list(self):\n data = get_new_cinema_award_data(i=1)\n\n self.client.force_authenticate(self.user)\n response = self.client.get(\n reverse_lazy(f'{self.URL_PREFIX_NAME}-movies')\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n result = response.json()\n assert result[0]['name'] == data['name']\n assert result[0]['rank'] == data['rank']\n\n def test_get_cinema_awards_by_movie(self):\n cinema1 = get_new_cinema_award_data(i=1)\n cinema2 = get_new_cinema_award_data(i=2)\n\n new_movie = CinemaAwardMovieFactory(\n cinema_award_id=cinema2['id'],\n movie_id=cinema1['rank'][0]['movie_id'],\n rank=3,\n )\n\n cinema2['rank'].append(\n {\n 'movie_id': new_movie.movie.id,\n 'movie_name': new_movie.movie.name,\n 'rank': new_movie.rank,\n }\n )\n\n self.client.force_authenticate(self.user)\n response = self.client.get(\n reverse_lazy(\n f'{self.URL_PREFIX_NAME}-movies', kwargs={'movie_pk': 1}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n result = response.json()\n\n data = [cinema1, cinema2]\n\n assert len(data) == len(result)\n\n for i in range(len(result)):\n assert result[i]['name'] == data[i]['name']\n assert result[i]['rank'] == data[i]['rank']\n\n def test_put_cinema_awards_by_movie(self):\n cinema1 = get_new_cinema_award_data(i=1)\n\n self.client.force_authenticate(self.user)\n response = self.client.put(\n reverse_lazy(\n f'{self.URL_PREFIX_NAME}-movies',\n kwargs={'movie_pk': cinema1['id']},\n )\n )\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n","sub_path":"djangopoc/cinema/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"653931549","text":"# A function that accepts a list (which has a length of 4 or more) and a \n# string and returns the list such that the second through the fourth \n# element (index 1 through 3 both inclusive) in the input list are \n# replaced by the input string. 
For example:\n\n# input_list = [\"Isha\", \"Chandoygya\", \"Sri Vasya\", \"Mandukya\", \"Sri\"]\n# input_string = \"Brahman\" \n\n# Then, your function should return a list such as:\n\n# ['Isha', 'Brahman', 'Brahman', 'Brahman', 'Sri']\n\n# method 1\ndef List_Updating(LIST, STR):\n for i in range(len(LIST)):\n if (i >= 1) and (i < 4):\n LIST[i] = STR\n return LIST\n\n# method 2\ndef Updating_the_list(input_list,input_string):\n for i in range(1,4):\n input_list.pop(i)\n input_list.insert(i,input_string)\n return input_list\n\n################### Sample Solution ###################\ndef _list_manipulation_sample1_(input_list, input_string):\n for i in range(1, 4):\n input_list[i] = input_string\n return input_list\n\n# getting list from input\ndef list_create():\n lst = [] \n # number of elemetns as input \n n = int(input(\"Enter number of elements : \")) \n \n # iterating till the range \n for i in range(0, n): \n ele = input(\"enter list element : \")\n \n lst.append(ele) # adding the element\n return lst \n\n# Driver code test\nmy_list = list_create()\nstr = input('please enter your string : ')\nresult = List_Updating(my_list, str)\nprint(result)","sub_path":"08 - Week 5/04 List Exercises/01 List Manipulation Exercise 1 (Updating a List).py","file_name":"01 List Manipulation Exercise 1 (Updating a List).py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"142114541","text":"import pandas as pd\nimport sqlite3\nfrom pandas import DataFrame \n\nn_conn = sqlite3.connect('northwind_small.sqlite3')\nn_curs = n_conn.cursor()\n\n\n# What are the ten most expensive items (per unit price) in the database?\nquery = \"\"\"\nSELECT ProductName, UnitPrice\nFROM Product\nORDER BY UnitPrice DESC\nLIMIT 10\n\"\"\"\nn_curs.execute(query)\nprint(n_curs.fetchall())\n\n\n# What is the average age of an employee at the time of their hiring? (Hint: a\n# lot of arithmetic works with dates.)\n\nquery = \"\"\"\nSELECT AVG(HireDate-BirthDate)\nFROM Employee\n\"\"\"\nn_curs.execute(query)\nprint(n_curs.fetchall())\n\n# answer: 37.22\n\n# (*Stretch*) How does the average age of employee at hire vary by city?\nquery = \"\"\"SELECT City, AVG(HireDate-BirthDate)\nFROM Employee\nGROUP BY City\n\"\"\"\nn_curs.execute(query)\nprint(n_curs.fetchall())\n\n# What are the ten most expensive items (per unit price) \n# in the database *and* their suppliers?\nquery = \"\"\"\nSELECT ProductName, UnitPrice, CompanyName\nFROM Product as p\nJOIN Supplier as s\nON p.SupplierID = s.ID\nORDER BY UnitPrice DESC\nLIMIT 10\n\"\"\"\nn_curs.execute(query)\nprint(n_curs.fetchall())\n\n# What is the largest category (by number of unique products in it)?\nquery = \"\"\"\nSELECT CategoryName, COUNT(CategoryName)\nFROM Category as c\nJOIN Product as p\nON c.ID=p.CategoryID\nGROUP BY CategoryName\nORDER by COUNT(CategoryName) DESC\n\"\"\"\nn_curs.execute(query)\nprint(n_curs.fetchall())\n\n# largest category is Confections 13\n\n# (*Stretch*) Who's the employee with the most territories? 
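# Referring back to the list-replacement exercise above: the same result can be
# produced with a single slice assignment, the idiomatic Python form of
# "replace indices 1 through 3 inclusive" (sketch):
def list_updating_slice(lst, s):
    lst[1:4] = [s] * 3
    return lst

print(list_updating_slice(["Isha", "Chandoygya", "Sri Vasya", "Mandukya", "Sri"], "Brahman"))
# -> ['Isha', 'Brahman', 'Brahman', 'Brahman', 'Sri']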
Use `TerritoryId`\n# (not name, region, or other fields) as the unique identifier for territories.\n# EMPLOYEE ID 7\n\nquery = \"\"\"\nSELECT EmployeeId, TerritoryId, COUNT(DISTINCT TerritoryId)\nFROM EmployeeTerritory\nGROUP BY EmployeeId\nORDER BY COUNT(DISTINCT TerritoryId) DESC\n\"\"\"\nn_curs.execute(query)\nprint(n_curs.fetchall())\n\n","sub_path":"sc/northwind.py","file_name":"northwind.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"149488897","text":"#!/usr/bin/env python\n\n'''\n\nThis is publisher ->msg type patrol_messages/TaskDone.\nIntended for testing purpose.\n'''\n\nimport rospy\nfrom patrol_messages.msg import *\n\n\n#peer_subscribe(self, topic_name, topic_publish, peer_publish)\n#source code \n#callback when a peer has subscribed from a \n\nclass TestNextTaskPublication:\n def __init__(self):\n try:\n self.next_task_pubs = rospy.Publisher('/robot_0/next_task', NextTask, self, queue_size=2)\n except Exception:\n print (\"Error in creation of next task publisher\")\n\n def peer_subscribe(self, topic_name, topic_publish, peer_publish):\n print ('connected')\n next_task_msg = NextTask()\n next_task_msg.task.append('0')\n next_task_msg.task.append('1')\n next_task_msg.task.append('0')\n self.next_task_pubs.publish(next_task_msg)\n'''\n def publish_next_task(self, next_task): \n while not rospy.is_shutdown(): \n count_connections = self.next_task_pubs.get_num_connections()\n if count_connections > 0:\n rospy.loginfo (\" number of live connections are %d\", count_connections)\n self.next_task_pubs.publish(next_task_msg)\n break\n rospy.sleep(1)\n'''\nif __name__ == \"__main__\":\n rospy.init_node('tpbp_next_task', anonymous = True)\n t = TestNextTaskPublication()\n rospy.spin()\n\n\n\n","sub_path":"patrol_algo/test_scripts/publish_next_task.py","file_name":"publish_next_task.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"528133","text":"import numpy as np\nimport robcom_python as robcom\n\nimport pyrado\nfrom pyrado.environments.base import Env\nfrom pyrado.spaces import BoxSpace\nfrom pyrado.spaces.base import Space\nfrom pyrado.tasks.base import Task\nfrom pyrado.tasks.final_reward import FinalRewTask, FinalRewMode\nfrom pyrado.tasks.goalless import GoallessTask\nfrom pyrado.tasks.reward_functions import ZeroPerStepRewFcn\nfrom pyrado.utils.data_types import RenderMode\nfrom pyrado.utils.input_output import print_cbt\n\n\nclass WAMBallInCupReal(Env):\n \"\"\"\n Class for the real Barrett WAM\n\n Uses robcom 2.0 and specifically robcom's GoTo process to execute a trajectory given by desired joint positions.\n The process is only executed on the real system after `max_steps` has been reached to avoid possible latency,\n but at the same time mimic the usual step-based environment behavior.\n \"\"\"\n\n name: str = 'wam-bic'\n\n def __init__(self,\n dt: float = 1/500.,\n max_steps: int = pyrado.inf,\n ip: str = '192.168.2.2',\n poses_des: [np.ndarray, None] = None):\n \"\"\"\n Constructor\n\n :param dt: sampling time interval\n :param max_steps: maximum number of time steps\n :param ip: IP address of the PC controlling the Barrett WAM\n :param poses_des: desired joint poses as N x 3 ndarray, where N is the number of steps in the trajectory\n \"\"\"\n # Call the base class constructor to initialize fundamental members\n super().__init__(dt, max_steps)\n\n # Create the robcom client 
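# Referring back to the Northwind queries above: subtracting ISO-8601 date
# strings in SQLite only compares their leading year digits (the text is cast
# to a number), so AVG(HireDate - BirthDate) is a whole-year approximation.
# Assuming the dates are stored as ISO-8601 text, a julianday-based variant is
# more precise (sketch, reusing the n_curs cursor defined in that snippet):
query = """
SELECT AVG((julianday(HireDate) - julianday(BirthDate)) / 365.25)
FROM Employee
"""
n_curs.execute(query)
print(n_curs.fetchall())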
and connect to it\n self._client = robcom.Client()\n self._client.start(ip, 2013) # IP address and port\n print_cbt('Connected to the Barret WAM client.', 'c', bright=True)\n self._gt = None # Goto command\n\n # Desired joint position for the initial state\n self.init_pose_des = np.array([0.0, 0.5876, 0.0, 1.36, 0.0, -0.321, -1.57])\n\n # Initialize spaces\n self._state_space = None\n self._obs_space = None\n self._act_space = None\n self._create_spaces()\n\n # Initialize task\n self._task = self._create_task(dict())\n\n # Desired trajectory\n if poses_des is not None:\n if not poses_des.shape[1] == 7:\n raise pyrado.ShapeErr(given=poses_des.shape[1], expected_match=self.init_pose_des)\n self.poses_des = poses_des\n\n @property\n def state_space(self) -> Space:\n return self._state_space\n\n @property\n def obs_space(self) -> Space:\n return self._obs_space\n\n @property\n def act_space(self) -> Space:\n return self._act_space\n\n @property\n def task(self) -> Task:\n return self._task\n\n def _create_task(self, task_args: dict) -> Task:\n # The wrapped task acts as a dummy and carries the FinalRewTask s\n return FinalRewTask(GoallessTask(self.spec, ZeroPerStepRewFcn()), mode=FinalRewMode(user_input=True))\n\n def _create_spaces(self):\n # State space (normalized time, since we do not have a simulation)\n self._state_space = BoxSpace(np.array([0.]), np.array([1.]))\n\n # Action space (PD controller on 3 joint positions and velocities)\n max_act = np.array([np.pi, np.pi, np.pi, # [rad, rad, rad, ...\n 10*np.pi, 10*np.pi, 10*np.pi]) # ... rad/s, rad/s, rad/s]\n self._act_space = BoxSpace(-max_act, max_act,\n labels=[r'$q_{1,des}$', r'$q_{3,des}$', r'$q_{5,des}$',\n r'$\\dot{q}_{1,des}$', r'$\\dot{q}_{3,des}$', r'$\\dot{q}_{5,des}$'])\n\n # Observation space (normalized time)\n self._obs_space = BoxSpace(np.array([0.]), np.array([1.]), labels=['$t$'])\n\n def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:\n # Create robcom GoTo process\n gt = self._client.create(robcom.Goto, 'RIGHT_ARM', '')\n\n # Move to initial state within 5 seconds\n gt.add_step(5., self.init_pose_des)\n print_cbt('Moving the Barret WAM to the initial position.', 'c', bright=True)\n\n # Start process and wait for completion\n gt.start()\n gt.wait_for_completion()\n print_cbt('Reached the initial position.', 'c')\n\n # Reset the task which also resets the reward function if necessary\n self._task.reset(env_spec=self.spec)\n\n input('Hit enter to continue.')\n\n # Reset time steps\n self._curr_step = 0\n self.state = np.array([self._curr_step/self.max_steps])\n\n return self.observe(self.state)\n\n def step(self, act: np.ndarray) -> tuple:\n info = dict(t=self._curr_step*self._dt, act_raw=act)\n\n # Current reward depending on the (measurable) state and the current (unlimited) action\n remaining_steps = self._max_steps - (self._curr_step + 1) if self._max_steps is not pyrado.inf else 0\n self._curr_rew = self._task.step_rew(self.state, act, remaining_steps) # always 0 for wam-bic-real\n\n act = self.limit_act(act)\n\n if self.poses_des is not None and self._curr_step < self.poses_des.shape[0]:\n # Use given desired trajectory if given and time step does no exceed its length\n qpos_des = self.poses_des[self._curr_step]\n else:\n # Otherwise use the action given by a policy\n qpos_des = self.init_pose_des.copy() # keep the initial joint angles deselected joints\n np.add.at(qpos_des, [1, 3, 5], act[:3]) # the policy operates on joint 1, 3 and 5\n\n # Create robcom GoTo process at the first 
time step TODO @Christian: possible move to the end of reset()?\n if self._curr_step == 0:\n self._gt = self._client.create(robcom.Goto, 'RIGHT_ARM', '')\n\n # Add desired joint position as step to the process\n self._gt.add_step(self.dt, qpos_des)\n self._curr_step += 1\n self.state = np.array([self._curr_step/self.max_steps])\n\n # A GoallessTask only signals done when has_failed() is true, i.e. the the state is out of bounds\n done = self._task.is_done(self.state) # always false for wam-bic-real\n\n # Only start execution of process when all desired poses have been added to process\n # i.e. `max_steps` has been reached\n if self._curr_step >= self._max_steps:\n done = True\n print_cbt('Executing trajectory on Barret WAM.', 'c', bright=True)\n self._gt.start()\n self._gt.wait_for_completion()\n print_cbt('Finished execution.', 'c')\n\n # Add final reward if done\n if done:\n # Ask the user to enter the final reward\n self._curr_rew += self._task.final_rew(self.state, remaining_steps)\n\n return self.observe(self.state), self._curr_rew, done, info\n\n def render(self, mode: RenderMode, render_step: int = 1):\n # Skip all rendering\n pass\n\n def close(self):\n # Don't close the connection to robcom manually, since this might cause SL to crash.\n # Closing the connection is finally handled by robcom\n pass\n","sub_path":"Pyrado/pyrado/environments/barrett_wam/wam.py","file_name":"wam.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"409008343","text":"# Скажите а.\r\n# На вход программе подается строка.\r\n# Если первым символом этой строки является буква \"a\",\r\n# в любом регистре (а и А).\r\n# вывести в консоль \"ДА\", и \"НЕТ\" в противном случае.\r\n\r\n# Рофл НЕТ\r\n\r\n# Авада-кедавра НЕТ\r\n\r\n# авада-кедавра ДА\r\n\r\n\r\nmessage = input()\r\nif message[0].lower() == \"а\": \r\n print(\"ДА\")\r\nelse:\r\n print(\"НЕТ\")","sub_path":"Lec_13_15.12/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"101250016","text":"import math\n\n\ndef fact(n):\n assert isinstance(n, int)\n if n == 0:\n return 1\n if n == 1:\n return 1\n return n * fact(n - 1)\n\n\ndef choose(n, p):\n return fact(n) / (fact(n - p) * fact(p))\n\n\ndef test():\n assert fact(5) == 120\n assert choose(5, 3) == 10\n\n\ndef helper(n):\n for i in range(n + 1):\n print(f\"{i} -> {choose(n, i)%7}\")\n\n\ndef proto_solve():\n \"\"\" first 100 rows \"\"\"\n m = math.floor(100 / 7)\n\n return m * (m + 1) / 2 * 28\n\n\ndef triangle(n):\n with open(\"pascal.txt\", \"w\") as file:\n result = 0\n for i in range(n):\n seq = \"\"\n for j in range(i + 1):\n if choose(i, j) % 7 == 0:\n seq += \"° \"\n\n else:\n seq += \"# \"\n result += 1\n file.write(\" \" * (n - i - 1) + seq + \" \" * (n - i - 1) + \"\\n\")\n return result\n\n\nif __name__ == \"__main__\":\n test()\n print(triangle(98))\n # for i in range(28, 35):\n # print(i)\n # helper(i)\n# print(proto_solve())\n","sub_path":"problem148.py","file_name":"problem148.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"246180086","text":"##############################################################################\n#\n# Copyright (c) 2004 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public 
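# English translation of the Russian problem statement in task2.py above:
# "The program reads a string. If its first character is the letter 'a'
# (Cyrillic, in either case), print 'ДА' (YES); otherwise print 'НЕТ' (NO)."
# The comparison message[0].lower() == "а" uses the Cyrillic letter; a slightly
# more defensive form avoids an IndexError on empty input (sketch):
message = input()
print("ДА" if message[:1].lower() == "а" else "НЕТ")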
License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Principal Information Interfaces\n\n$Id$\n\"\"\"\nfrom zope.i18n import MessageIDFactory\nfrom zope.interface import Interface\nfrom zope.schema import TextLine\n\n_ = MessageIDFactory('messageboard')\n\n\nclass IPrincipalInformation(Interface):\n \"\"\"This interface additional information about a principal.\"\"\"\n\n email = TextLine(\n title=_(\"E-mail\"),\n description=_(\"E-mail Address\"),\n default=u\"\",\n required=False)\n\n ircNickname = TextLine(\n title=_(\"IRC Nickname\"),\n description=_(\"IRC Nickname\"),\n default=u\"\",\n required=False)\n","sub_path":"book/trunk/principalinfo/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"530438078","text":"# How many Lychrel numbers are there below ten-thousand?\n\nimport func # pylint: disable=E0401\n\nlychrel_count = 0\nfor num in range(1, 10000):\n lychrel_flag = False\n for _ in range(50):\n reverse_num = int(str(num)[::-1])\n num += reverse_num\n if (func.is_palindrome(num)):\n lychrel_flag = True\n break\n if not lychrel_flag:\n lychrel_count += 1\n\nprint(lychrel_count)\n# answer equals 249","sub_path":"50-59/55.py","file_name":"55.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"454374570","text":"#!/usr/bin/env python\n# PySys System Test Framework, Copyright (C) 2006-2018 M.B.Grieve\n\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
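# The Lychrel snippet above (55.py) relies on an external func.is_palindrome
# helper; a self-contained sketch of the same reverse-and-add check:
def is_palindrome(n):
    s = str(n)
    return s == s[::-1]

def is_lychrel(num, max_iterations=50):
    for _ in range(max_iterations):
        num += int(str(num)[::-1])
        if is_palindrome(num):
            return False
    return True

print(sum(is_lychrel(n) for n in range(1, 10000)))  # 249, per the comment above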
See the GNU\n# Lesser General Public License for more details.\n\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n\n# Contact: moraygrieve@users.sourceforge.net\n\nfrom __future__ import print_function\nimport sys, os\nfrom pysys.exceptions import *\n\ndef copyfileobj(fsrc, fdst, length=16*1024):\n\t\"\"\"Internal method to read bytes from a source file descriptor, and write to a destination file descriptor.\n\t\n\t@param fsrc: The source file descriptor\n\t@param fdst: The destination file descriptor\n\t@param length: The buffer length to read from the src and write to the destination\n\t\n\t\"\"\"\n\twhile 1:\n\t\tbuf = fsrc.read(length)\n\t\tif not buf:\n\t\t\tbreak\n\t\tfdst.write(buf)\n\n\ndef filecopy(src, dst):\n\t\"\"\"Copy source file to a destination file.\n\t\n\t@param src: Full path to the source filename\n\t@param dst: Full path the destination filename\n \t@raises FileNotFoundException: Raised if the source file does not exist\n \n\t\"\"\"\n\tif not os.path.exists(src):\n\t\traise FileNotFoundException(\"unable to find file %s\" % (os.path.basename(src)))\n\t\n\tfsrc = None\n\tfdst = None\n\ttry:\n\t\tfsrc = open(src, 'rb')\n\t\tfdst = open(dst, 'wb')\n\t\tcopyfileobj(fsrc, fdst)\n\tfinally:\n\t\tif fdst:\n\t\t\tfdst.close()\n\t\tif fsrc:\n\t\t\tfsrc.close()\n\n\n# entry point for running the script as an executable\nif __name__ == \"__main__\":\n\tif len(sys.argv) < 2:\n\t\tprint(\"Usage: filecopy \")\n\t\tsys.exit()\n\telse:\n\t\tfilecopy(sys.argv[1], sys.argv[2])\n\n","sub_path":"pysys/utils/filecopy.py","file_name":"filecopy.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"289037322","text":"\n'''\n Ensemble method: phương pháp kết hợp\n class: sklearn.ensemble.ExtraTreeClassifier\n class: sklearn.ensemble.RandomForestClassifier\n'''\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\n\n# [height, weight,shoe_size]\nX = [[181, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166, 65, 40],\n [190, 90, 47], [175, 64, 39],\n [177, 70, 40], [159, 55, 37], [171, 75, 42], [181, 85, 43]]\n\n# với mỗi bộ X ở trên gán nhãn cho Y\n\nY = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female',\n 'female', 'male', 'male']\n\n\nclf = RandomForestClassifier(n_estimators=2)\n\nclf = clf.fit(X,Y)\n\nprection = clf.predict([[190, 70, 43]])\n\nprint(prection)\n\nclf2 = ExtraTreesClassifier(n_estimators=2)\n\nclf2 = clf2.fit(X,Y)\n\nprection2 = clf2.predict([[190, 70, 43]])\n\nprint(prection2)\n\n","sub_path":"Month 1 - Data Analysis/Week 1 - Learn Python/LearnSimpleCode/sklearn/ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"350892348","text":"#!/usr/bin/python3\n\n##--Michael duPont\n##--AVWX-Engine : avwx.py\n##--Shared METAR settings and functions\n##--2015-09-04\n\n# This file contains a series of functions and variables that can be used\n# in any project that needs a means of fetching, interpretting, and/or\n# translating METAR and TAF weather data.\n#\n# While the list of functions is rather large, here are the most useful:\n# getMETAR(station) Returns METAR report for a given station\n# getTAF(station) 
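# The filecopy module above re-implements a buffered file copy; for reference,
# the standard library offers the same behaviour via shutil.copyfile(src, dst)
# and shutil.copyfileobj(fsrc, fdst, length). A tiny self-contained sketch:
import os, shutil, tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello')
dst = tmp.name + '.copy'
shutil.copyfile(tmp.name, dst)
with open(dst, 'rb') as f:
    assert f.read() == b'hello'
os.remove(tmp.name)
os.remove(dst)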
Returns TAF report for a given station\n# parseMETAR(txt) Returns a key-value dict where 'txt' is the report getMETAR returns\n# parseTAF(txt) Returns a key-value dict where 'txt' is the report getTAF returns\n# translateMETAR(wxData) Returns a key-value dict of field translations where 'wxData' is what parseMETAR returns\n# translateTAF(wxData) Returns a key-value dict of field translations where 'wxData' is what parseTAF returns\n# getInfoForStation(station) Returns a key-value dict of basic info for a given 'station'\n# getFlightRules(vis , splitCloud)\n# Returns int corresponding to flight rules index in flightRules\n# Standard usage states that the current rule is IFR if visibility value is not available\n# vis is string of visibility distance value\n# splitCloud is the ceiling is the format [type,height,modifier]\n# Typical usage: flightRules[getFlightRules(parsedReport['Visibility'] , getCeiling(parsedReport['Cloud-List']))]\n# Usage with values: flightRules[getFlightRules('8' , ['BKN','014',''])] -> 'MVFR'\n#\n# Both METAR and TAF reports come in the Standard/International variant and\n# the North American variant. parseMETAR and parseTAF automatically figure out\n# which parser variant to use, but you're welcome to use the variant functions\n# directly if you want. Just know that parsing errors will arise if the\n# International parser tries to parse a US report.\n#\n# Example usage for both METAR and TAF can be found at the bottom of the file.\n# You can run this test code by running this file: python avwx.py\n\nimport sqlite3 , json , sys\nif sys.version_info[0] == 2: import urllib2\nelif sys.version_info[0] == 3: from urllib.request import urlopen\nfrom itertools import permutations\nfrom time import strftime\nfrom copy import copy\nfrom os import path\nfrom .xmltodict import parse\n\n##--Logic Vars\nflightRules = ['VFR','MVFR','IFR','LIFR']\ncloudList = ['FEW','SCT','BKN','OVC']\nwxReplacements = {\n'RA':'Rain','TS':'Thunderstorm','SH':'Showers','DZ':'Drizzle','VC':'Vicinity','UP':'Unknown Precip',\n'SN':'Snow','FZ':'Freezing','SG':'Snow Grains','IC':'Ice Crystals','PL':'Ice Pellets','GR':'Hail','GS':'Small Hail',\n'FG':'Fog','BR':'Mist','HZ':'Haze','VA':'Volcanic Ash','DU':'Wide Dust','FU':'Smoke','SA':'Sand','SY':'Spray',\n'SQ':'Squall','PO':'Dust Whirls','DS':'Duststorm','SS':'Sandstorm','FC':'Funnel Cloud',\n'BL':'Blowing','MI':'Shallow','BC':'Patchy','PR':'Partial','UP':'Unknown'}\n\nmetarRMKStarts = [' BLU',' BLU+',' WHT',' GRN',' YLO',' AMB',' RED',' BECMG',' TEMPO',' INTER',' NOSIG',' RMK',' WIND',' QFE',' QFF',' INFO',' RWY',' CHECK']\ntafRMKStarts = ['RMK ','AUTOMATED ','COR ','AMD ','LAST ','FCST ','CANCEL ','CHECK ','WND ','MOD ',' BY',' QFE',' QFF']\ntafNewLineStarts = [' INTER ' , ' FM' , ' BECMG ' , ' TEMPO ']\n\n##--Station Location Identifiers\nRegionsUsingUSParser = ['C', 'K', 'P', 'T']\nRegionsUsingInternationalParser = ['A', 'B', 'D', 'E', 'F', 'G', 'H', 'L', 'N', 'O', 'R', 'S', 'U', 'V', 'W', 'Y', 'Z']\n#The Central American region is split. 
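# The wxReplacements table above maps 2-letter METAR weather codes to words; a
# compound code such as 'VCTSRA' can be expanded by splitting it into pairs
# (a sketch of one possible decode, not necessarily how the engine itself
# renders weather groups):
code = 'VCTSRA'
print(' '.join(wxReplacements[code[i:i + 2]] for i in range(0, len(code), 2)))
# -> 'Vicinity Thunderstorm Rain'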
Therefore we need to use the first two letters\nMStationsUsingUSParser = ['MB', 'MM', 'MT', 'MY']\nMStationsUsingInternationalParser = ['MD' , 'MG', 'MH', 'MK' , 'MN', 'MP', 'MR', 'MS', 'MU' , 'MW' , 'MZ']\n\nnaUnits = {'Wind-Speed':'kt','Visibility':'sm','Altitude':'ft','Temperature':'C','Altimeter':'inHg'}\ninUnits = {'Wind-Speed':'kt','Visibility':'m','Altitude':'ft','Temperature':'C','Altimeter':'hPa'}\ncurUnits = {} #Global placeholder for report units\n\n#LOCAL_PATH = path.dirname(path.realpath(__file__))\nstationDBPath = path.dirname(path.realpath(__file__))+'/stations.sqlite' #Path to the station info database\nrequestURL = \"\"\"https://aviationweather.gov/adds/dataserver_current/httpparam?dataSource={0}s&requestType=retrieve&format=XML&stationString={1}&hoursBeforeNow=2\"\"\"\n\n####################################################################################################################################\n##--Shared Functions\n\n#Returns the index of the earliest occurence of an item from a list in a string\ndef findFirstInList(txt,aList):\n\tstartIndex = len(txt)+1\n\tfor item in aList:\n\t\tif -1 < txt.find(item) < startIndex: startIndex = txt.find(item)\n\tif -1 < startIndex < len(txt)+1: return startIndex\n\treturn -1\n\n#Remove remarks and split\n#Remarks can include RMK and on, NOSIG and on, and BECMG and on\ndef __getRemarks(txt):\n\ttxt = txt.replace('?' , '').strip(' ')\n\t#First look for Altimeter in txt\n\taltIndex = len(txt)+1\n\tfor item in [' A2',' A3',' Q1',' Q0',' Q9']:\n\t\tindex = txt.find(item)\n\t\tif -1 < index < len(txt)-6 and txt[index+2:index+6].isdigit(): altIndex = index\n\t#Then look for earliest remarks 'signifier'\n\tsigIndex = findFirstInList(txt , metarRMKStarts)\n\tif sigIndex == -1: sigIndex = len(txt)+1\n\tif -1 < altIndex < sigIndex: return txt[:altIndex+6].strip().split(' ') , txt[altIndex+7:]\n\telif -1 < sigIndex < altIndex: return txt[:sigIndex].strip().split(' ') , txt[sigIndex+1:]\n\treturn txt.strip().split(' ') , ''\n\n#Provides sanitization for operations that work better when the report is a string\n#Returns the first pass sanitized report string\nstrReplacements = {' C A V O K ':' CAVOK ' , '?':' '}\ndef __sanitizeFirstPass(txt):\n\tif len(txt) > 4:\n\t\tstid = txt[:4]\n\t\ttxt = txt[4:] #Prevent changes to station ID\n\tfor key in strReplacements: txt = txt.replace(key , strReplacements[key])\n\t#Check for missing spaces in front of cloud layers\n\t#Ex: TSFEW004SCT012FEW///CBBKN080\n\tfor cloud in cloudList:\n\t\tif txt.find(cloud) != -1 and txt.find(' '+cloud) == -1:\n\t\t\tstartIndex = 0\n\t\t\tcounter = 0\n\t\t\twhile txt.count(cloud) != txt.count(' '+cloud):\n\t\t\t\tif len(txt[txt[startIndex:].find(cloud)+startIndex:]) >= 3:\n\t\t\t\t\ttargetText = txt[txt[startIndex:].find(cloud)+len(cloud)+startIndex:txt[startIndex:].find(cloud)+len(cloud)+startIndex+3]\n\t\t\t\t\tif targetText.isdigit() or not targetText.strip('/'):\n\t\t\t\t\t\ttxt = txt[:txt[startIndex:].find(cloud)+startIndex] + ' ' + txt[txt[startIndex:].find(cloud)+startIndex:]\n\t\t\t\tstartIndex = startIndex + txt[startIndex:].find(cloud) + len(cloud) + 1\n\t\t\t\t#Prevent infinite loops\n\t\t\t\tif counter > txt.count(cloud): break\n\t\t\t\tcounter += 1\n\treturn stid + txt\n\n#Return True if a space shouldn't exist between two items\n#This list grew so large that it had to be moved to its own function for readability\ndef extraSpaceExists(s1 , s2):\n\tif s1.isdigit():\n\t\t# 10 SM\n\t\tif s2 in ['SM','0SM']: return True\n\t\t# 12 /10\n\t\tif len(s2) > 2 and 
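# Quick sanity check of findFirstInList defined above: it returns the index of
# the earliest match from the list, or -1 when nothing matches (sketch):
print(findFirstInList('OVC010 RMK AO2', [' RMK', ' NOSIG']))  # -> 6
print(findFirstInList('OVC010 CAVOK', [' RMK', ' NOSIG']))    # -> -1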
s2[0] == '/' and s2[1:].isdigit(): return True\n\tif s2.isdigit():\n\t\t# OVC 040\n\t\tif s1 in cloudList: return True\n\t\t# 12/ 10\n\t\tif len(s1) > 2 and s1.endswith('/') and s1[:len(s1)-1].isdigit(): return True\n\t\t# 12/1 0\n\t\tif len(s2) == 1 and len(s1) > 3 and s1[:2].isdigit() and s1.find('/') != -1 and s1[3:].isdigit(): return True\n\t\t# Q 1001\n\t\tif s1 in ['Q','A']: return True\n\t# 36010G20 KT\n\tif s2 == 'KT' and (s1[:5].isdigit() or (s1.startswith('VRB') and s1[3:5].isdigit())) and s1[len(s1)-1].isdigit(): return True\n\t# 36010K T\n\tif s2 == 'T' and len(s1) == 6 and (s1[:5].isdigit() or (s1.startswith('VRB') and s1[3:5].isdigit())) and s1[5] == 'K': return True\n\t# OVC022 CB\n\tif s2 in cloudTranslationStrings and s2 not in cloudList and len(s1) >= 3 and s1[:3] in cloudList: return True\n\t# FM 122400\n\tif s1 in ['FM','TL'] and (s2.isdigit() or (s2.endswith('Z') and s2[:len(s2)-1].isdigit())): return True\n\t# TX 20/10\n\tif s1 in ['TX','TN'] and s2.find('/') != -1: return True\n\treturn False\n\n#Sanitize wxData\n#We can remove and identify \"one-off\" elements and fix other issues before parsing a line\n#We also return the runway visibility and wind shear since they are very easy to recognize\n#and their location in the report is non-standard\nitemRemoval = ['AUTO' , 'COR' , 'NSC' , 'NCD' , '$' , 'KT' , 'M' , '.' , 'RTD' , 'SPECI' , 'METAR' , 'CORR']\nitemReplacements = {'CALM': '00000KT'}\nvisPermutations = [''.join(p) for p in permutations('P6SM')]\nvisPermutations.pop(visPermutations.index('6MPS'))\ndef __sanitize(wxData , removeCLRandSKC=True):\n\tshear = ''\n\trunwayVisibility = []\n\tfor i , item in reversed(list(enumerate(wxData))):\n\t\t#print(i , item)\n\t\t#Remove elements containing only '/'\n\t\tif not item.strip('/'):\n\t\t\twxData.pop(i)\n\t\t#Identify Runway Visibility\n\t\telif len(item) > 4 and item[0] == 'R' and (item[3] == '/' \\\n\t\tor item[4] == '/') and item[1:3].isdigit():\n\t\t\trunwayVisibility.append(wxData.pop(i))\n\t\t#Remove RE from wx codes, REVCTS -> VCTS\n\t\telif len(item) in [4,6] and item.startswith('RE'):\n\t\t\twxData.pop(i)\n\t\t#Fix a slew of easily identifiable conditions where a space does not belong\n\t\telif i and extraSpaceExists(wxData[i-1] , item):\n\t\t\twxData[i-1] += wxData.pop(i)\n\t\t#Remove spurious elements\n\t\telif item in itemRemoval:\n\t\t\twxData.pop(i)\n\t\t#Remove 'Sky Clear' from METAR but not TAF\n\t\telif removeCLRandSKC and item in ['CLR' , 'SKC']:\n\t\t\twxData.pop(i)\n\t\t#Replace certain items\n\t\telif item in itemReplacements:\n\t\t\twxData[i] = itemReplacements[item]\n\t\t#Remove ammend signifier from start of report ('CCA','CCB',etc)\n\t\telif len(item) == 3 and item.startswith('CC') and item[2].isalpha():\n\t\t\twxData.pop(i)\n\t\t#Identify Wind Shear\n\t\telif len(item) > 6 and item.startswith('WS') and item.find('/') != -1:\n\t\t\tshear = wxData.pop(i).replace('KT' , '')\n\t\t#Fix inconsistant 'P6SM' Ex: TP6SM or 6PSM -> P6SM\n\t\telif len(item) > 3 and item[len(item)-4:] in visPermutations:\n\t\t\twxData[i] = 'P6SM'\n\t\t#Fix wind T\n\t\telif (len(item) == 6 and item[5] in ['K','T'] and (item[:5].isdigit() or item.startswith('VRB'))) \\\n\t\tor (len(item) == 9 and item[8] in ['K','T'] and item[5] == 'G' and (item[:5].isdigit() or item.startswith('VRB'))):\n\t\t\twxData[i] = item[:len(item)-1] + 'KT'\n\t\t#Fix joined TX-TN\n\t\telif len(item) > 16 and len(item.split('/')) == 3:\n\t\t\tif item.startswith('TX') and item.find('TN') != -1:\n\t\t\t\twxData.insert(i+1 , 
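# extraSpaceExists above flags token pairs that were split by a stray space so
# that the sanitizer can glue them back together; expected results, for example:
#   extraSpaceExists('BKN', '014') -> True   ('BKN 014' is re-joined to 'BKN014')
#   extraSpaceExists('10', 'SM')   -> True   ('10 SM' becomes '10SM')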
item[:item.find('TN')])\n\t\t\t\twxData[i] = item[item.find('TN'):]\n\t\t\telif item.startswith('TN') and item.find('TX') != -1:\n\t\t\t\twxData.insert(i+1 , item[:item.find('TX')])\n\t\t\t\twxData[i] = item[item.find('TX'):]\n\t\t#print(i , item)\n\treturn wxData , runwayVisibility , shear\n\t\n#Altimeter\ndef __getAltimeterUS(wxData):\n\taltimeter = ''\n\t#Get altimeter\n\tif wxData:\n\t\tif wxData[len(wxData)-1][0] == 'A': altimeter = wxData.pop()[1:]\n\t\telif wxData[len(wxData)-1][0] == 'Q':\n\t\t\tglobal curUnits\n\t\t\tcurUnits['Altimeter'] = 'hPa'\n\t\t\taltimeter = wxData.pop()[1:].lstrip('.')\n\t\telif len(wxData[len(wxData)-1]) == 4 and wxData[len(wxData)-1].isdigit(): altimeter = wxData.pop()\n\t#Some stations report both, but we only need one\n\tif wxData and (wxData[len(wxData)-1][0] == 'A' or wxData[len(wxData)-1][0] == 'Q'): wxData.pop()\n\treturn wxData , altimeter\n\ndef __getAltimeterInternational(wxData):\n\taltimeter = ''\n\t#Get altimeter\n\tif wxData:\n\t\tif wxData[len(wxData)-1][0] == 'Q':\n\t\t\taltimeter = wxData.pop()[1:].lstrip('.')\n\t\t\tif altimeter.find('/') != -1: altimeter = altimeter[:altimeter.find('/')]\n\t\telif wxData[len(wxData)-1][0] == 'A':\n\t\t\tglobal curUnits\n\t\t\tcurUnits['Altimeter'] = 'inHg'\n\t\t\taltimeter = wxData.pop()[1:]\n\t#Some stations report both, but we only need one\n\tif wxData and (wxData[len(wxData)-1][0] == 'A' or wxData[len(wxData)-1][0] == 'Q'): wxData.pop()\n\treturn wxData , altimeter\n\ndef __getTAFAltIceTurb(wxData):\n\taltimeter = ''\n\ticing , turbulence = [] , []\n\tfor i in reversed(range(len(wxData))):\n\t\tif len(wxData[i]) > 6 and wxData[i].startswith('QNH') and wxData[i][3:7].isdigit():\n\t\t\taltimeter = wxData.pop(i)[3:7]\n\t\telif wxData[i].isdigit():\n\t\t\tif wxData[i][0] == '6': icing.append(wxData.pop(i))\n\t\t\telif wxData[i][0] == '5': turbulence.append(wxData.pop(i))\n\treturn wxData , altimeter , icing , turbulence\n\ndef isPossibleTemp(temp):\n\tfor index in temp:\n\t\tif index not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'M']: return False\n\treturn True\n\n#Temp/Dewpoint\ndef __getTempAndDewpoint(wxData):\n\tfor i in reversed(range(len(wxData))):\n\t\tif wxData[i].find('/') != -1:\n\t\t\tcurVal = copy(wxData[i])\n\t\t\t#///07\n\t\t\tif curVal[0] == '/': curVal = '/' + curVal.strip('/')\n\t\t\t#07///\n\t\t\telif curVal[len(curVal)-1] == '/': curVal = curVal.strip('/') + '/'\n\t\t\tTD = curVal.split('/')\n\t\t\tif len(TD) != 2: continue\n\t\t\tisValid = True\n\t\t\tfor j in range(len(TD)):\n\t\t\t\tif TD[j] in ['MM','XX']: TD[j] = ''\n\t\t\t\telif not isPossibleTemp(TD[j]):\n\t\t\t\t\tisValid = False\n\t\t\t\t\tbreak\n\t\t\tif isValid:\n\t\t\t\twxData.pop(i)\n\t\t\t\treturn wxData , TD[0] , TD[1]\n\treturn wxData , '' , ''\n\n#Station and Time\ndef __getStationAndTime(wxData):\n\tstation = wxData.pop(0)\n\tif wxData and wxData[0].endswith('Z') and wxData[0][:len(wxData[0])-1].isdigit(): rTime = wxData.pop(0)\n\telif wxData and len(wxData[0]) == 6 and wxData[0].isdigit(): rTime = wxData.pop(0) + 'Z'\n\telse: rTime = ''\n\treturn wxData , station , rTime\n\n#Surface wind\ndef __getWindInfo(wxData):\n\tdirection , speed , gust = '' , '' , ''\n\tvariable = []\n\tglobal curUnits\n\tif wxData:\n\t\titem = copy(wxData[0])\n\t\tfor rep in ['(E)']: item = item.replace(rep , '')\n\t\titem = item.replace('O' , '0')\n\t\t#09010KT , 09010G15KT\n\t\tif item.endswith('KT') \\\n\t\tor item.endswith('KTS') \\\n\t\tor item.endswith('MPS') \\\n\t\tor item.endswith('KMH') \\\n\t\tor ((len(item) == 5 or 
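# Worked example for __getTempAndDewpoint above: a leading 'M' marks a negative
# value and 'MM'/'XX' mean missing, so a token like '12/M01' splits into
# temperature '12' and dewpoint 'M01' (sketch):
print(__getTempAndDewpoint(['12/M01']))  # -> ([], '12', 'M01')
print(__getTempAndDewpoint(['03/MM']))   # -> ([], '03', '')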
(len(item) >= 8 and item.find('G') != -1) and item.find('/') == -1) and (item[:5].isdigit() or (item.startswith('VRB') and item[3:5].isdigit()))):\n\t\t\t#In order of frequency\n\t\t\tif item.endswith('KT'): item = item.replace('KT' , '')\n\t\t\telif item.endswith('KTS'): item = item.replace('KTS' , '')\n\t\t\telif item.endswith('MPS'):\n\t\t\t\tcurUnits['Wind-Speed'] = 'm/s'\n\t\t\t\titem = item.replace('MPS' , '')\n\t\t\telif item.endswith('KMH'):\n\t\t\t\tcurUnits['Wind-Speed'] = 'km/h'\n\t\t\t\titem = item.replace('KMH' , '')\n\t\t\tdirection = item[:3]\n\t\t\tif item.find('G') != -1:\n\t\t\t\tgust = item[item.find('G')+1:]\n\t\t\t\tspeed = item[3:item.find('G')]\n\t\t\telse: speed = item[3:]\n\t\t\twxData.pop(0)\n\t\t#elif len(item) > 5 and item[3] == '/' and item[:3].isdigit() and item[4:6].isdigit():\n\t\t\t#direction = item[:3]\n\t\t\t#if item.find('G') != -1:\n\t\t\t\t#print('Found second G: {0}'.format(item))\n\t\t\t\t#gIndex = item.find('G')\n\t\t\t\t#gust = item[gIndex+1:gIndex+3]\n\t\t\t\t#speed = item[4:item.find('G')]\n\t\t\t#else:\n\t\t\t\t#speed = item[4:]\n\t\t\t#wxData.pop(0)\n\t#Separated Gust\n\tif wxData and 1 < len(wxData[0]) < 4 and wxData[0][0] == 'G' and wxData[0][1:].isdigit():\n\t\tgust = wxData.pop(0)[1:]\n\t#Variable Wind Direction\n\tif wxData and len(wxData[0]) == 7 and wxData[0][:3].isdigit() and wxData[0][3] == 'V' and wxData[0][4:].isdigit():\n\t\tvariable = wxData.pop(0).split('V')\n\treturn wxData , direction , speed , gust , variable\n\n#Visibility\ndef __getVisibility(wxData):\n\tvisibility = ''\n\tglobal curUnits\n\tif wxData:\n\t\titem = copy(wxData[0])\n\t\t#Vis reported in statue miles\n\t\tif item.endswith('SM'): #10SM\n\t\t\tif item == 'P6SM': visibility = 'P6'\n\t\t\telif item == 'M1/4SM': visibility = 'M1/4'\n\t\t\telif item.find('/') == -1: visibility = str(int(item[:item.find('SM')])) #str(int()) fixes 01SM\n\t\t\telse: visibility = item[:item.find('SM')] #1/2SM\n\t\t\twxData.pop(0)\n\t\t\tcurUnits['Visibility'] = 'sm'\n\t\t#Vis reported in meters\n\t\telif len(item) == 4 and item.isdigit():\n\t\t\tvisibility = wxData.pop(0)\n\t\t\tcurUnits['Visibility'] = 'm'\n\t\telif 5 <= len(item) <= 7 and item[:4].isdigit() and (item[4] in ['M','N','S','E','W'] or item[4:] == 'NDV'):\n\t\t\tvisibility = wxData.pop(0)[:4]\n\t\t\tcurUnits['Visibility'] = 'm'\n\t\telif len(item) == 5 and item[1:5].isdigit() and item[0] in ['M','P','B']:\n\t\t\tvisibility = wxData.pop(0)[1:5]\n\t\t\tcurUnits['Visibility'] = 'm'\n\t\telif item.endswith('KM') and item[:item.find('KM')].isdigit():\n\t\t\tvisibility = item[:item.find('KM')] + '000'\n\t\t\twxData.pop(0)\n\t\t\tcurUnits['Visibility'] = 'm'\n\t\t#Vis statute miles but split\n\t\telif len(wxData) > 1 and wxData[1].endswith('SM') and wxData[1].find('/') != -1 and item.isdigit(): #2 1/2SM\n\t\t\tvis1 = wxData.pop(0) #2\n\t\t\tvis2 = wxData.pop(0).replace('SM','') #1/2\n\t\t\tvisibility = str(int(vis1)*int(vis2[2])+int(vis2[0]))+vis2[1:] #5/2\n\t\t\tcurUnits['Visibility'] = 'sm'\n\treturn wxData , visibility\n\n#TAF line report type and start/end times\ndef __getTypeAndTimes(wxData):\n\treportType , startTime , endTime = 'BASE' , '' , ''\n\t#TEMPO, BECMG, INTER\n\tif wxData and wxData[0] in ['TEMPO','BECMG','INTER']: reportType = wxData.pop(0)\n\t#PROB[30,40]\n\telif wxData and len(wxData[0]) == 6 and wxData[0].startswith('PROB'): reportType = wxData.pop(0)\n\t#1200/1306\n\tif wxData and len(wxData[0]) == 9 and wxData[0][4] == '/' and wxData[0][:4].isdigit() and wxData[0][5:].isdigit():\n\t\ttimes = 
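# Worked example for __getVisibility above: a split statute-mile value such as
# ['2', '1/2SM'] is folded into one improper fraction, 2*2 + 1 = 5 over 2 (sketch):
print(__getVisibility(['2', '1/2SM']))  # -> ([], '5/2'), i.e. 2 1/2 sm
print(__getVisibility(['9999']))        # -> ([], '9999'), metres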
wxData.pop(0).split('/')\n\t\tstartTime , endTime = times[0] , times[1]\n\t#FM120000\n\telif wxData and len(wxData[0]) > 7 and wxData[0].startswith('FM'):\n\t\treportType = 'FROM'\n\t\tif wxData[0].find('/') != -1 and wxData[0][2:].split('/')[0].isdigit() and wxData[0][2:].split('/')[1].isdigit():\n\t\t\ttSplit = wxData.pop(0)[2:].split('/')\n\t\t\tstartTime = tSplit[0]\n\t\t\tendTime = tSplit[1]\n\t\telif wxData[0][2:8].isdigit(): startTime = wxData.pop(0)[2:6]\n\t\t#TL120600\n\t\tif wxData and len(wxData[0]) > 7 and wxData[0].startswith('TL') and wxData[0][2:8].isdigit(): endTime = wxData.pop(0)[2:6]\n\treturn wxData , reportType , startTime , endTime\n\n#Fix rare cloud layer issues\ndef sanitizeCloud(cloud):\n\tif len(cloud) < 4: return cloud\n\tif not cloud[3].isdigit() and cloud[3] != '/':\n\t\tif cloud[3] == 'O': cloud = cloud[:3] + '0' + cloud[4:] #Bad \"O\": FEWO03 -> FEW003\n\t\telse: #Move modifiers to end: BKNC015 -> BKN015C\n\t\t\tcloud = cloud[:3] + cloud[4:] + cloud[3]\n\treturn cloud\n\n#Transforms a cloud string into a list of strings: [Type , Height (, Optional Modifier)]\n#Returns cloud string list\ndef splitCloud(cloud, beginsWithVV):\n\tsplitCloud = []\n\tcloud = sanitizeCloud(cloud)\n\tif beginsWithVV:\n\t\tsplitCloud.append(cloud[:2])\n\t\tcloud = cloud[2:]\n\twhile len(cloud) >= 3:\n\t\tsplitCloud.append(cloud[:3])\n\t\tcloud = cloud[3:]\n\tif cloud: splitCloud.append(cloud)\n\tif len(splitCloud) == 1: splitCloud.append('')\n\treturn splitCloud\n\n#Clouds\ndef __getClouds(wxData):\n\tclouds = []\n\tfor i in reversed(range(len(wxData))):\n\t\tif wxData[i][:3] in cloudList:\n\t\t\tclouds.append(splitCloud(wxData.pop(i) , False))\n\t\telif wxData[i][:2] == 'VV':\n\t\t\tclouds.append(splitCloud(wxData.pop(i) , True))\n\treturn wxData , sorted(clouds , key=lambda pair: (pair[1],pair[0]))\n\n#Returns int based on current flight rules from parsed METAR data\n#0=VFR , 1=MVFR , 2=IFR , 3=LIFR\n#Note: Common practice is to report IFR if visibility unavailable\ndef getFlightRules(vis , splitCloud):\n\t#Parse visibility\n\tif vis == '': return 2\n\telif vis == 'P6': vis = 10\n\telif vis.find('/') != -1:\n\t\tif vis[0] == 'M': vis = 0\n\t\telse: vis = int(vis.split('/')[0]) / int(vis.split('/')[1])\n\telif len(vis) == 4 and vis.isdigit(): vis = int(vis) * 0.000621371 #Convert meters to miles\n\telse: vis = int(vis)\n\t#Parse ceiling\n\tif splitCloud: cld = int(splitCloud[1])\n\telse: cld = 99\n\t#Determine flight rules\n\tif (vis <= 5) or (cld <= 30):\n\t\tif (vis < 3) or (cld < 10):\n\t\t\tif (vis < 1) or (cld < 5):\n\t\t\t\treturn 3 #LIFR\n\t\t\treturn 2 #IFR\n\t\treturn 1 #MVFR\n\treturn 0 #VFR\n\n#Returns list of ceiling layer from Cloud-List or None if none found\n#Only 'Broken', 'Overcast', and 'Vertical Visibility' are considdered ceilings\n#Prevents errors due to lack of cloud information (eg. '' or 'FEW///')\ndef getCeiling(clouds):\n\tfor cloud in clouds:\n\t\tif len(cloud) > 1 and cloud[1].isdigit() and cloud[0] in ['OVC','BKN','VV']:\n\t\t\treturn cloud\n\treturn None\n\ndef parseRemarks(rmk):\n\trmkData = {}\n\trmk = rmk.split(' ')\n\tfor item in rmk:\n\t\tif len(item) in [9,5] and item[0] == 'T' and item[1:].isdigit():\n\t\t\tif item[1] == '1': rmkData['Temp-Decimal'] = '-' + item[2].replace('0','') + item[3] + '.' + item[4]\n\t\t\telif item[1] == '0': rmkData['Temp-Decimal'] = item[2].replace('0','') + item[3] + '.' + item[4]\n\t\t\tif len(item) == 9:\n\t\t\t\tif item[5] == '1': rmkData['Dew-Decimal'] = '-' + item[6].replace('0','') + item[7] + '.' 
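# Worked example for getFlightRules/getCeiling above, matching the usage note at
# the top of the module (ceiling heights are in hundreds of feet):
print(flightRules[getFlightRules('8', ['BKN', '014', ''])])   # MVFR: ceiling 1400 ft
print(flightRules[getFlightRules('10', None)])                # VFR: no ceiling found
print(flightRules[getFlightRules('1/2', ['VV', '003', ''])])  # LIFR: vis 0.5 sm, 300 ft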
+ item[8]\n\t\t\t\telif item[5] == '0': rmkData['Dew-Decimal'] = item[6].replace('0','') + item[7] + '.' + item[8]\n\treturn rmkData\n\n#Returns True if the station uses the North American format, False if the International format\ndef usesNAFormat(station):\n\tif station[0] in RegionsUsingUSParser: return True\n\telif station[0] in RegionsUsingInternationalParser: return False\n\telif station[:2] in MStationsUsingUSParser: return True\n\telif station[:2] in MStationsUsingInternationalParser: return False\n\n####################################################################################################################################\n##--METAR Functions\n\n#Get METAR report for 'station' from www.aviationweather.gov\n#Returns METAR report string\n#Else returns error int\n#0=Bad connection , 1=Station DNE/Server Error\n#getMETAR pulls from the ADDS API and is 3x faster than getMETAR2\ndef getMETAR(station):\n\ttry:\n\t\tif sys.version_info[0] == 2:\n\t\t\tresponse = urllib2.urlopen(requestURL.format('metar' , station))\n\t\t\txml = response.read()\n\t\telif sys.version_info[0] == 3:\n\t\t\tresponse = urlopen(requestURL.format('metar' , station))\n\t\t\txml = response.read().decode('utf-8')\n\t\tinitDictString = json.dumps(parse(xml))\n\t\tfor word in ['response' , 'data' , 'METAR' , station]:\n\t\t\tif initDictString.find(word) == -1: return 1\n\t\tretDict = json.loads(initDictString)['response']['data']['METAR']\n\t\tif type(retDict) == dict: return retDict['raw_text']\n\t\telif type(retDict) == list and len(retDict) >= 1: return retDict[0]['raw_text']\n\t\telse: return 1\n\texcept:\n\t\treturn 0\n\n#getMETAR2 scrapes the report from html\ndef getMETAR2(station):\n\ttry:\n\t\turl = 'http://www.aviationweather.gov/metar/data?ids='+station+'&format=raw&date=0&hours=0'\n\t\tif sys.version_info[0] == 2:\n\t\t\tresponse = urllib2.urlopen(url)\n\t\t\thtml = response.read()\n\t\telif sys.version_info[0] == 3:\n\t\t\tresponse = urlopen(url)\n\t\t\thtml = response.read().decode('utf-8')\n\t\tif html.find(station+'<') != -1: return 1 #Station does not exist/Database lookup error\n\t\treportStart = html.find(''+station+' ')+6 #Report begins with station iden\n\t\treportEnd = html[reportStart:].find('<') #Report ends with html bracket\n\t\treturn html[reportStart:reportStart+reportEnd].replace('\\n ','')\n\texcept:\n\t\treturn 0\n\n#Returns a dictionary of parsed METAR data\n#Keys: Station, Time, Wind-Direction, Wind-Speed, Wind-Gust, Wind-Variable-Dir, Visibility, Runway-Vis-List, Altimeter, Temperature, Dewpoint, Cloud-List, Other-List, Remarks, Raw-Report, Units\n#Units is dict of identified units of measurement for each field\ndef parseMETAR(txt):\n\tif len(txt) < 2: return\n\tif usesNAFormat(txt[:2]): return parseUSMETAR(txt)\n\telse: return parseInternationalMETAR(txt)\n\ndef parseUSMETAR(txt):\n\tglobal curUnits\n\tcurUnits = copy(naUnits)\n\tretWX = {'Raw-Report':txt}\n\ttxt = __sanitizeFirstPass(txt)\n\twxData , retWX['Remarks'] = __getRemarks(txt)\n\twxData , retWX['Runway-Vis-List'] , notUsed = __sanitize(wxData)\n\twxData , retWX['Station'] , retWX['Time'] = __getStationAndTime(wxData)\n\twxData , retWX['Cloud-List'] = __getClouds(wxData)\n\twxData , retWX['Wind-Direction'] , retWX['Wind-Speed'] , retWX['Wind-Gust'] , retWX['Wind-Variable-Dir'] = __getWindInfo(wxData)\n\twxData , retWX['Altimeter'] = __getAltimeterUS(wxData)\n\twxData , retWX['Visibility'] = __getVisibility(wxData)\n\tretWX['Other-List'] , retWX['Temperature'] , retWX['Dewpoint'] = 
__getTempAndDewpoint(wxData)\n\tretWX['Units'] = curUnits\n\tretWX['Flight-Rules'] = flightRules[getFlightRules(retWX['Visibility'] , getCeiling(retWX['Cloud-List']))]\n\tretWX['Remarks-Info'] = parseRemarks(retWX['Remarks'])\n\treturn retWX\n\ndef parseInternationalMETAR(txt):\n\tglobal curUnits\n\tcurUnits = copy(inUnits)\n\tretWX = {'Raw-Report':txt}\n\ttxt = __sanitizeFirstPass(txt)\n\twxData , retWX['Remarks'] = __getRemarks(txt)\n\twxData , retWX['Runway-Vis-List'] , notUsed = __sanitize(wxData)\n\twxData , retWX['Station'] , retWX['Time'] = __getStationAndTime(wxData)\n\tif 'CAVOK' not in wxData: wxData , retWX['Cloud-List'] = __getClouds(wxData)\n\twxData , retWX['Wind-Direction'] , retWX['Wind-Speed'] , retWX['Wind-Gust'] , retWX['Wind-Variable-Dir'] = __getWindInfo(wxData)\n\twxData , retWX['Altimeter'] = __getAltimeterInternational(wxData)\n\tif 'CAVOK' in wxData:\n\t\tretWX['Visibility'] = '9999'\n\t\tretWX['Cloud-List'] = []\n\t\twxData.pop(wxData.index('CAVOK'))\n\telse:\n\t\twxData , retWX['Visibility'] = __getVisibility(wxData)\n\twxData , retWX['Temperature'] , retWX['Dewpoint'] = __getTempAndDewpoint(wxData)\n\tretWX['Other-List'] = wxData #Other weather\n\tretWX['Units'] = curUnits\n\tretWX['Flight-Rules'] = flightRules[getFlightRules(retWX['Visibility'] , getCeiling(retWX['Cloud-List']))]\n\tretWX['Remarks-Info'] = parseRemarks(retWX['Remarks'])\n\treturn retWX\n\n####################################################################################################################################\n##--TAF Functions\n\n#Get TAF report for 'station' from www.aviationweather.gov\n#Returns TAF report string\n#Else returns error int\n#0=Bad Connection/Unknown Error , 1=Station DNE/Server Error , 2=Could Not Find Report Start\n#getTAF pulls from the ADDS API and is 3x faster than getTAF2\ndef getTAF(station):\n\ttry:\n\t\tif sys.version_info[0] == 2:\n\t\t\tresponse = urllib2.urlopen(requestURL.format('taf' , station))\n\t\t\txml = response.read()\n\t\telif sys.version_info[0] == 3:\n\t\t\tresponse = urlopen(requestURL.format('taf' , station))\n\t\t\txml = response.read().decode('utf-8')\n\t\tinitDictString = json.dumps(parse(xml))\n\t\tfor word in ['response' , 'data' , 'TAF' , station]:\n\t\t\tif initDictString.find(word) == -1: return 1\n\t\tretDict = json.loads(initDictString)['response']['data']['TAF']\n\t\tif type(retDict) == dict: return retDict['raw_text']\n\t\telif type(retDict) == list and len(retDict) >= 1: return retDict[0]['raw_text']\n\t\telse: return 1\n\texcept:\n\t\treturn 0\n\t\t\n#getMETAR2 scrapes the report from html\ndef getTAF2(station):\n\ttry:\n\t\turl = 'http://www.aviationweather.gov/taf/data?ids=' + station + '&format=raw&submit=Get+TAF+data'\n\t\tif sys.version_info[0] == 2:\n\t\t\tresponse = urllib2.urlopen(url)\n\t\t\thtml = response.read()\n\t\telif sys.version_info[0] == 3:\n\t\t\tresponse = urlopen(url)\n\t\t\thtml = response.read().decode('utf-8')\n\t\tif html.find(station+'<') != -1: return 1 #Station does not exist/Database lookup error\n\t\treportStart = html.find('TAF ')+6 #Standard report begins with 'TAF'\n\t\tif reportStart == 5: reportStart = html.find(''+station+' ')+6 #US report begins with station iden\n\t\tif reportStart == 5: return 2 #Beginning of report is non-standard/skewed\n\t\treportEnd = html[reportStart:].find('') #Report ends with html bracket\n\t\treturn html[reportStart:reportStart+reportEnd].replace('\\n ','')\n\texcept:\n\t\treturn 0\n\n#Returns a dictionary of parsed TAF data\n#'delim' is the divider between forecast 
lines. Ex: aviationweather.gov uses '
<br/>&nbsp;&nbsp;'\n#Keys: Station, Time, Forecast, Remarks, Min-Temp, Max-Temp, Raw-Report, Units\n#Oceania stations also have the following keys: Temp-List, Alt-List\n#Forecast is list of report dicts in order of time with the following keys:\n#Type , Start-Time, End-Time, Flight-Rules, Wind-Direction, Wind-Speed, Wind-Gust, Wind-Shear, Visibility, Altimeter, Cloud-List, Icing-List, Turb-List, Other-List, Probability, Raw-Line\n#Units is dict of identified units of measurement for each field\ndef parseTAF(txt , delim='
  '):\n\tretWX = {}\n\tretWX['Raw-Report'] = txt\n\twhile len(txt) > 3 and txt[:4] in ['TAF ' , 'AMD ' , 'COR ']: txt = txt[4:]\n\tnotUsed , retWX['Station'] , retWX['Time'] = __getStationAndTime(txt[:20].split(' '))\n\ttxt = txt.replace(retWX['Station'] , '')\n\ttxt = txt.replace(retWX['Time'] , '')\n\tglobal curUnits\n\tif usesNAFormat(retWX['Station']):\n\t\tisInternational = False\n\t\tcurUnits = copy(naUnits)\n\telse:\n\t\tisInternational = True\n\t\tcurUnits = copy(inUnits)\n\tretWX['Remarks'] = ''\n\tparsedLines = []\n\tprob = ''\n\tlines = txt.strip(' ').split(delim)\n\twhile len(lines) > 0:\n\t\tline = lines[0].strip(' ')\n\t\tline = __sanitizeLine(line)\n\t\t#Remove Remarks from line\n\t\tindex = findFirstInList(line , tafRMKStarts)\n\t\tif index != -1:\n\t\t\tretWX['Remarks'] = line[index:]\n\t\t\tline = line[:index].strip(' ')\n\t\t#Separate new lines fixed by sanitizeLine\n\t\tindex = findFirstInList(line , tafNewLineStarts)\n\t\tif index != -1:\n\t\t\tlines.insert(1 , line[index+1:])\n\t\t\tline = line[:index]\n\t\t#Add empty PROB to next line data\n\t\trawLine = line\n\t\tif len(line) == 6 and line.startswith('PROB'):\n\t\t\tprob = line\n\t\t\tline = ''\n\t\tif line:\n\t\t\tif isInternational: parsedLine = parseInternationalTAFLine(line)\n\t\t\telse: parsedLine = parseUSTAFLine(line)\n\t\t\tparsedLine['Probability'] = prob\n\t\t\tparsedLine['Raw-Line'] = rawLine\n\t\t\tprob = ''\n\t\t\tparsedLines.append(parsedLine)\n\t\tlines.pop(0)\n\tif parsedLines:\n\t\tparsedLines[len(parsedLines)-1]['Other-List'] , retWX['Max-Temp'] , retWX['Min-Temp'] = getTempMinAndMax(parsedLines[len(parsedLines)-1]['Other-List'])\n\t\tif not (retWX['Max-Temp'] or retWX['Min-Temp']): parsedLines[0]['Other-List'] , retWX['Max-Temp'] , retWX['Min-Temp'] = getTempMinAndMax(parsedLines[0]['Other-List'])\n\t\tparsedLines = findMissingTAFTimes(parsedLines)\n\t\tparsedLines = getTAFFlightRules(parsedLines)\n\telse:\n\t\tretWX['Min-Temp'] = ['','']\n\t\tretWX['Max-Temp'] = ['','']\n\tif retWX['Station'][0] == 'A': parsedLines[len(parsedLines)-1]['Other-List'] , retWX['Alt-List'] , retWX['Temp-List'] = getOceaTandQ(parsedLines[len(parsedLines)-1]['Other-List'])\n\tretWX['Forecast'] = parsedLines\n\tretWX['Units'] = curUnits\n\treturn retWX\n\ndef parseUSTAFLine(txt):\n\tglobal curUnits\n\tcurUnits = naUnits\n\tretWX = {}\n\twxData = txt.split(' ')\n\twxData , notUsed , retWX['Wind-Shear'] = __sanitize(wxData , removeCLRandSKC=False)\n\twxData , retWX['Type'] , retWX['Start-Time'] , retWX['End-Time'] = __getTypeAndTimes(wxData)\n\twxData , retWX['Wind-Direction'] , retWX['Wind-Speed'] , retWX['Wind-Gust'] , notUsed = __getWindInfo(wxData)\n\twxData , retWX['Visibility'] = __getVisibility(wxData)\n\twxData , retWX['Cloud-List'] = __getClouds(wxData)\n\tretWX['Other-List'] , retWX['Altimeter'] , retWX['Icing-List'] , retWX['Turb-List'] = __getTAFAltIceTurb(wxData)\n\treturn retWX\n\t\ndef parseInternationalTAFLine(txt):\n\tglobal curUnits\n\tcurUnits = inUnits\n\tretWX = {}\n\twxData = txt.split(' ')\n\twxData , notUsed , retWX['Wind-Shear'] = __sanitize(wxData , removeCLRandSKC=False)\n\twxData , retWX['Type'] , retWX['Start-Time'] , retWX['End-Time'] = __getTypeAndTimes(wxData)\n\twxData , retWX['Wind-Direction'] , retWX['Wind-Speed'] , retWX['Wind-Gust'] , notUsed = __getWindInfo(wxData)\n\tif 'CAVOK' in wxData:\n\t\tretWX['Visibility'] = '9999'\n\t\tretWX['Cloud-List'] = []\n\t\twxData.pop(wxData.index('CAVOK'))\n\telse:\n\t\twxData , retWX['Visibility'] = __getVisibility(wxData)\n\t\twxData , 
retWX['Cloud-List'] = __getClouds(wxData)\n\tretWX['Other-List'] , retWX['Altimeter'] , retWX['Icing-List'] , retWX['Turb-List'] = __getTAFAltIceTurb(wxData)\n\treturn retWX\n\n#Fixes common mistakes with 'new line' signifiers so that they can be recognized\nlineFixes = {'TEMP0':'TEMPO','TEMP O':'TEMPO','TMPO':'TEMPO','TE MPO':'TEMPO','TEMP ':'TEMPO ',' EMPO':' TEMPO','TEMO':'TEMPO','TMPO':'TEMPO','T EMPO':'TEMPO','BECM G':'BECMG','BEMCG':'BECMG','BE CMG':'BECMG','BEMG':'BECMG',' BEC ':' BECMG ','BCEMG':'BECMG','B ECMG':'BECMG'}\ndef __sanitizeLine(txt):\n\tfor key in lineFixes:\n\t\tindex = txt.find(key)\n\t\tif index > -1: txt = txt[:index] + lineFixes[key] + txt[index+len(key):]\n\t#Fix when space is missing following new line signifiers\n\tfor item in ['BECMG' , 'TEMPO']:\n\t\tif txt.find(item) != -1 and txt.find(item + ' ') == -1:\n\t\t\tinsertIndex = txt.find(item)+len(item)\n\t\t\ttxt = txt[:insertIndex] + ' ' + txt[insertIndex:]\n\treturn txt\n\n#Pull out Max temp at time and Min temp at time items\ndef getTempMinAndMax(otherList):\n\ttempMax , tempMin = '' , ''\n\tfor i in reversed(range(len(otherList))):\n\t\titem = otherList[i]\n\t\tif len(item) > 6 and item[0] == 'T' and item.find('/') != -1:\n\t\t\t#TX12/1316Z\n\t\t\tif item[1] == 'X':\n\t\t\t\ttempMax = item\n\t\t\t\totherList.pop(i)\n\t\t\t#TNM03/1404Z\n\t\t\telif item[1] == 'N':\n\t\t\t\ttempMin = item\n\t\t\t\totherList.pop(i)\n\t\t\t#TM03/1404Z T12/1316Z -> Will fix TN/TX\n\t\t\telif item[1] == 'M' or item[1].isdigit():\n\t\t\t\tif tempMin:\n\t\t\t\t\tif int(tempMin[2:tempMin.find('/')].replace('M','-')) > int(item[1:item.find('/')].replace('M','-')):\n\t\t\t\t\t\ttempMax = 'TX' + tempMin[2:]\n\t\t\t\t\t\ttempMin = 'TN' + item[1:]\n\t\t\t\t\telse: tempMax = 'TX' + item[1:]\n\t\t\t\telse: tempMin = 'TN' + item[1:]\n\t\t\t\totherList.pop(i)\n\treturn otherList , tempMax , tempMin\n\n#Returns a list of items removed from a given list that are all digits from 'fromIndex' until hitting a non-digit item\ndef getDigitList(aList , fromIndex):\n\tretList = []\n\taList.pop(fromIndex)\n\twhile len(aList) > fromIndex and aList[fromIndex].isdigit(): retList.append(aList.pop(fromIndex))\n\treturn aList , retList\n\n#Get Temp and Alt list for Oceania TAFs\ndef getOceaTandQ(otherList):\n\ttList , qList = [] , []\n\tif 'T' in otherList: otherList , tList = getDigitList(otherList , otherList.index('T'))\n\tif 'Q' in otherList: otherList , qList = getDigitList(otherList , otherList.index('Q'))\n\treturn otherList , tList , qList\n\n#Fix any missing time issues (except for error/empty lines)\ndef findMissingTAFTimes(tafLines):\n\tlastFMLine = 0\n\tfor i , line in enumerate(tafLines):\n\t\tif line['End-Time'] == '' and isNotTempoOrProb(line['Type']):\n\t\t\tlastFMLine = i\n\t\t\tif i < len(tafLines)-1:\n\t\t\t\tfor report in tafLines[i+1:]:\n\t\t\t\t\tif isNotTempoOrProb(report['Type']): #Ignore TEMPO and PROB\n\t\t\t\t\t\tline['End-Time'] = report['Start-Time']\n\t\t\t\t\t\tbreak\n\tif lastFMLine > 0: tafLines[lastFMLine]['End-Time'] = tafLines[0]['End-Time'] #Special case for final forcast\n\treturn tafLines\n\n#Get flight rules by looking for missing data in prior reports\ndef getTAFFlightRules(tafLines):\n\tfor i , line in enumerate(tafLines):\n\t\ttempVis , tempCloud = line['Visibility'] , line['Cloud-List']\n\t\tfor report in reversed(tafLines[:i]):\n\t\t\tif isNotTempoOrProb(report['Type']): #Ignore TEMPO and PROB\n\t\t\t\tif tempVis == '': tempVis = report['Visibility']\n\t\t\t\tif 'SKC' in report['Other-List'] or 'CLR' in 
report['Other-List']: tempCloud = 'tempClear'\n\t\t\t\telif tempCloud == []: tempCloud = report['Cloud-List']\n\t\t\t\tif tempVis != '' and tempCloud != []: break\n\t\tif tempCloud == 'tempClear': tempCloud = []\n\t\tline['Flight-Rules'] = flightRules[getFlightRules(tempVis , getCeiling(tempCloud))]\n\t\t#print(\"Using \" + str(tempVis) + ' and ' + str(getCeiling(tempCloud)) + ' gives ' + str(line['Flight-Rules']))\n\treturn tafLines\n\ndef isNotTempoOrProb(reportType):\n\treturn reportType != 'TEMPO' and not (len(reportType) == 6 and reportType.startswith('PROB'))\n\n####################################################################################################################################\n##--Translation Functions\n\n#Format wind elements into a readable sentence\n#Returns the translation string\n#Ex: NNE-020 (variable 010 to 040) at 14kt gusting to 20kt\ndef translateWind(wDir , wSpd , wGst , wVar=[] , unit='kt'):\n\tret = ''\n\t#Wind Direction - Cheat Sheet\n\t#(360) -- 011/012 -- 033/034 -- (045) -- 056/057 -- 078/079 -- (090)\n\t#(090) -- 101/102 -- 123/124 -- (135) -- 146/147 -- 168/169 -- (180)\n\t#(180) -- 191/192 -- 213/214 -- (225) -- 236/237 -- 258/259 -- (270)\n\t#(270) -- 281/282 -- 303/304 -- (315) -- 326/327 -- 348/349 -- (360)\n\tif wDir == '000': ret += 'Calm'\n\telif wDir.isdigit():\n\t\twDir = int(wDir)\n\t\tif 304 <= wDir <= 360 or 0 <= wDir <= 56:\n\t\t\tret += 'N'\n\t\t\tif 304 <= wDir <= 348:\n\t\t\t\tif 327 <= wDir <= 348: ret += 'N'\n\t\t\t\tret += 'W'\n\t\t\telif 11 <= wDir <= 56:\n\t\t\t\tif 11 <= wDir <= 33: ret += 'N'\n\t\t\t\tret += 'E'\n\t\telif 124 <= wDir <= 236:\n\t\t\tret += 'S'\n\t\t\tif 124 <= wDir <= 168:\n\t\t\t\tif 147 <= wDir <= 168: ret += 'S'\n\t\t\t\tret += 'E'\n\t\telif 57 <= wDir <= 123:\n\t\t\tret += 'E'\n\t\t\tif 57 <= wDir <= 78: ret += 'NE'\n\t\t\telif 102 <= wDir <= 123: 'SE'\n\t\telif 237 <= wDir <= 303:\n\t\t\tret += 'W'\n\t\t\tif 237 <= wDir <= 258: ret += 'SW'\n\t\t\telif 282 <= wDir <= 303: ret += 'NW'\n\t\tret += '-' + str(wDir)\n\telif wDir == 'VRB': ret += 'Variable'\n\tif wVar:\n\t\tret += ' (variable ' + wVar[0] + ' to ' + wVar[1] + ')'\n\tif wSpd and wSpd != '00':\n\t\tret += ' at ' + wSpd + unit\n\tif wGst:\n\t\tret += ' gusting to ' + wGst + unit\n\treturn ret\n\n#Formats a visibility element into a string with both km and sm values\n#Ex: 8km ( 5sm )\ndef translateVisibility(vis , unit='m'):\n\tif vis == 'P6': return 'Greater than 6sm ( >9999m )'\n\tif vis == 'M1/4': return 'Less than .25sm ( <0400m )'\n\tif vis.find('/') != -1: vis = float(vis[:vis.find('/')]) / int(vis[vis.find('/')+1:])\n\ttry: float(vis)\n\texcept ValueError: return ''\n\tif unit == 'm':\n\t\tconverted = float(vis) * 0.000621371\n\t\tconverted = str(round(converted , 1)).replace('.0' , '') + 'sm'\n\t\tvis = str(round(int(vis)/1000.0 , 1)).replace('.0' , '')\n\t\tunit = 'km'\n\telif unit == 'sm':\n\t\tconverted = float(vis) / 0.621371\n\t\tconverted = str(round(converted , 1)).replace('.0' , '') + 'km'\n\t\tvis = str(vis).replace('.0' , '')\n\telse: return ''\n\treturn vis + unit + ' (' + converted + ')'\n\n#Formats a temperature element into a string with both C and F values\n#Used for both Temp and Dew\n#Ex: 34C (93F)\ndef translateTemp(temp , unit='C'):\n\ttemp = temp.replace('M','-')\n\ttry: int(temp)\n\texcept ValueError: return ''\n\tunit = unit.upper()\n\tif unit == 'C':\n\t\tconverted = int(temp) * 1.8 + 32\n\t\tconverted = str(int(round(converted))) + 'F'\n\telif unit == 'F':\n\t\tconverted = (int(temp) - 32) / 1.8\n\t\tconverted = 
str(int(round(converted))) + 'C'\n\telse: return ''\n\treturn temp + unit + ' (' + converted + ')'\n\n#Formats the altimter element into a string with hPa and inHg values\n#Ex: 30.11 inHg (10.20 hPa)\ndef translateAltimeter(alt , unit='hPa'):\n\tif alt.isdigit(): 1\n\telif not alt.isdigit() and len(alt) == 5 and alt[1:].isdigit(): alt = alt[1:]\n\telse: return ''\n\tif unit == 'hPa':\n\t\tconverted = float(alt) / 33.8638866667\n\t\tconverted = str(round(converted , 2)) + 'inHg'\n\telif unit == 'inHg':\n\t\talt = alt[:2] + '.' + alt[2:]\n\t\tconverted = float(alt) * 33.8638866667\n\t\tconverted = str(int(round(converted))) + 'hPa'\n\telse: return ''\n\treturn alt + unit + ' (' + converted + ')'\n\n#Format cloud list into a readable sentence\n#Returns the translation string\n#Ex: Scattered clouds at 1100ft, Broken layer at 2200ft (Cumulonimbus), Overcast layer at 3600ft - Reported AGL\ncloudTranslationStrings = {\n\t'OVC':'Overcast layer at {0}{1}',\n\t'BKN':'Broken layer at {0}{1}',\n\t'SCT':'Scattered clouds at {0}{1}',\n\t'FEW':'Few clouds at {0}{1}',\n\t'VV':'Vertical visibility up to {0}{1}',\n\t'CLR':'Sky Clear',\n\t'SKC':'Sky Clear',\n\t'AC':'Altocumulus',\n\t'ACC':'Altocumulus Castellanus',\n\t'AS':'Altostratus',\n\t'CB':'Cumulonimbus',\n\t'CC':'Cirrocumulus',\n\t'CI':'Cirrus',\n\t'CS':'Cirrostratus',\n\t'CU':'Cumulus',\n\t'FC':'Fractocumulus',\n\t'FS':'Fractostratus',\n\t'NS':'Nimbostratus',\n 'SC':'Stratocumulus',\n\t'ST':'Stratus',\n\t'TCU':'Towering Cumulus'\n\t}\ndef translateClouds(cloudList , unit='ft'):\n\tretList = []\n\tfor cloud in cloudList:\n\t\tif len(cloud) == 2 and cloud[1].isdigit() and cloud[0] in cloudTranslationStrings: retList.append(cloudTranslationStrings[cloud[0]].format(int(cloud[1])*100 , unit))\n\t\telif len(cloud) == 3 and cloud[1].isdigit() and cloud[0] in cloudTranslationStrings and cloud[2] in cloudTranslationStrings: retList.append((cloudTranslationStrings[cloud[0]]+' ('+cloudTranslationStrings[cloud[2]]+')').format(int(cloud[1])*100 , unit))\n\tif retList:\treturn ', '.join(retList) + ' - Reported AGL'\n\treturn 'Sky clear'\n\n#Translates weather codes into readable strings\n#Returns translated string of variable length\ndef translateWX(wx):\n\twxString = ''\n\tif wx[0] == '+':\n\t\twxString = 'Heavy '\n\t\twx = wx[1:]\n\telif wx[0] == '-':\n\t\twxString = 'Light '\n\t\twx = wx[1:]\n\tif len(wx) not in [2,4,6]: return wx #Return wx if wx is not a code, ex R03/03002V03\n\tfor i in range(len(wx)//2):\n\t\tif wx[:2] in wxReplacements: wxString += wxReplacements[wx[:2]] + ' '\n\t\telse: wxString += wx[:2]\n\t\twx = wx[2:]\n\treturn wxString.strip(' ')\n\n#Translate the list of wx codes (otherList) into a readable sentence\n#Returns the translation string\ndef translateOtherList(wxList):\n\tretList = []\n\tfor item in wxList: retList.append(translateWX(item))\n\treturn ', '.join(retList)\n\n#Translate wind shear into a readable string\n#Ex: Wind shear 2000ft from 140 at 30kt\ndef translateWindShear(shear , unitAlt='ft' , unitWnd='kt'):\n\tif not shear or shear.find('WS') == -1 or shear.find('/') == -1: return ''\n\tshear = shear[2:].split('/')\n\treturn 'Wind shear ' + str(int(shear[0])*100) + unitAlt + ' from ' + shear[1][:3] + ' at ' + shear[1][3:] + unitWnd\n\n#Translate the list of turbulance or icing into a readable sentence\n#Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft\nturbConditions = {'0':'None','1':'Light turbulence','2':'Occasional moderate turbulence in clear air','3':'Frequent moderate turbulence in clear 
air','4':'Occasional moderate turbulence in clouds','5':'Frequent moderate turbulence in clouds','6':'Occasional severe turbulence in clear air','7':'Frequent severe turbulence in clear air','8':'Occasional severe turbulence in clouds','9':'Frequent severe turbulence in clouds','X':'Extreme turbulence'}\niceConditions = {'0':'No icing','1':'Light icing','2':'Light icing in clouds','3':'Light icing in precipitation','4':'Moderate icing','5':'Moderate icing in clouds','6':'Moderate icing in precipitation','7':'Severe icing','8':'Severe icing in clouds','9':'Severe icing in precipitation'}\ndef translateTurbIce(aList , unit='ft'):\n\tif not aList: return ''\n\t#Determine turbulance or icing\n\tif aList[0][0] == '5':\n\t\tlayerType = 'Turbulance'\n\t\tconditions = turbConditions\n\telif aList[0][0] == '6':\n\t\tlayerType = 'Icing'\n\t\tconditions = iceConditions\n\telse: return ''\n\t#Create list of split items (type , floor , height)\n\tsplitList = []\n\tfor item in aList:\n\t\tif len(item) == 6: splitList.append([item[1:2],item[2:5],item[5:6]])\n\t#Combine items that cover a layer greater than 9000ft\n\tfor i in reversed(range(len(splitList)-1)):\n\t\tif splitList[i][2] == '9' and splitList[i][0] == splitList[i+1][0] and int(splitList[i+1][1]) == (int(splitList[i][1]) + int(splitList[i][2])*10):\n\t\t\tsplitList[i][2] = str(int(splitList[i][2]) + int(splitList[i+1][2]))\n\t\t\tsplitList.pop(i+1)\n\t#Return joined, formatted string from splitList items\n\treturn ', '.join(['{0} from {1}{3} to {2}{3}'.format(conditions[item[0]] , int(item[1])*100 , int(item[1])*100 + int(item[2])*1000 , unit) for item in splitList])\n\n#Format the Min and Max temp elemets into a readable string\n#Ex: Maximum temperature of 23C (73F) at 18-15:00Z\ndef translateMinMaxTemp(temp , unit='C'):\n\tif not temp or len(temp) < 7: return ''\n\tif temp[:2] == 'TX': tempType = 'Maximum'\n\telif temp[:2] == 'TN': tempType = 'Minimum'\n\telse: return ''\n\ttemp = temp[2:].replace('M' , '-').replace('Z','').split('/')\n\tif len(temp[1]) > 2: temp[1] = temp[1][:2] + '-' + temp[1][2:]\n\treturn tempType + ' temperature of ' + translateTemp(temp[0] , unit) + ' at ' + temp[1] + ':00Z'\n\n#Translate Visibility, Altimeter, Clouds, and Other\ndef translateShared(wxData , units):\n\ttranslations = {}\n\ttranslations['Visibility'] = translateVisibility(wxData['Visibility'] , units['Visibility'])\n\ttranslations['Altimeter'] = translateAltimeter(wxData['Altimeter'] , units['Altimeter'])\n\ttranslations['Clouds'] = translateClouds(wxData['Cloud-List'] , units['Altitude'])\n\ttranslations['Other'] = translateOtherList(wxData['Other-List'])\n\treturn translations\n\n#Translate the results of parseMETAR\n#Keys: Wind, Visibility, Clouds, Temperature, Dewpoint, Altimeter, Other\ndef translateMETAR(wxData):\n\tunits = wxData['Units']\n\ttranslations = translateShared(wxData , units)\n\ttranslations['Wind'] = translateWind(wxData['Wind-Direction'] , wxData['Wind-Speed'] , wxData['Wind-Gust'] , wxData['Wind-Variable-Dir'] , units['Wind-Speed'])\n\ttranslations['Temperature'] = translateTemp(wxData['Temperature'] , units['Temperature'])\n\ttranslations['Dewpoint'] = translateTemp(wxData['Dewpoint'] , units['Temperature'])\n\treturn translations\n\n#Translate the results of parseTAF\n#Keys: Forecast, Min-Temp, Max-Temp\n#Forecast keys: Wind, Visibility, Clouds, Altimeter, Wind-Shear, Turbulance, Icing, Other\ndef translateTAF(wxData):\n\ttranslations = {'Forecast':[]}\n\tunits = wxData['Units']\n\tfor line in wxData['Forecast']:\n\t\ttransLine = 
translateShared(line , units)\n\t\ttransLine['Wind'] = translateWind(line['Wind-Direction'] , line['Wind-Speed'] , line['Wind-Gust'] , unit=units['Wind-Speed'])\n\t\ttransLine['Wind-Shear'] = translateWindShear(line['Wind-Shear'] , wxData['Units']['Altitude'] , units['Wind-Speed'])\n\t\ttransLine['Turbulance'] = translateTurbIce(line['Turb-List'] , units['Altitude'])\n\t\ttransLine['Icing'] = translateTurbIce(line['Icing-List'] , units['Altitude'])\n\t\ttranslations['Forecast'].append(transLine)\n\ttranslations['Min-Temp'] = translateMinMaxTemp(wxData['Min-Temp'] , units['Temperature'])\n\ttranslations['Max-Temp'] = translateMinMaxTemp(wxData['Max-Temp'] , units['Temperature'])\n\treturn translations\n\ndef createMETARSummary(wxTrans):\n\tsumList = []\n\tif 'Wind' in wxTrans and wxTrans['Wind']: sumList.append('Winds ' + wxTrans['Wind'])\n\tif 'Visibility' in wxTrans and wxTrans['Visibility']: sumList.append('Vis ' + wxTrans['Visibility'][:wxTrans['Visibility'].find(' (')].lower())\n\tif 'Temperature' in wxTrans and wxTrans['Temperature']: sumList.append('Temp ' + wxTrans['Temperature'][:wxTrans['Temperature'].find(' (')])\n\tif 'Dewpoint' in wxTrans and wxTrans['Dewpoint']: sumList.append('Dew ' + wxTrans['Dewpoint'][:wxTrans['Dewpoint'].find(' (')])\n\tif 'Altimeter' in wxTrans and wxTrans['Altimeter']: sumList.append('Alt ' + wxTrans['Altimeter'][:wxTrans['Altimeter'].find(' (')])\n\tif 'Other' in wxTrans and wxTrans['Other']: sumList.append(wxTrans['Other'])\n\tif 'Clouds' in wxTrans and wxTrans['Clouds']: sumList.append(wxTrans['Clouds'].replace(' - Reported AGL' , ''))\n\treturn ', '.join(sumList)\n\ndef createTAFLineSummary(wxTrans):\n\tsumList = []\n\tif 'Wind' in wxTrans and wxTrans['Wind']: sumList.append('Winds ' + wxTrans['Wind'])\n\tif 'Visibility' in wxTrans and wxTrans['Visibility']: sumList.append('Vis ' + wxTrans['Visibility'][:wxTrans['Visibility'].find(' (')].lower())\n\tif 'Altimeter' in wxTrans and wxTrans['Altimeter']: sumList.append('Alt ' + wxTrans['Altimeter'][:wxTrans['Altimeter'].find(' (')])\n\tif 'Other' in wxTrans and wxTrans['Other']: sumList.append(wxTrans['Other'])\n\tif 'Clouds' in wxTrans and wxTrans['Clouds']: sumList.append(wxTrans['Clouds'].replace(' - Reported AGL' , ''))\n\tif 'Wind-Shear' in wxTrans and wxTrans['Wind-Shear']: sumList.append(wxTrans['Wind-Shear'])\n\tif 'Turbulance' in wxTrans and wxTrans['Turbulance']: sumList.append(wxTrans['Turbulance'])\n\tif 'Icing' in wxTrans and wxTrans['Icing']: sumList.append(wxTrans['Icing'])\n\treturn ', '.join(sumList)\n\n####################################################################################################################################\n##--Station data\n\n#Provide basic station info with the keys below\ndbHeaders = ['ICAO','Country','State','City','Name','IATA','Elevation','Latitude','Longitude','Priority']\ndef getInfoForStation(station):\n\tconn = sqlite3.connect(stationDBPath)\n\t#conn.text_factory = str\n\tcurs = conn.cursor()\n\tcurs.execute('SELECT '+','.join(dbHeaders)+' FROM Stations WHERE icao=?' 
, (station,))\n\trow = curs.fetchone()\n\tret = {}\n\tif row:\n\t\tfor i in range(len(row)): ret[dbHeaders[i]] = row[i]\n\treturn ret\n\n####################################################################################################################################\n##--Example Testing\n#These tests provide example usage for the primary public functions\n\n#Adds timestamp to begining of print statement\n#Returns string of time + logString\ndef timestamp(logString): return strftime('%d %H:%M:%S - ') + logString\n\n#Retrive, parse, and display METAR report\ndef metarTest(station):\n\tret = timestamp(station + '\\n\\n')\n\ttxt = getMETAR(station)\n\t#txt = 'VTSF 230200Z 22006KT 1701V240 9999 FEW020 30/25 Q1013 A2993 INFO C / RWY 19'\n\tif type(txt) == int: \n\t\tif txt: ret += 'Station does not exist/Database lookup error'\n\t\telse: ret += 'http connection error'\n\telse:\n\t\tdata = parseMETAR(txt)\n\t\tfor key in data: ret += '{0} -- {1}\\n'.format(key , data[key])\n\t\tret += 'Flight rules for \"{0}\" and \"{1}\" -- \"{2}\"'.format(data['Visibility'] , getCeiling(data['Cloud-List']) , flightRules[getFlightRules(data['Visibility'] , getCeiling(data['Cloud-List']))])\n\t\ttranslation = translateMETAR(data)\n\t\tret += '\\n\\nTranslation'\n\t\tfor key in translation: ret += '\\n' + key + ': ' + translation[key]\n\t\tret += '\\nSummary: ' + createMETARSummary(translation) + '\\n'\n\t\t#ret += str(getInfoForStation(station))\n\tprint(ret)\n\n#Retrive, parse, and display TAF report\ndef tafTest(station):\n\tret = timestamp(station + '\\n\\n')\n\ttxt = getTAF(station)\n\t#txt = 'TAF OPLA 180345Z 1806/1912 33008KT 4000 HZ NSC TX42/1810Z TN29/1900Z
<br/>&nbsp;&nbsp;PROB30<br/>&nbsp;&nbsp;TEMPO 1810/1812 32015G25KT 3000 DRDU FM 181500 TL 190100 25003KT 4000 HZ NSC<br/>&nbsp;&nbsp;TEMPO 1901/1903 30004KT 2500 FU'\n\tif type(txt) == int: \n\t\tif txt: ret += 'Station does not exist/Database lookup error'\n\t\telse: ret += 'http connection error'\n\telse:\n\t\tdelim = '
  '\n\t\tret = timestamp(station + '\\n\\n') + txt + '\\n\\n'\n\t\ttaf = parseTAF(txt , delim)\n\t\t#Print report lines\n\t\tfor line in txt.strip(' ').split(delim): ret += line + '\\n'\n\t\t#Print header data\n\t\tret += '\\n' + taf['Station'] + '\\n' + taf['Time'] + '\\n' + taf['Remarks'] + '\\n' + str(taf['Units']) + '\\n' + str(taf['Min-Temp']) + '\\n' + str(taf['Max-Temp']) + '\\n\\n'\n\t\t#Print Forecasts' start and end times\n\t\tfor line in taf['Forecast']:\n\t\t\tret += line['Start-Time'] + ' - ' + line['End-Time'] + '\\n'\n\t\tret += '\\n'\n\t\t#Print Forecast dicts\n\t\tfor lineDict in taf['Forecast']: ret += str(lineDict) + '\\n\\n'\n\t\t#Print Translation\n\t\ttrans = translateTAF(taf)\n\t\tret += 'Translation\\n\\n' + str(trans) + '\\n\\nSummary:'\n\t\tfor line in trans['Forecast']: ret += '\\n' + createTAFLineSummary(line)\n\tprint(ret)\n\nif __name__ == '__main__':\n\tstation = 'CWER'\n\t#print(getInfoForStation(station))\n\tmetarTest(station)\n\tprint('\\n------------------------------------------\\n')\n\t#tafTest(station)\n","sub_path":"AVWXAPI/avwx.py","file_name":"avwx.py","file_ext":"py","file_size_in_byte":50290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"598574930","text":"from enums.signal import Signal\nfrom indicators import MacdIndicator, RsiIndicator\nfrom strategies import Strategy\nfrom common.exceptions import StrategyException\n\n\nclass MacdRsiStrategy(Strategy):\n\n def __init__(self, macd: MacdIndicator, rsi: RsiIndicator):\n self._macd = macd\n self._rsi = rsi\n \n @property\n def name(self):\n return self.__class__.__name__\n\n def execute(self) -> Signal:\n macds, signals = self._macd.calculate()\n rsis = self._rsi.calculate()\n \n last_macd, last_signal = macds[-1], signals[-1]\n last_rsi = rsis[-1]\n \n if last_macd > last_signal and last_rsi < self._rsi.overbought:\n signal = Signal.BUY\n elif last_macd < last_signal and last_rsi > self._rsi.oversold:\n signal = Signal.SELL\n else:\n signal = Signal.HOLD\n\n return signal\n\n\n\n","sub_path":"strategies/macd_rsi_strategy.py","file_name":"macd_rsi_strategy.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"51655621","text":"# print((50000000000/100)*4000 + 1684)\n# Part 2: var 100 generation ökar 4000. 
m värdet är 1684\n# # 98 5684\n# # 198 9684\n# # 298 13684\n# # 398 17684\n# # 498 21684\n\n\n\n\n\n\n\n\ninit_state = \"##.......#.######.##..#...#.#.#..#...#..####..#.##...#....#...##..#..#.##.##.###.##.#.......###....#\"\ntest1 = \"...##\"\n\n# print(file_lines[1][0:5])\n# print(file_lines[1][9:10], end=\"\")\n\ndef next_generation(state):\n if type(state) is str:\n pots = {}\n for idx, ch in enumerate(state):\n pots[idx] = ch\n else:\n pots = state\n\n #for key in sorted(pots.keys()):\n #print(key, pots[key])\n\n first_key = None\n last_key = None\n for k in sorted(pots.keys()):\n first_key = k if first_key == None else first_key\n last_key = k if last_key == None or last_key < k else last_key\n\n for a in range(last_key + 1, last_key + 6):\n pots[a] = \".\"\n for b in range(first_key - 1, first_key - 6, -1):\n pots[b] = \".\"\n\n #for key in sorted(pots.keys()):\n # print(key, pots[key])\n\n return_dick = {}\n path = r\"C:\\Users\\Herman\\Desktop\\PROGRAMMERING\\Projekt\\Advent_of_code\\adv12.txt\"\n with open(path) as file:\n file_lines = file.read().splitlines()\n for key in sorted(pots.keys()):\n # print(key)\n part_of_line = \"\"\n is_in_dot = False\n is_in_hash = False\n if (key - 2) in pots and (key + 2) in pots:\n for k2 in range(key - 2, key + 3):\n #print(\"k2\", k2)\n #print(pots[k2])\n part_of_line += pots[k2]\n for line in file_lines:\n #print(key, part_of_line, line[0:5], \" \", end=\"\")\n if line[9:10] == \"#\" and part_of_line == line[0:5]:\n is_in_hash = True\n if line[9:10] == \".\" and part_of_line == line[0:5]:\n is_in_dot = True\n if is_in_hash:\n return_dick[key] = \"#\"\n if is_in_dot:\n return_dick[key] = \".\"\n if not is_in_hash and not is_in_dot:\n return_dick[key] = \".\"\n return return_dick\n\n\n\ng = next_generation(init_state)\nfor d in sorted(g.keys()):\n print(d, g[d])\nfor _ in range(2000 - 1):\n # print(\"generation\",_)\n g = next_generation(g)\n if (_ + 2) % 100 == 0:\n # print(g)\n plant_id_sum = 0\n for ke in sorted(g.keys()):\n if g[ke] == \"#\":\n plant_id_sum += ke\n print(_, plant_id_sum)\n\n# print((50000000000/100)*4000 + 1684)\n# Part 2: var 100 ökning ökar 4000. 
m värdet är 1684\n# # 98 5684\n# # 198 9684\n# # 298 13684\n# # 398 17684\n# # 498 21684\n\n# print((500/100)\n\n# first_key = None\n# last_key = None\n# for key in sorted(pots.keys()):\n# first_key = first_key or key\n# last_key = last_key or key\n# last_key = key if last_key < key else last_key\n#\n# for a in range(last_key + 1, last_key + 6):\n# pots[a] = \".\"\n# for b in range(first_key - 1, first_key - 6, -1):\n# pots[b] = \".\"\n#\n#\n#\n\n#\n# return_dick = {}\n# for key in sorted(pots.keys()):\n# part_of_line = \"\"\n# is_in = False\n# if (key - 2) in pots and (key + 2) in pots:\n# for k in range(key - 2, key + 3):\n# part_of_line += pots[k]\n# for line in file_lines:\n# #print(key, part_of_line, line[0:5], \" \", end=\"\")\n# if part_of_line == line[0:5]:\n# #print(\"3\",\"match\", end=\"\")\n# is_in = True\n# #print()\n# if is_in:\n# return_dick[key] = \"#\"\n# else:\n# return_dick[key] = \".\"\n# return return_dick\n#\n# gen1 = generationizer(init_state)\n# gen2 = generationizer(gen1)\n#\n# for key2 in sorted(gen2.keys()):\n# print(key2,gen2[key2], end=\"\")\n","sub_path":"adv12-p2.py","file_name":"adv12-p2.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"602903573","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 29 00:26:54 2019\r\n\r\n@author: RUDRAJIT\r\n\"\"\"\r\nfrom PIL import Image\r\n\r\nimg=Image.open('test2.png')\r\nrgb_im=img.convert('RGB')#convert image to rgb value\r\nr,g,b=rgb_im.getpixel((101,1))#row column\r\nprint(r,g,b)\r\n\r\n\r\n","sub_path":"map-area-estimation-technique/Image-tut2.py","file_name":"Image-tut2.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"382556054","text":"#\n# @lc app=leetcode id=445 lang=python3\n#\n# [445] Add Two Numbers II\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n '''\n Accepted\n 1563/1563 cases passed (72 ms)\n Your runtime beats 99.55 % of python3 submissions\n Your memory usage beats 12.96 % of python3 submissions (14.4 MB)\n\n Time complexity : O(max(m, n))\n Space complexity : O(max(m, n))\n '''\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n num1, num2 = [], []\n\n while l1 is not None:\n num1.append(l1.val)\n l1 = l1.next\n \n while l2 is not None:\n num2.append(l2.val)\n l2 = l2.next\n \n retval = None\n carry = 0\n while len(num1) > 0 or len(num2) > 0:\n if len(num1) > 0 and len(num2) > 0:\n sum_ = num1.pop() + num2.pop()\n elif len(num1) > 0:\n sum_ = num1.pop()\n else:\n sum_ = num2.pop()\n \n retval = ListNode((sum_ + carry) % 10, retval)\n carry = (sum_ + carry) // 10\n \n if carry > 0:\n retval = ListNode(carry, retval)\n\n return retval\n# @lc code=end\n\n","sub_path":"leetcode/python3/[445]_add-two-numbers-ii.py","file_name":"[445]_add-two-numbers-ii.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"466375511","text":"\"\"\"\n Veti primi un string de la tastatura.\n Va trebui sa printati un tuplu care sa contina toate literele acelui string.\n\n exemplu:\n Veti primi: 'cmi'\n Veti printa: ('c', 'm', 'i')\n\"\"\"\nx = input()\nl1 = []\nfor i in x:\n 
l1.append(i)\nprint(tuple(l1))\n","sub_path":"session2/ex19.py","file_name":"ex19.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"321251798","text":"#!/usr/bin/env python\n\"\"\"Sub-sample sequence from a FASTA, FASTQ or SFF file.\n\nThis tool is a short Python script which requires Biopython 1.62 or later\nfor SFF file support. If you use this tool in scientific work leading to a\npublication, please cite the Biopython application note:\n\nCock et al 2009. Biopython: freely available Python tools for computational\nmolecular biology and bioinformatics. Bioinformatics 25(11) 1422-3.\nhttp://dx.doi.org/10.1093/bioinformatics/btp163 pmid:19304878.\n\nThis script is copyright 2010-2013 by Peter Cock, The James Hutton Institute\n(formerly the Scottish Crop Research Institute, SCRI), UK. All rights reserved.\nSee accompanying text file for licence details (MIT license).\n\nThis is version 0.1.0 of the script, use -v or --version to get the version.\n\"\"\"\nimport os\nimport sys\n\ndef stop_err(msg, err=1):\n sys.stderr.write(msg.rstrip() + \"\\n\")\n sys.exit(err)\n\nif \"-v\" in sys.argv or \"--version\" in sys.argv:\n print(\"v0.1.0\")\n sys.exit(0)\n\n#Parse Command Line\nif len(sys.argv) < 5:\n stop_err(\"Requires at least four arguments: seq_format, in_file, out_file, mode, ...\")\nseq_format, in_file, out_file, mode = sys.argv[1:5]\nif in_file != \"/dev/stdin\" and not os.path.isfile(in_file):\n stop_err(\"Missing input file %r\" % in_file)\n\nif mode == \"everyNth\":\n if len(sys.argv) != 6:\n stop_err(\"If using everyNth, just need argument N (integer, at least 2)\")\n try:\n N = int(sys.argv[5])\n except:\n stop_err(\"Bad N argument %r\" % sys.argv[5])\n if N < 2:\n stop_err(\"Bad N argument %r\" % sys.argv[5])\n if (N % 10) == 1:\n sys.stderr.write(\"Sampling every %ist sequence\\n\" % N)\n elif (N % 10) == 2:\n sys.stderr.write(\"Sampling every %ind sequence\\n\" % N)\n elif (N % 10) == 3:\n sys.stderr.write(\"Sampling every %ird sequence\\n\" % N)\n else:\n sys.stderr.write(\"Sampling every %ith sequence\\n\" % N)\n def sampler(iterator):\n global N\n count = 0\n for record in iterator:\n count += 1\n if count % N == 1:\n yield record\nelif mode == \"percentage\":\n if len(sys.argv) != 6:\n stop_err(\"If using percentage, just need percentage argument (float, range 0 to 100)\")\n try:\n percent = float(sys.argv[5]) / 100.0\n except:\n stop_err(\"Bad percent argument %r\" % sys.argv[5])\n if percent <= 0.0 or 1.0 <= percent:\n stop_err(\"Bad percent argument %r\" % sys.argv[5])\n sys.stderr.write(\"Sampling %0.3f%% of sequences\\n\" % (100.0 * percent))\n def sampler(iterator):\n global percent\n count = 0\n taken = 0\n for record in iterator:\n count += 1\n if percent * count > taken:\n taken += 1\n yield record\nelse:\n stop_err(\"Unsupported mode %r\" % mode)\n\ndef raw_fasta_iterator(handle):\n \"\"\"Yields raw FASTA records as multi-line strings.\"\"\"\n while True:\n line = handle.readline()\n if line == \"\":\n return # Premature end of file, or just empty?\n if line[0] == \">\":\n break\n\n no_id_warned = False\n while True:\n if line[0] != \">\":\n raise ValueError(\n \"Records in Fasta files should start with '>' character\")\n try:\n id = line[1:].split(None, 1)[0]\n except IndexError:\n if not no_id_warned:\n sys.stderr.write(\"WARNING - Malformed FASTA entry with no identifier\\n\")\n no_id_warned = True\n id = None\n lines = [line]\n line = handle.readline()\n while 
True:\n if not line:\n break\n if line[0] == \">\":\n break\n lines.append(line)\n line = handle.readline()\n yield \"\".join(lines)\n if not line:\n return # StopIteration \n\ndef fasta_filter(in_file, out_file, iterator_filter):\n count = 0\n #Galaxy now requires Python 2.5+ so can use with statements,\n with open(in_file) as in_handle:\n with open(out_file, \"w\") as pos_handle:\n for record in iterator_filter(raw_fasta_iterator(in_handle)):\n count += 1\n pos_handle.write(record)\n return count\n\ntry:\n from galaxy_utils.sequence.fastq import fastqReader, fastqWriter\n def fastq_filter(in_file, out_file, iterator_filter):\n count = 0\n #from galaxy_utils.sequence.fastq import fastqReader, fastqWriter\n reader = fastqReader(open(in_file, \"rU\"))\n writer = fastqWriter(open(out_file, \"w\"))\n for record in iterator_filter(reader):\n count += 1\n writer.write(record)\n writer.close()\n reader.close()\n return count\nexcept ImportError:\n from Bio.SeqIO.QualityIO import FastqGeneralIterator\n def fastq_filter(in_file, out_file, iterator_filter):\n count = 0\n with open(in_file) as in_handle:\n with open(out_file, \"w\") as pos_handle:\n for title, seq, qual in iterator_filter(FastqGeneralIterator(in_handle)):\n count += 1\n pos_handle.write(\"@%s\\n%s\\n+\\n%s\\n\" % (title, seq, qual))\n return count\n\ndef sff_filter(in_file, out_file, iterator_filter):\n count = 0\n try:\n from Bio.SeqIO.SffIO import SffIterator, SffWriter\n except ImportError:\n stop_err(\"SFF filtering requires Biopython 1.54 or later\")\n try:\n from Bio.SeqIO.SffIO import ReadRocheXmlManifest\n except ImportError:\n #Prior to Biopython 1.56 this was a private function\n from Bio.SeqIO.SffIO import _sff_read_roche_index_xml as ReadRocheXmlManifest\n with open(in_file, \"rb\") as in_handle:\n try:\n manifest = ReadRocheXmlManifest(in_handle)\n except ValueError:\n manifest = None\n in_handle.seek(0)\n with open(out_file, \"wb\") as out_handle:\n writer = SffWriter(out_handle, xml=manifest)\n in_handle.seek(0) #start again after getting manifest\n count = writer.write_file(iterator_filter(SffIterator(in_handle)))\n #count = writer.write_file(SffIterator(in_handle))\n return count\n\nif seq_format.lower()==\"sff\":\n count = sff_filter(in_file, out_file, sampler)\nelif seq_format.lower()==\"fasta\":\n count = fasta_filter(in_file, out_file, sampler)\nelif seq_format.lower().startswith(\"fastq\"):\n count = fastq_filter(in_file, out_file, sampler)\nelse:\n stop_err(\"Unsupported file type %r\" % seq_format)\n\nsys.stderr.write(\"Sampled %i records\\n\" % count)\n","sub_path":"tools/sample_seqs/sample_seqs.py","file_name":"sample_seqs.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"514286152","text":"# # for windows laptop\nrootPath = 'jiangnan2020_Simple\\\\'\ntrainImage = rootPath + 'train\\\\train\\\\'\ntestImage = rootPath + 'test\\\\test\\\\'\nmodelPath = 'model\\\\LR.pkl'\ntrainSize = 900\npredictSize = 100\n\n# # for linux server\n# rootPath = 'jiangnan2020/'\n# trainImage = rootPath + 'train/train/'\n# testImage = rootPath + 'test/test/'\n# modelPath = 'model/LR.pkl'\n# trainSize = 15000\n# predictSize = 5708\n\ntrainCSV = rootPath + 'train.csv'\ntestCSV = rootPath + 'test.csv'\nsubmitCSV = rootPath + 'submit.csv'\n\nimageW = 200\nimageH = 200\nimageSize = imageW * 
imageH\n","sub_path":"History_Edition/1_LR/constant/constPath.py","file_name":"constPath.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"652932689","text":"\"\"\"\nTests scikit-imputer converter.\n\"\"\"\nimport unittest\nimport numpy as np\nfrom sklearn.preprocessing import Imputer\nfrom onnxmltools.convert.sklearn.ImputerConverter import ImputerConverter\nfrom onnxmltools.convert.sklearn.convert import convert\nfrom onnxmltools.convert.common.ConvertContext import ConvertContext\nfrom onnxmltools.convert.common.model_util import make_tensor_value_info\nfrom onnxmltools.proto import onnx_proto\n\n\nclass TestSklearnImputerConverter(unittest.TestCase):\n\n def test_model_imputer(self):\n model = Imputer(missing_values='NaN', strategy='mean', axis=0)\n model.fit([[1, 2], [np.nan, 3], [7, 6]])\n model_onnx = convert(model, 'scikit-learn imputer', [('features', 'int32', 2)])\n self.assertTrue(model_onnx is not None)\n\n def test_imputer_int_inputs(self):\n model = Imputer(missing_values='NaN', strategy='mean', axis=0)\n model.fit([[1, 2], [np.nan, 3], [7, 6]])\n\n context = ConvertContext()\n node = ImputerConverter.convert(context, model,\n [make_tensor_value_info('features', onnx_proto.TensorProto.INT32, [2])])\n self.assertTrue(node is not None)\n\n # should contain two nodes\n self.assertEqual(len(node), 2)\n # last node should contain the Imputer\n outputs = node[-1].outputs\n self.assertEqual(len(outputs), 1)\n self.assertEqual(outputs[0].type.tensor_type.shape.dim[-1].dim_value, 2)\n\n def test_imputer_float_inputs(self):\n model = Imputer(missing_values='NaN', strategy='mean', axis=0)\n model.fit([[1, 2], [np.nan, 3], [7, 6]])\n\n context = ConvertContext()\n node = ImputerConverter.convert(context, model,\n [make_tensor_value_info('features', onnx_proto.TensorProto.FLOAT, [2])])\n self.assertTrue(node is not None)\n\n # should contain two nodes\n self.assertEqual(len(node), 1)\n\n # last node should contain the Imputer\n outputs = node[-1].outputs\n self.assertEqual(len(outputs), 1)\n self.assertEqual(outputs[0].type.tensor_type.shape.dim[-1].dim_value, 2)\n","sub_path":"tests/sklearn/test_ImputerConverter.py","file_name":"test_ImputerConverter.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"390646897","text":"import random\nPALABRAS = 
[\n\t\"de\",\n\t\"la\",\n\t\"que\",\n\t\"el\",\n\t\"en\",\n\t\"yo\",\n\t\"ayer\",\n\t\"los\",\n\t\"se\",\n\t\"del\",\n\t\"las\",\n\t\"un\",\n\t\"por\",\n\t\"con\",\n\t\"no\",\n\t\"una\",\n\t\"su\",\n\t\"para\",\n\t\"es\",\n\t\"al\",\n\t\"lo\",\n\t\"como\",\n\t\"mas\",\n\t\"octavo\",\n\t\"pero\",\n\t\"sus\",\n\t\"le\",\n\t\"ha\",\n\t\"me\",\n\t\"si\",\n\t\"sin\",\n\t\"sobre\",\n\t\"este\",\n\t\"ya\",\n\t\"entre\",\n\t\"cuando\",\n\t\"todo\",\n\t\"esta\",\n\t\"ser\",\n\t\"son\",\n\t\"dos\",\n\t\"tambien\",\n\t\"fue\",\n\t\"habia\",\n\t\"era\",\n\t\"muy\",\n\t\"anos\",\n\t\"hasta\",\n\t\"desde\",\n\t\"esta\",\n\t\"mi\",\n\t\"porque\",\n\t\"que\",\n\t\"solo\",\n\t\"han\",\n\t\"yo\",\n\t\"hay\",\n\t\"vez\",\n\t\"puede\",\n\t\"todos\",\n\t\"asi\",\n\t\"nos\",\n\t\"ni\",\n\t\"parte\",\n\t\"tiene\",\n\t\"el\",\n\t\"uno\",\n\t\"donde\",\n\t\"bien\",\n\t\"tiempo\",\n\t\"mismo\",\n\t\"ese\",\n\t\"ahora\",\n\t\"cada\",\n\t\"e\",\n\t\"vida\",\n\t\"otro\",\n\t\"despues\",\n\t\"te\",\n\t\"otros\",\n\t\"aunque\",\n\t\"esa\",\n\t\"eso\",\n\t\"hace\",\n\t\"otra\",\n\t\"gobierno\",\n\t\"tan\",\n\t\"durante\",\n\t\"siempre\",\n\t\"dia\",\n\t\"tanto\",\n\t\"ella\",\n\t\"tres\",\n\t\"si\",\n\t\"dijo\",\n\t\"sido\",\n\t\"gran\",\n\t\"pais\",\n\t\"segun\",\n\t\"menos\",\n\t\"mundo\",\n\t\"ano\",\n\t\"antes\",\n\t\"estado\",\n\t\"contra\",\n\t\"sino\",\n\t\"forma\",\n\t\"caso\",\n\t\"nada\",\n\t\"hacer\",\n\t\"general\",\n\t\"estaba\",\n\t\"poco\",\n\t\"estos\",\n\t\"presidente\",\n\t\"mayor\",\n\t\"ante\",\n\t\"unos\",\n\t\"les\",\n\t\"algo\",\n\t\"hacia\",\n\t\"casa\",\n\t\"ellos\",\n\t\"ayer\",\n\t\"hecho\",\n\t\"primera\",\n\t\"mucho\",\n\t\"mientras\",\n\t\"ademas\",\n\t\"quien\",\n\t\"momento\",\n\t\"millones\",\n\t\"esto\",\n\t\"espana\",\n\t\"hombre\",\n\t\"estan\",\n\t\"pues\",\n\t\"hoy\",\n\t\"lugar\",\n\t\"madrid\",\n\t\"nacional\",\n\t\"trabajo\",\n\t\"otras\",\n\t\"mejor\",\n\t\"nuevo\",\n\t\"decir\",\n\t\"algunos\",\n\t\"entonces\",\n\t\"todas\",\n\t\"dias\",\n\t\"debe\",\n\t\"politica\",\n\t\"como\",\n\t\"casi\",\n\t\"toda\",\n\t\"tal\",\n\t\"luego\",\n\t\"pasado\",\n\t\"primer\",\n\t\"medio\",\n\t\"va\",\n\t\"estas\",\n\t\"sea\",\n\t\"tenia\",\n\t\"nunca\",\n\t\"poder\",\n\t\"aqui\",\n\t\"ver\",\n\t\"veces\",\n\t\"embargo\",\n\t\"partido\",\n\t\"personas\",\n\t\"grupo\",\n\t\"cuenta\",\n\t\"pueden\",\n\t\"tienen\",\n\t\"misma\",\n\t\"nueva\",\n\t\"cual\",\n\t\"fueron\",\n\t\"mujer\",\n\t\"frente\",\n\t\"jose\",\n\t\"tras\",\n\t\"cosas\",\n\t\"fin\",\n\t\"ciudad\",\n\t\"he\",\n\t\"social\",\n\t\"manera\",\n\t\"tener\",\n\t\"sistema\",\n\t\"sera\",\n\t\"historia\",\n\t\"muchos\",\n\t\"juan\",\n\t\"tipo\",\n\t\"cuatro\",\n\t\"dentro\",\n\t\"nuestro\",\n\t\"punto\",\n\t\"dice\",\n\t\"ello\",\n\t\"cualquier\",\n\t\"noche\",\n\t\"aun\",\n\t\"agua\",\n\t\"parece\",\n\t\"haber\",\n\t\"situacion\",\n\t\"fuera\",\n\t\"bajo\",\n\t\"grandes\",\n\t\"nuestra\",\n\t\"ejemplo\",\n\t\"acuerdo\",\n\t\"habian\",\n\t\"usted\",\n\t\"estados\",\n\t\"hizo\",\n\t\"nadie\",\n\t\"paises\",\n\t\"horas\",\n\t\"posible\",\n\t\"tarde\",\n\t\"ley\",\n\t\"importante\",\n\t\"guerra\",\n\t\"desarrollo\",\n\t\"proceso\",\n\t\"realidad\",\n\t\"sentido\",\n\t\"lado\",\n\t\"mi\",\n\t\"tu\",\n\t\"cambio\",\n\t\"alli\",\n\t\"mano\",\n\t\"eran\",\n\t\"estar\",\n\t\"san\",\n\t\"numero\",\n\t\"sociedad\",\n\t\"unas\",\n\t\"centro\",\n\t\"padre\",\n\t\"gente\",\n\t\"final\",\n\t\"relacion\",\n\t\"cuerpo\",\n\t\"obra\",\n\t\"incluso\",\n\t\"traves\",\n\t\"ultimo\",\n\t\"madre\",\n\t\"mis\",\n\t\"modo\",\n\t\"problemas\",\n\t\"cinco\",\n\t\"car
los\",\n\t\"hombres\",\n\t\"informacion\",\n\t\"ojos\",\n\t\"muerte\",\n\t\"nombre\",\n\t\"algunas\",\n\t\"publico\",\n\t\"mujeres\",\n\t\"siglo\",\n\t\"todavia\",\n\t\"meses\",\n\t\"manana\",\n\t\"esos\",\n\t\"nosotros\",\n\t\"hora\",\n\t\"muchas\",\n\t\"pueblo\",\n\t\"alguna\",\n\t\"dar\",\n\t\"problema\",\n\t\"don\",\n\t\"da\",\n\t\"tu\",\n\t\"derecho\",\n\t\"verdad\",\n\t\"maria\",\n\t\"unidos\",\n\t\"podria\",\n\t\"seria\",\n\t\"junto\",\n\t\"cabeza\",\n\t\"aquel\",\n\t\"luis\",\n\t\"cuanto\",\n\t\"tierra\",\n\t\"equipo\",\n\t\"segundo\",\n\t\"director\",\n\t\"dicho\",\n\t\"cierto\",\n\t\"casos\",\n\t\"manos\",\n\t\"nivel\",\n\t\"podia\",\n\t\"familia\",\n\t\"largo\",\n\t\"partir\",\n\t\"falta\",\n\t\"llegar\",\n\t\"propio\",\n\t\"ministro\",\n\t\"cosa\",\n\t\"primero\",\n\t\"seguridad\",\n\t\"hemos\",\n\t\"mal\",\n\t\"trata\",\n\t\"algun\",\n\t\"tuvo\",\n\t\"respecto\",\n\t\"semana\",\n\t\"varios\",\n\t\"real\",\n\t\"se\",\n\t\"voz\",\n\t\"paso\",\n\t\"senor\",\n\t\"mil\",\n\t\"quienes\",\n\t\"proyecto\",\n\t\"mercado\",\n\t\"mayoria\",\n\t\"luz\",\n\t\"claro\",\n\t\"iba\",\n\t\"este\",\n\t\"pesetas\",\n\t\"orden\",\n\t\"espanol\",\n\t\"buena\",\n\t\"quiere\",\n\t\"aquella\",\n\t\"programa\",\n\t\"palabras\",\n\t\"internacional\",\n\t\"van\",\n\t\"esas\",\n\t\"segunda\",\n\t\"empresa\",\n\t\"puesto\",\n\t\"ahi\",\n\t\"propia\",\n\t\"matrmonio\",\n\t\"libro\",\n\t\"igual\",\n\t\"politico\",\n\t\"persona\",\n\t\"ultimos\",\n\t\"ellas\",\n\t\"total\",\n\t\"creo\",\n\t\"tengo\",\n\t\"dios\",\n\t\"costilla\",\n\t\"espanola\",\n\t\"condiciones\",\n\t\"mexico\",\n\t\"fuerza\",\n\t\"solo\",\n\t\"unico\",\n\t\"accion\",\n\t\"amor\",\n\t\"policia\",\n\t\"puerta\",\n\t\"pesar\",\n\t\"zona\",\n\t\"sabe\",\n\t\"calle\",\n\t\"interior\",\n\t\"tampoco\",\n\t\"musica\",\n\t\"ningun\",\n\t\"vista\",\n\t\"campo\",\n\t\"buen\",\n\t\"hubiera\",\n\t\"saber\",\n\t\"obras\",\n\t\"razon\",\n\t\"extrano\",\n\t\"ninos\",\n\t\"presencia\",\n\t\"tema\",\n\t\"dinero\",\n\t\"comision\",\n\t\"antonio\",\n\t\"servicio\",\n\t\"hijo\",\n\t\"ultima\",\n\t\"ciento\",\n\t\"estoy\",\n\t\"hablar\",\n\t\"dio\",\n\t\"minutos\",\n\t\"produccion\",\n\t\"camino\",\n\t\"seis\",\n\t\"quien\",\n\t\"fondo\",\n\t\"direccion\",\n\t\"papel\",\n\t\"demas\",\n\t\"barcelona\",\n\t\"idea\",\n\t\"especial\",\n\t\"diferentes\",\n\t\"dado\",\n\t\"base\",\n\t\"capital\",\n\t\"ambos\",\n\t\"europa\",\n\t\"libertad\",\n\t\"relaciones\",\n\t\"espacio\",\n\t\"medios\",\n\t\"ir\",\n\t\"actual\",\n\t\"poblacion\",\n\t\"empresas\",\n\t\"estudio\",\n\t\"salud\",\n\t\"servicios\",\n\t\"haya\",\n\t\"principio\",\n\t\"siendo\",\n\t\"cultura\",\n\t\"anterior\",\n\t\"alto\",\n\t\"media\",\n\t\"mediante\",\n\t\"primeros\",\n\t\"arte\",\n\t\"paz\",\n\t\"sector\",\n\t\"imagen\",\n\t\"medida\",\n\t\"deben\",\n\t\"datos\",\n\t\"consejo\",\n\t\"personal\",\n\t\"interes\",\n\t\"julio\",\n\t\"grupos\",\n\t\"miembros\",\n\t\"ninguna\",\n\t\"existe\",\n\t\"cara\",\n\t\"edad\",\n\t\"enano\",\n\t\"movimiento\",\n\t\"visto\",\n\t\"llego\",\n\t\"puntos\",\n\t\"actividad\",\n\t\"bueno\",\n\t\"uso\",\n\t\"nino\",\n\t\"dificil\",\n\t\"joven\",\n\t\"futuro\",\n\t\"aquellos\",\n\t\"mes\",\n\t\"pronto\",\n\t\"soy\",\n\t\"hacia\",\n\t\"nuevos\",\n\t\"nuestros\",\n\t\"estaban\",\n\t\"posibilidad\",\n\t\"sigue\",\n\t\"cerca\",\n\t\"resultados\",\n\t\"educacion\",\n\t\"atencion\",\n\t\"gonzalez\",\n\t\"capacidad\",\n\t\"efecto\",\n\t\"necesario\",\n\t\"valor\",\n\t\"aire\",\n\t\"investigacion\",\n\t\"siguiente\",\n\t\"figura\",\n\t\"central\",\n\t\"comunidad\",\n\t\"necesidad\",
\n\t\"serie\",\n\t\"organizacion\",\n\t\"nuevas\",\n\t\"calidad\",\n]\ndef palabra():\n\tp=random.choice(PALABRAS)\n\treturn p","sub_path":"ejercicios-en-clase-master/ahorcado/palabras.py","file_name":"palabras.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"108242009","text":"# Copyright 2018 VMware, Inc.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport functools\n\nfrom neutron_lib import exceptions as n_exc\nfrom oslo_log import helpers as log_helpers\nfrom oslo_log import log as logging\nfrom oslo_utils import excutils\n\nfrom vmware_nsx._i18n import _\nfrom vmware_nsx.services.lbaas import base_mgr\nfrom vmware_nsx.services.lbaas import lb_common\nfrom vmware_nsx.services.lbaas import lb_const\nfrom vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils\nfrom vmware_nsxlib.v3 import exceptions as nsxlib_exc\nfrom vmware_nsxlib.v3 import load_balancer as nsxlib_lb\nfrom vmware_nsxlib.v3 import utils\n\nLOG = logging.getLogger(__name__)\n\n\nclass EdgePoolManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager):\n def _get_pool_kwargs(self, name=None, tags=None, algorithm=None,\n description=None):\n kwargs = {\n 'snat_translation': {'type': \"LBSnatAutoMap\"}}\n if name:\n kwargs['name'] = name\n if tags:\n kwargs['tags'] = tags\n if algorithm:\n kwargs['algorithm'] = algorithm\n if description:\n kwargs['description'] = description\n return kwargs\n\n def _get_pool_tags(self, context, pool):\n return lb_utils.get_tags(self.core_plugin, pool['id'],\n lb_const.LB_POOL_TYPE, pool['tenant_id'],\n context.project_name)\n\n def _remove_persistence(self, pool, vs_data):\n sp = pool.get('session_persistence')\n lb_client = self.core_plugin.nsxpolicy.load_balancer\n pp_client = None\n if not sp:\n LOG.debug(\"No session persistence info for pool %s\", pool['id'])\n elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_HTTP_COOKIE:\n pp_client = lb_client.lb_cookie_persistence_profile\n elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_APP_COOKIE:\n pp_client = lb_client.lb_cookie_persistence_profile\n else:\n pp_client = lb_client.lb_source_ip_persistence_profile\n\n persistence_profile_id = vs_data.get('persistence_profile_id')\n if persistence_profile_id:\n pp_client.delete(persistence_profile_id)\n\n def _process_vs_update(self, context, pool, pool_id, listener, completor):\n vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server\n try:\n # Process pool persistence profile and\n # create/update/delete profile for virtual server\n vs_data = vs_client.get(listener['id'])\n if pool and pool_id:\n (persistence_profile_id,\n post_process_func) = self._setup_session_persistence(\n pool, self._get_pool_tags(context, pool),\n listener, vs_data)\n else:\n post_process_func = functools.partial(\n self._remove_persistence, pool, vs_data)\n persistence_profile_id = None\n except nsxlib_exc.ManagerError:\n with excutils.save_and_reraise_exception():\n 
completor(success=False)\n LOG.error(\"Failed to configure session persistence \"\n \"profile for pool %(pool_id)s\",\n {'pool_id': pool['id']})\n try:\n # Update persistence profile and pool on virtual server\n vs_client.update(\n listener['id'],\n pool_id=pool_id,\n lb_persistence_profile_id=persistence_profile_id)\n\n LOG.debug(\"Updated NSX virtual server %(vs_id)s with \"\n \"pool %(pool_id)s and persistence profile %(prof)s\",\n {'vs_id': listener['id'], 'pool_id': pool['id'],\n 'prof': persistence_profile_id})\n if post_process_func:\n post_process_func()\n except nsxlib_exc.ManagerError:\n with excutils.save_and_reraise_exception():\n completor(success=False)\n LOG.error('Failed to attach pool %s to virtual '\n 'server %s', pool['id'], listener['id'])\n\n def _build_persistence_profile_tags(self, pool_tags, listener):\n tags = pool_tags[:]\n # With octavia loadbalancer name might not be among data passed\n # down to the driver\n lb_data = listener.get('loadbalancer')\n if lb_data:\n tags.append({\n 'scope': 'os-lbaas-lb-name',\n 'tag': lb_data['name'][:utils.MAX_TAG_LEN]})\n tags.append({\n 'scope': 'os-lbaas-lb-id',\n 'tag': listener['loadbalancer_id']})\n tags.append({\n 'scope': 'os-lbaas-listener-id',\n 'tag': listener['id']})\n return tags\n\n def _setup_session_persistence(self, pool, pool_tags, listener, vs_data):\n sp = pool.get('session_persistence')\n pers_type = None\n cookie_name = None\n cookie_mode = None\n lb_client = self.core_plugin.nsxpolicy.load_balancer\n pp_client = None\n if not sp:\n LOG.debug(\"No session persistence info for pool %s\", pool['id'])\n elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_HTTP_COOKIE:\n pp_client = lb_client.lb_cookie_persistence_profile\n pers_type = nsxlib_lb.PersistenceProfileTypes.COOKIE\n cookie_name = sp.get('cookie_name')\n if not cookie_name:\n cookie_name = lb_const.SESSION_PERSISTENCE_DEFAULT_COOKIE_NAME\n cookie_mode = \"INSERT\"\n elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_APP_COOKIE:\n pp_client = lb_client.lb_cookie_persistence_profile\n pers_type = nsxlib_lb.PersistenceProfileTypes.COOKIE\n # In this case cookie name is mandatory\n cookie_name = sp['cookie_name']\n cookie_mode = \"REWRITE\"\n else:\n pp_client = lb_client.lb_source_ip_persistence_profile\n pers_type = nsxlib_lb.PersistenceProfileTypes.SOURCE_IP\n\n if pers_type:\n # There is a profile to create or update\n pp_kwargs = {\n 'persistence_profile_id': pool['id'],\n 'name': \"persistence_%s\" % utils.get_name_and_uuid(\n pool['name'] or 'pool', pool['id'], maxlen=235),\n 'tags': self._build_persistence_profile_tags(\n pool_tags, listener)\n }\n if cookie_name:\n pp_kwargs['cookie_name'] = cookie_name\n pp_kwargs['cookie_mode'] = cookie_mode\n\n persistence_profile_id = vs_data.get('persistence_profile_id')\n if persistence_profile_id:\n # NOTE: removal of the persistence profile must be executed\n # after the virtual server has been updated\n if pers_type:\n # Update existing profile\n LOG.debug(\"Updating persistence profile %(profile_id)s for \"\n \"listener %(listener_id)s with pool %(pool_id)s\",\n {'profile_id': persistence_profile_id,\n 'listener_id': listener['id'],\n 'pool_id': pool['id']})\n pp_client.update(persistence_profile_id, **pp_kwargs)\n return persistence_profile_id, None\n else:\n # Prepare removal of persistence profile\n return (None, functools.partial(self._remove_persistence,\n vs_data))\n elif pers_type:\n # Create persistence profile\n pp_id = pp_client.create_or_overwrite(**pp_kwargs)\n LOG.debug(\"Created persistence 
profile %(profile_id)s for \"\n \"listener %(listener_id)s with pool %(pool_id)s\",\n {'profile_id': pp_id,\n 'listener_id': listener['id'],\n 'pool_id': pool['id']})\n return pp_id, None\n return None, None\n\n @log_helpers.log_method_call\n def create(self, context, pool, completor):\n pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool\n\n pool_name = utils.get_name_and_uuid(pool['name'] or 'pool', pool['id'])\n tags = self._get_pool_tags(context, pool)\n\n description = pool.get('description')\n lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get(pool['lb_algorithm'])\n # NOTE(salv-orlando): Guard against accidental compat breakages\n try:\n listener = pool['listener'] or pool['listeners'][0]\n except IndexError:\n # If listeners is an empty list we hit this exception\n listener = None\n # Perform additional validation for session persistence before\n # creating resources in the backend\n lb_common.validate_session_persistence(pool, listener, completor)\n try:\n kwargs = self._get_pool_kwargs(\n pool_name, tags, lb_algorithm, description)\n pool_client.create_or_overwrite(lb_pool_id=pool['id'], **kwargs)\n except nsxlib_exc.ManagerError:\n completor(success=False)\n msg = (_('Failed to create pool on NSX backend: %(pool)s') %\n {'pool': pool['id']})\n raise n_exc.BadRequest(resource='lbaas-pool', msg=msg)\n\n # The pool object can be created with either --listener or\n # --loadbalancer option. If listener is present, the virtual server\n # will be updated with the pool. Otherwise, just return. The binding\n # will be added later when the pool is associated with layer7 rule.\n # FIXME(salv-orlando): This two-step process can leave a zombie pool on\n # NSX if the VS update operation fails\n if listener:\n self._process_vs_update(context, pool, pool['id'], listener,\n completor)\n completor(success=True)\n\n @log_helpers.log_method_call\n def update(self, context, old_pool, new_pool, completor):\n pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool\n\n pool_name = None\n tags = None\n lb_algorithm = None\n description = None\n if new_pool['name'] != old_pool['name']:\n pool_name = utils.get_name_and_uuid(new_pool['name'] or 'pool',\n new_pool['id'])\n tags = self._get_pool_tags(context, new_pool)\n if new_pool['lb_algorithm'] != old_pool['lb_algorithm']:\n lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get(\n new_pool['lb_algorithm'])\n if new_pool.get('description') != old_pool.get('description'):\n description = new_pool['description']\n # NOTE(salv-orlando): Guard against accidental compat breakages\n try:\n listener = new_pool['listener'] or new_pool['listeners'][0]\n except IndexError:\n # If listeners is an empty list we hit this exception\n listener = None\n # Perform additional validation for session persistence before\n # operating on resources in the backend\n lb_common.validate_session_persistence(new_pool, listener, completor,\n old_pool=old_pool)\n\n try:\n kwargs = self._get_pool_kwargs(pool_name, tags, lb_algorithm,\n description)\n pool_client.update(lb_pool_id=new_pool['id'], **kwargs)\n if (listener and new_pool['session_persistence'] !=\n old_pool['session_persistence']):\n self._process_vs_update(context, new_pool, new_pool['id'],\n listener, completor)\n completor(success=True)\n except Exception as e:\n with excutils.save_and_reraise_exception():\n completor(success=False)\n LOG.error('Failed to update pool %(pool)s with '\n 'error %(error)s',\n {'pool': old_pool['id'], 'error': e})\n\n @log_helpers.log_method_call\n def delete(self, context, pool, 
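# completor: status callback invoked with success=True/False once the backend operation finishes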
completor):\n pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool\n\n # NOTE(salv-orlando): Guard against accidental compat breakages\n try:\n listener = pool['listener'] or pool['listeners'][0]\n except IndexError:\n # If listeners is an empty list we hit this exception\n listener = None\n if listener:\n try:\n self._process_vs_update(\n context, pool, None, listener, completor)\n except Exception as e:\n LOG.error('Disassociation of listener %(lsn)s from pool '\n '%(pool)s failed with error %(err)s',\n {'lsn': listener['id'],\n 'pool': pool['id'],\n 'err': e})\n try:\n pool_client.delete(pool['id'])\n except nsxlib_exc.ResourceNotFound:\n pass\n except nsxlib_exc.ManagerError:\n completor(success=False)\n msg = (_('Failed to delete lb pool from nsx: %(pool)s') %\n {'pool': pool['id']})\n raise n_exc.BadRequest(resource='lbaas-pool', msg=msg)\n\n completor(success=True)\n\n @log_helpers.log_method_call\n def delete_cascade(self, context, pool, completor):\n self.delete(context, pool, completor)\n","sub_path":"vmware_nsx/services/lbaas/nsx_p/implementation/pool_mgr.py","file_name":"pool_mgr.py","file_ext":"py","file_size_in_byte":14063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"34851790","text":"#!/usr/bin/env python2.7\nfrom __future__ import print_function, division, absolute_import, unicode_literals\nfrom matplotlib import pyplot as plt\nfrom urdf_optcontrol import optimizer\nimport rospy\nfrom std_msgs.msg import Float64\nimport numpy as np\nfrom casadi import sin, cos, fmax\nfrom std_srvs.srv import Empty\nimport time\nimport pathlib\nfrom jump import homing, jumping, rehoming\n\ndef publish_inputs(u, publishers):\n for ankle, knee, hip in zip(u['ankle'], u['knee'], u['hip']):\n publishers[0].publish(float(ankle))\n publishers[1].publish(float(knee))\n publishers[2].publish(float(hip))\n rospy.Rate(u['rate']).sleep()\n\ndef reset_simulation(publishers, restart_function):\n for i in range(3):\n publishers[0].publish(0)\n publishers[1].publish(0)\n publishers[2].publish(0)\n time.sleep(0.1)\n restart_function()\n time.sleep(0.2)\n\nros = True\nrecalc_homing = False\nrecalc_jump = False\nrecalc_rehoming = False\n\nfilepath = pathlib.Path(__file__).parent\n\nif ros: \n # INITIALIZING NODE AND PUBLISHERS\n rospy.init_node(\"softleg_control\")\n pub_ankle = rospy.Publisher('/oneleg/ankle/theta_command', Float64, queue_size=10)\n pub_knee = rospy.Publisher('/oneleg/knee/theta_command', Float64, queue_size=10)\n pub_hip = rospy.Publisher('/oneleg/hip/theta_command', Float64, queue_size=10)\n publishers = [pub_ankle, pub_knee, pub_hip]\n\n# INITIALIZING GAZEBO SERVICES TO STOP AND UNSTOP\npause_gazebo=rospy.ServiceProxy('/gazebo/pause_physics',Empty)\nunpause_gazebo=rospy.ServiceProxy('/gazebo/unpause_physics',Empty)\nrestart_gazebo=rospy.ServiceProxy('/gazebo/reset_simulation',Empty)\n\n\n# ROBOT SPECIFICATIONS\nrobot = {\n 'urdf_path': pathlib.Path(__file__).absolute().parent.parent.parent.joinpath('urdf','softleg-light.urdf'),\n 'root': 'foot',\n 'end': 'body',\n 'sea_damping': dict(leg_J1=0.05, leg_J2=0.05, leg_J3=0.05)\n}\n\n# POSITION AND VELOCITY OF THE CENTER OF THE MASS OF THE ROBOT \nCoM = {\n 'pos_x': lambda q: -0.1209*sin(q[0])-0.0488*sin(q[0]+q[1])-0.0291*sin(q[0]+q[1]+q[2]),\n 'pos_y': lambda q: 0.1209*cos(q[0])+0.0488*cos(q[0]+q[1])+0.0291*cos(q[0]+q[1]+q[2]),\n 'vel_x': lambda q,qd: -0.1209*qd[0]*cos(q[0])-0.0488*(qd[0]+qd[1])*cos(q[0]+q[1])-0.0291*(qd[0]+qd[1]+qd[2])*cos(q[0]+q[1]+q[2]),\n 'vel_y': lambda 
q,qd: -0.1209*qd[0]*sin(q[0])-0.0488*(qd[0]+qd[1])*sin(q[0]+q[1])-0.0291*(qd[0]+qd[1]+qd[2])*sin(q[0]+q[1]+q[2]),\n 'acc_x': lambda q,qd,qdd: 0.0488*(qd[0]+qd[1])**2*sin(q[0]+q[1]) -0.0488*(qdd[0]+qdd[1])*cos(q[0]+q[1]) +0.0291*(qd[0]+qd[1]+qd[2])**2*sin(q[0]+q[1]+q[2]) -0.0291*(qdd[0]+qdd[1]+qdd[2])*cos(q[0]+q[1]+q[2]) +0.1209*sin(q[0])*(qd[0])**2 -0.1209*cos(q[0])*qdd[0],\n 'acc_y': lambda q,qd,qdd: -0.0488*(qd[0]+qd[1])**2*cos(q[0]+q[1]) -0.0488*(qdd[0]+qdd[1])*sin(q[0]+q[1]) -0.0291*(qd[0]+qd[1]+qd[2])**2*cos(q[0]+q[1]+q[2]) -0.0291*(qdd[0]+qdd[1]+qdd[2])*sin(q[0]+q[1]+q[2]) -0.1209*sin(q[0])*qdd[0] -0.1209*cos(q[0])*(qd[0])**2\n}\n\ndef p_zero_mom(q,qd,qdd,m=3.2,g=9.81):\n F = m*(g+CoM['acc_y'](q,qd,qdd))\n return CoM['pos_x'](q)-(m*CoM['acc_y'](q,qd,qdd)*CoM['pos_y'](q))/F\nCoM['p_zero_mom'] = p_zero_mom\n\n# INITIAL POSE AND VELOCITY\nsquat_position = [0.35,-2.1745,2.0245]\n\n# HOMING PROCEDURE\nu_home = homing(final_cond=squat_position,\n recalc=recalc_homing,\n center_of_mass=CoM,\n robot=robot\n)\n# JUMP PROCEDURE\nu_jump = jumping( initial_cond=squat_position,\n recalc=recalc_jump,\n center_of_mass=CoM,\n robot=robot\n)\n\nqf = [u_jump['q'][0][-1], u_jump['q'][1][-1], u_jump['q'][2][-1]]\nqdf = [u_jump['qd'][0][-1], u_jump['qd'][1][-1], u_jump['qd'][2][-1]]\n\n# RE-HOMING PROCEDURE\ninitial_cond = [float(x) for x in qf+qdf]\n\nu_rehome = rehoming(initial_cond=initial_cond,\n final_cond=squat_position,\n recalc=recalc_rehoming,\n center_of_mass=CoM,\n robot=robot\n)\n\n\nif ros:\n reset_simulation(publishers, restart_gazebo)\n input('Press Enter to start')\n publish_inputs(u_home, publishers)\n time.sleep(1.0)\n for i in range(100):\n\n publish_inputs(u_jump, publishers)\n # pause_gazebo()\n time.sleep(0.1)\n\n publish_inputs(u_rehome, publishers)\n time.sleep(1.0)\n\n\nplt.show()\n","sub_path":"script/longer_time/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"529813364","text":"import datetime\nimport io\nimport time\nfrom fractions import Fraction\n\nimport gpiozero\nimport picamera\nfrom PIL import Image, ImageStat\n\n\ndef get_average_brightness(image):\n image = image.copy().convert(\"L\")\n return ImageStat.Stat(image).mean[0]\n\n\ndef capture(shutter_speed, iso=800, exposure_mode=\"off\"):\n stream = io.BytesIO()\n camera = picamera.PiCamera(resolution=(1024, 768), framerate=Fraction(1, 6), sensor_mode=3)\n camera.shutter_speed = shutter_speed\n camera.iso = iso\n camera.exposure_mode = exposure_mode\n\n camera.start_preview()\n time.sleep(1)\n camera.capture(stream, format=\"png\")\n stream.seek(0)\n camera.close()\n\n return stream\n\n\nfilename = datetime.datetime.now().strftime(r\"still_%Y%m%d_%H%M%S.png\")\n\nshutter_speed = 500_000\nmax_shutter_speed = 6_000_000\nmin_shutter_speed = 1_000\n\nwanted_brightness = 128\n\nled17 = gpiozero.LED(17)\nled17.on()\ntime.sleep(1)\n\nbest_image = None\nbest_brightness = None\nbest_brightness_delta = 256\n\nprint(datetime.datetime.now().isoformat(timespec=\"seconds\"), \"START\")\n\nfor x in range(5):\n stream = capture(shutter_speed)\n image = Image.open(stream)\n brightness = get_average_brightness(image)\n brightness_delta = abs(wanted_brightness-brightness)\n factor = wanted_brightness/brightness\n new_shutter_speed = int(shutter_speed*factor)\n\n print(\n datetime.datetime.now().isoformat(timespec=\"seconds\"),\n f\"seq={x}\",\n f\"ss=[{shutter_speed:,} => {new_shutter_speed:,}]\",\n 
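# new_shutter_speed is shutter_speed rescaled by wanted_brightness / brightness (simple proportional correction)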
f\"brightness=[{brightness:,.2f}*{factor:,.2f} ~= {wanted_brightness}]\"\n )\n\n if brightness_delta < best_brightness_delta:\n best_image = image\n best_brightness = brightness\n best_brightness_delta = brightness_delta\n\n if shutter_speed == max_shutter_speed:\n print(datetime.datetime.now().isoformat(timespec=\"seconds\"), \"BREAK max shutter speed\")\n break\n elif shutter_speed == min_shutter_speed:\n print(datetime.datetime.now().isoformat(timespec=\"seconds\"), \"BREAK min shutter speed\")\n break\n else:\n shutter_speed = max(min(new_shutter_speed, max_shutter_speed), min_shutter_speed)\n\nprint(datetime.datetime.now().isoformat(timespec=\"seconds\"), f\"Best brightness: {best_brightness:,.2f}\", f\"Saving {filename}\")\nbest_image.save(filename)\n\nled17.off()\n\nprint(datetime.datetime.now().isoformat(timespec=\"seconds\"), \"STOP\")\n","sub_path":"capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"510881877","text":"#!/usr/bin/env python\n\nfrom pip.req import parse_requirements\nfrom setuptools import find_packages, setup\n\ninstall_reqs = parse_requirements('requirements.txt', session=False)\nreqs = [str(ir.req) for ir in install_reqs]\n\nsetup(\n name='asyncexec',\n version='0.3.0',\n packages=find_packages(),\n include_package_data=True,\n install_requires=reqs,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"558134953","text":"#!/usr/bin/env python\n\n\"\"\"\nPython implementation of common model fitting operations to\nanalyse protein folding data. Simply automates some fitting\nand value calculation. Will be extended to include phi-value\nanalysis and other common calculations.\n\nAllows for quick model evaluation and plotting.\n\nAlso tried to make this somewhat abstract and modular to\nenable more interesting calculations, such as Ising models\nand such.\n\nRequirements (recommended python 2.7+):\n\t- numpy\n\t- scipy\n\t- matplotlib\n\nLowe, A.R. 2015\n\n\"\"\"\n\nimport sys\nimport inspect\nimport numpy as np\nimport scipy as sp\n\nimport core\nimport constants\n\n__author__ = \"Alan R. 
Lowe\"\n__email__ = \"a.lowe@ucl.ac.uk\"\n\n\ndef list_models():\n\t\"\"\" List the kinetic of equilibrium models defined in this module.\n\n\tReturns a list of the names of the models, whose parent class is\n\tFitModel.\n\t\"\"\"\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models\n\n\n\n\n\n\n\n\nclass TemplateModel(core.FitModel):\n\t\"\"\" A template model for expansion\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([])\n\n\n\tdef fit_func(self, x):\n\t\traise NotImplementedError\n\n\t@property\n\tdef equation(self):\n\t\treturn r'F=f(x)'\n\n\n\n\n\n\n\"\"\"\n==========================================================\nEQUILIBRIUM FOLDING models\n==========================================================\n\"\"\"\n\nclass TwoStateEquilibrium(core.FitModel):\n\t\"\"\" Two state equilbrium denaturation curve.\n\n\tF = \\frac{\\exp( m(x-d_{50})) / RT} { 1+\\exp(m(x-d_{50}))/RT}\n\n\tNotes:\n\t\tClarke and Fersht. Engineered disulfide bonds as probes of\n\t\tthe folding pathway of barnase: Increasing the stability\n\t\tof proteins against the rate of denaturation.\n\t\tBiochemistry (1993) vol. 32 (16) pp. 4322-4329\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([1.5, 5.])\n\t\tself.verified = True\n\n\n\tdef fit_func(self, x, m, d50):\n\t\tF = ( np.exp((m*(x-d50))/core.temperature.RT)) / (1.+np.exp((m*(x-d50))/core.temperature.RT))\n\t\treturn F\n\n\t@property\n\tdef equation(self):\n\t\treturn r'F = \\frac{\\exp( m(x-d_{50})) / RT} { 1+\\exp(m(x-d_{50}))/RT}'\n\n\n\n\nclass TwoStateEquilibriumSloping(core.FitModel):\n\t\"\"\" Two state equilbrium denaturation curve.\n\n\tF = (\\alpha_f+\\beta_f x) + (\\alpha_u+\\beta_u x) \\cdot \\frac{\\exp( m(x-d_{50})) / RT} { 1+\\exp(m(x-d_{50}))/RT}\n\n\tNotes:\n\t\tClarke and Fersht. Engineered disulfide bonds as probes of\n\t\tthe folding pathway of barnase: Increasing the stability\n\t\tof proteins against the rate of denaturation.\n\t\tBiochemistry (1993) vol. 32 (16) pp. 4322-4329\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5.])\n\t\tself.verified = True\n\n\n\tdef fit_func(self, x, alpha_f, beta_f, alpha_u, beta_u, m, d50):\n\t\tF = (alpha_f+beta_f*x) + (alpha_u+beta_u*x) * (\\\n\t\t( np.exp((m*(x-d50))/core.temperature.RT)) / (1.+np.exp((m*(x-d50))/core.temperature.RT)))\n\t\treturn F\n\n\t@property\n\tdef equation(self):\n\t\treturn r'F = (\\alpha_f+\\beta_f x) + (\\alpha_u+\\beta_u x) \\cdot \\frac{\\exp( m(x-d_{50})) / RT} { 1+\\exp(m(x-d_{50}))/RT}'\n\n\n\n\nclass TwoStateDimerEquilibrium(core.FitModel):\n\t\"\"\" Two State model for a dimer denaturation Equilibrium.\n\ti.e. 
N2 = 2D\n\n\tY_0 = Y_N * (1 - F_D) + Y_D * F_D\n\tY_N = alpha_N + beta_N * x\n\tY_D = alpha_D + beta_D * x\n\tF_D = (((K_U)^2 + (8 * K_U * P_t - K_U)^0.5)/(4 * P_t)\n\tK_U = exp((RT * ln(P_t)-m(d_{50}-x)))/RT)\n\n\tNotes:\n\t\tMallam and Jackson. Folding studies on a knotted protein.\n\t\tJournal of Molecular Biology (2005) vol. 346 (5) pp. 1409-1421\n\n\tComments:\n\t\tPt is a variable that needs to be set, So it like Ising model when you need\n\t\tto define a specific value to a curve.\n\t\tCould this be the self-constants code line ?\n\n\t\"\"\"\n\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5.])\n\t\tself.constants = (('Pt',7),)\n\n\tdef fit_func(self, x, alpha_N, beta_N, alpha_D, beta_D, m, d50):\n\t\tK_U = np.exp(((core.temperature.RT * np.ln(Pt))-(m*d50)-x) / core.temperature.RT)\n\t\tF_D = ((K_U^2 + (8 * K_U * Pt))^0.5 - K_U) / (4*Pt)\n\t\tY_0 = ((alpha_N + beta_N*x)*(1-F_D)) + ((alpha_D + beta_D*x)*(F_D))\n\t\treturn Y_0\n\n\t\"\"\"\n\t@property\n\tdef equation(self):\n\t\treturn r'Y_0 = (\\alpha_N+\\beta_N x) \\cdot (1-F_D) + Y_D \\cdot F_D \\\\\n\t\t\t\t\\text{where}\\\\\n\t\t\t\tF_D = \\frac{(K_U^2 + (8 * K_U * Pt))^0.5 - K_U / 4*Pt} (\\alpha_u+\\beta_u x) \\\\\n\t\t\t\tK_U = \\frac{\\exp(((constants.RT * np.ln(Pt))-(m*d50)-x) / constants.RT}'\n\t\"\"\"\n\n# NOTE(ergm) added this on 30/8/2017\nclass ThreeStateEquilibrium (core.FitModel):\n\t\"\"\" Three state equilbrium denaturation curve.\n\n\ti.e. N = I = D\n\n\tY_obs = Y_N*F_N + Y_I*F_I + Y_D*F_D\n\n\twhere:\n\tF_N = 1/(1 + Kni + Kni*Kid)\n\tF_I = Kni / (1 + Kni + Kni*Kid)\n\tF_D = Kni*Kid/(1 + Kni + Kni*Kid)\n\tKni = exp((-DGni - m_ni * x)/RT)\n\tKid = exp((-DGid - m_id * x)/RT)\n\n\texpanded:\n\n\tY_obs = Y_N + Y_I*exp((DGni_H20 + m_ni*x)/RT) + Y_D*exp((DGni_H20 + m_ni*x)/RT) * exp((DGid_H20 + m_id*x)/RT) /\n\t\t\t1 + exp((DGni_H20 + m_ni*x)/RT) + exp((DGni_H20 + m_ni*x)/RT) * exp((DGid_H20 + m_id*x)/RT)\n\n\tNotes:\n\t\tHecky J, Muller KM Structural Perturbation and Compensation by Directed\n\t\tEvolution at Physiological Temperature Leads to Thermostabilization of\n\t\tbeta-Lactamase. (2005) Biochemistry 44. pp. 
12640 –12654\n\n\tComments:\n\t\tY_obs = The spectroscopic signal maximum as a function of denaturant concentration\n\t\tYN and YD are the spectroscopic signals for native and denatured states\n\t\tK = Equilibrium Constants\n\t\tF_D = fraction of denatured\n\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([1., 0.5, 0.0, 5., 1.5, 5., 1])\n\n\n\n\tdef fit_func(self, x, Y_N, Y_I, Y_D, DGni, m_ni, DGid, m_id):\n\t\tF = (Y_N + Y_I*np.exp((-DGni + m_ni*x)/core.temperature.RT) + Y_D*np.exp((-DGni + m_ni*x)/core.temperature.RT) * np.exp((-DGid + m_id*x)/core.temperature.RT)) \\\n\t\t/ (1 + np.exp((-DGni + m_ni*x)/core.temperature.RT) + np.exp((-DGni + m_ni*x)/core.temperature.RT) * np.exp((-DGid + m_id*x)/core.temperature.RT))\n\t\treturn F\n\n\t@property\n\tdef equation(self):\n\t\treturn r'\\Upsilon_{obs} = \\Upsilon_N F_N + \\Upsilon_I F_I + \\Upsilon_D F_D \\\\ \\\n\t\t\t\t\\text{expanded} \\\\ \\\n\t\t\t\t\\Upsilon_{obs} = \\frac{ \\Upsilon_N + \\Upsilon_I \\exp \\frac {\\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} + \\\n\t\t\t\t\\Upsilon_D \\exp \\frac{\\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} \\cdot \\exp \\frac{\\Delta G_{ID}^{H_2O} + m_{ID} x} {RT}} {1 + \\exp \\\n\t\t\t\t\\frac{\\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} + \\exp \\frac{\\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} \\cdot \\\n \t\t\t \\exp \\frac{\\Delta G_{ID}^{H_2O} + m_{ID} x} {RT}}'\n\n\n\nclass ThreeStateMonoIEquilibrium(core.FitModel):\n\t\"\"\" Three State model for a dimer denaturation Equilibrium.\n\ti.e. N2 = I2 = 2D\n\n\tY_rel = (Y_N * ((2*Pt*F_D^2)/(K1*K2))) + (Y_I * ((2*Pt*F_D^2)/K2)) + (Y_D * F_D)\n\tF_D = -((K1*K2) + ((K1*K2)^2 + (8*(1+K1)*(K1*K2)*Pt))^0.5) / (4*Pt*(1+K1))\n\tK1 = exp((DG1 + m1*x)/RT)\n\tK2 = exp((DG2 + m2*x)/RT)\n\n\tNotes:\n\t\tMallam and Jackson. Folding studies on a knotted protein.\n\t\tJournal of Molecular Biology (2005) vol. 346 (5) pp. 1409-1421\n\n\tComments:\n\t\tPt is a variable that needs to be set, So it like Ising model when you need\n\t\tto define a specific value to a curve.\n\t\talso needs to fit to multiple datasets\n\n\t\"\"\"\n\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5., 2.])\n\t\tself.constants = (('Pt',1e-6),)\n\n\tdef fit_func(self, x, DG1, m1, DG2, m2, Y_N, Y_I, Y_D, Pt):\n\t\tK1 = np.exp((-DG1 + (m1*x)) / core.temperature.RT)\n\t\tK2 = np.exp((-DG2 + (m2*x)) / core.temperature.RT)\n\t\tF_D = -((K1*K2) + ((K1*K2)^2 + (8*(1+K1)*(K1*K2)*Pt))^0.5) / (4*Pt*(1+K1))\n\t\tY_rel = (Y_N * ((2*Pt*F_D^2)/(K1*K2))) + (Y_I * ((2*Pt*F_D^2)/K2)) + (Y_D * F_D)\n\t\treturn Y_rel\n\n\n\t\"\"\"\n\t@property\n\tdef equation(self):\n\t\treturn r'Y_rel = (Y_N \\cdot \\frac{(2PtF_D^2/K1K2)} + (Y_I \\cdot \\frac{(2PtF_D^2)/K2)} + (Y_D * F_D) \\\\\n\t\t\t\\text{where}\\\\ F_D = \\frac {- K1K2 + ((K1K2)^2 + (8(1+K1)(K1K2)Pt))^0.5) / 4Pt(1+K1)} \\\\\n\t\t\tK1 = \\frac {\\exp((-DG1 + (m1 x)) / RT)} \\\\ K2 = \\frac {\\exp((-DG2 + (m2 x)) / RT)} '\n\t\"\"\"\n\nclass ThreeStateDimericIEquilibrium(core.FitModel):\n\t\"\"\" Three State model for a dimer denaturation Equilibrium.\n\ti.e. 
N2 = 2I = 2D\n\n\tY_rel = (Y_N * ((2*Pt*F_D^2)/(K1*K2))) + (Y_I * ((2*Pt*F_D^2)/K2)) + (Y_D * F_D)\n\tF_D = -((K1*K2) + ((K1*K2)^2 + (8*(1+K1)*(K1*K2)*Pt))^0.5) / (4*Pt*(1+K1))\n\tK1 = exp((DG1 + m1*x)/RT)\n\tK2 = exp((DG2 + m2*x)/RT)\n\n\tNotes:\n\t\tMallam and Jackson. Folding studies on a knotted protein.\n\t\tJournal of Molecular Biology (2005) vol. 346 (5) pp. 1409-1421\n\n\tComments:\n\t\tPt is a variable that needs to be set, So it like Ising model when you need\n\t\tto define a specific value to a curve.\n\t\talso needs to fit to multiple datasets.\n\n\t\"\"\"\n\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5., 3.])\n\t\tself.constants = (('Pt',1e-6),)\n\n\tdef fit_func(self, x, DG1, m1, DG2, m2, Y_N, Y_I, Y_D, Pt):\n\t\tK1 = np.exp((-DG1 + (m1*x)) / core.temperature.RT)\n\t\tK2 = np.exp((-DG2 + (m2*x)) / core.temperature.RT)\n\t\tF_I = -(K1*(1+K2) + ((K1^2*(1+K2)^2 +(8*Pt*K1))^0.5)) / (4*Pt)\n\t\tY_rel = (Y_N * ((2*Pt*F_I^2)/K1)) + (Y_I * F_I) + (Y_D * (K2*F_I))\n\t\treturn Y_rel\n\n\n\t\"\"\"\n\t@property\n\tdef equation(self):\n\t\treturn r'Y_rel = (Y_N \\cdot \\frac{(2PtF_I^2/K1)} + (Y_I F_I} + (Y_D * (K2F_I)) \\\\\n\t\t\t\\text{where} \\\\\n\t\t\tF_I = \\frac {- K1(1+K2) + (K1^2 \\cdot(1+K2)^2 + (8 Pt K1))^0.5) / 4Pt} \\\\\n\t\t\tK1 = \\frac {\\exp((-DG1 + (m1 x)) / RT)} \\\\\n\t\t\tK2 = \\frac {\\exp((-DG2 + (m2 x)) / RT)}'\n\t\"\"\"\n\n\n\nclass HomozipperIsingEquilibrium(core.FitModel):\n\t\"\"\" Homopolymer Zipper Ising model\n\n\tNotes:\n\t\tAksel and Barrick. Analysis of repeat-protein folding using\n\t\tnearest-neighbor statistical mechanical models.\n\t\tMethods in enzymology (2009) vol. 455 pp. 95-125\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([7, 0.1, -.53, -4.6])\n\t\tself.constants = (('n',7),)\n\t\tself.verified = True\n\n\tdef fit_func(self, x, n, DG_intrinsic, m_intrinsic, DG_interface):\n\t\t# , m_interface , -0.6\n\n\t\t# clamp to prevent instability\n\t\tif DG_intrinsic<0. or DG_interface>0.:\n\t\t\treturn core.FIT_ERROR(x)\n\n\t\tk = np.exp(-(DG_intrinsic - m_intrinsic*x) / core.temperature.RT )\n\t\t#t = np.exp(-(DG_interface - m_interface*x) / core.temperature.RT )\n\t\tt = np.exp(-(DG_interface) / core.temperature.RT )\n\t\tpre_factor = (k/(n*(k*t-1)))\n\t\tnumerator = n*(k*t)**(n+2) - (n+2)*(k*t)**(n+1) + (n+2)*k*t-n\n\t\tdenominator = (k*t-1)**2 + k*((k*t)**(n+1) - (n+1)*k*t+n )\n\t\ttheta = pre_factor * (numerator / denominator)\n\t\treturn 1.-theta\n\n\n\"\"\"\n==========================================================\nKINETIC FOLDING models\n==========================================================\n\"\"\"\n\nclass TwoStateChevron(core.FitModel):\n\t\"\"\" Two state chevron plot.\n\n\tk_{obs} = k_u^{H_2O}\\exp(m_{ku}x) + k_f^{H_2O}\\exp(m_{kf}x)\n\n\tNotes:\n\t\tJackson SE and Fersht AR. Folding of chymotrypsin inhibitor 2.\n\t\t1. 
Evidence for a two-state transition.\n\t\tBiochemistry (1991) 30(43):10428-10435.\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([50., 1.3480, 5e-4, 1.])\n\t\t#self.constants = (('mf',1.76408),('mu',1.13725))\n\t\tself.verified = True\n\n\n\tdef fit_func(self, x, kf, mf, ku, mu):\n\t\tk_obs = kf*np.exp(-mf*x) + ku*np.exp(mu*x)\n\t\treturn k_obs\n\n\tdef error_func(self, y):\n\t\treturn np.log(y)\n\n\t@property\n\tdef equation(self):\n\t\treturn r'k_{obs} = k_f^{H_2O}\\exp(-m_{kf}x) + k_u^{H_2O}\\exp(m_{ku}x)'\n\n\nclass ThreeStateChevron(core.FitModel):\n\t\"\"\" Three state chevron with single intermediate.\n\n\tk_{obs} = k_{fi}^{H_2O} * exp(-m_{if}*x) +\n\t\t\t\tk_{if}^{H_2O} * exp((m_i - m_{if})*x) /\n\t\t\t\t(1 + 1 / K_{iu})\n\n\twhere:\n\n\tK_{iu} = K_{iu}^{H_2O} * exp((m_u-m_i)*x)\n\n\tNotes:\n\t\tParker et al. An integrated kinetic analysis of\n\t\tintermediates and transition states in protein folding\n\t\treactions.\n\t\tJournal of molecular biology (1995) vol. 253 (5) pp. 771-86\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([4.5e-4, -9.5e-1, 1.3e9, -6.9, 1.4e-8, -1.6])\n\t\t#self.constants = (('mif',-0.97996),('mi',-6.00355),('mu',-1.66154))\n\t\tself.verified = True\n\n\tdef fit_func(self, x, kfi, mif, kif, mi, Kiu, mu):\n\t\tk_fi = kfi*np.exp(-mif*x)\n\t\tk_if = kif*np.exp((mi - mif)*x)\n\t\tK_iu = Kiu*np.exp((mu - mi)*x)\n\t\tk_obs = k_fi + k_if / (1.+1./K_iu)\n\t\treturn k_obs\n\n\tdef error_func(self, y):\n\t\treturn np.log(y)\n\n\tdef components(self, x, kfi, mif, kif, mi, Kiu, mu):\n\t\tk_fi = kfi*np.exp(-mif*x)\n\t\tk_if = kif*np.exp((mi - mif)*x)\n\t\tk_obs_I = k_fi + k_if\n\t\treturn {'kobs_I':k_obs_I}\n\n\t@property\n\tdef equation(self):\n\t\treturn r'k_{obs} = k_{fi}^{H_2O}\\exp(-m_{if}x) + k_{if}^{H_2O}\\exp((m_i - m_{if})x) /(1 + 1 / (K_{iu}^{H_2O}\\exp((m_u-m_i)x)))'\n\n\n\nclass ThreeStateFastPhaseChevron(core.FitModel):\n\t\"\"\" Three state chevron with single intermediate.\n\n\tk_{obs} = k_{fi}^{H_2O} * exp(-m_{if}*x) +\n\t\t\t\tk_{if}^{H_2O} * exp((m_i - m_{if})*x) /\n\t\t\t\t(1 + 1 / K_{iu})\n\n\twhere:\n\n\tK_{iu} = K_{iu}^{H_2O} * exp((m_u-m_i)*x)\n\n\tNotes:\n\t\tParker et al. An integrated kinetic analysis of\n\t\tintermediates and transition states in protein folding\n\t\treactions.\n\t\tJournal of molecular biology (1995) vol. 253 (5) pp. 
771-86\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([172., 1.42, .445, .641, 9.41e3, 2.71313, 1.83e-4, 1.06])\n\t\t#self.constants = (('kui',172.), ('mui',1.42), ('kiu',.445), ('miu',.641), ('mif',-2.71313),('mfi',1.06534))\n\t\tself.verified = True\n\n\tdef fit_func(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):\n\t\tk_iu = kiu*np.exp(miu*x)\n\t\tk_ui = kui*np.exp(-mui*x)\n\t\tk_if = kif*np.exp(-mif*x)\n\t\tk_fi = kfi*np.exp(mfi*x)\n\t\tK_iu = k_iu / (k_iu+k_ui)\n\t\tk_obs = k_fi + k_if / (1.+1./K_iu)\n\t\treturn k_obs\n\n\tdef error_func(self, y):\n\t\treturn np.log(y)\n\n\tdef components(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):\n\t\tk_iu = kiu*np.exp(miu*x)\n\t\tk_ui = kui*np.exp(-mui*x)\n\t\tk_if = kif*np.exp(-mif*x)\n\t\tk_fi = kfi*np.exp(mfi*x)\n\t\tk_obs_I = k_iu + k_ui\n\t\tk_obs_N = k_fi + k_if\n\t\treturn {'kobs_I':k_obs_I} #, 'kobs_N':k_obs_N}\n\n\t# NOTE (ergm) EDIT 23/8/2017\n\t@property\n\tdef equation(self):\n\t\treturn r'k_{obs} = k_{fi}^{H_2O}\\exp(-m_{if}x) + k_{if}^{H_2O}\\exp((m_i - m_{if})x) / (1 + 1 /K_{iu}^{H_2O}\\exp((m_u-m_i)x)) \\\\ \\\n where \\\\ \\\n k_{ui} = k_{ui}^{H2O} exp(-m_{ui}x) \\\\ \\\n k_{iu} = k_{iu}^{H2O} exp(m_{iu}x) \\\\ \\\n K_{iu} = k_{iu}/(k_{iu} + k_{ui})'\n\n\nclass ThreeStateSequentialChevron(core.FitModel):\n\t\"\"\" Three state metastable intermediate chevron plot.\n\n\tA_1 = -(k_{ui}+k_{iu}+k_{if}+k_{fi})\n\tA_2 = k_{ui}*(k_{if}+k_{fi}) + k_{iu}*k_{uf}\n\n\tk_{obs} = 0.5 * (-A_2 - sqrt(A_2^2 - 4*A_1))\n\n\tNotes:\n\t\tBachmann and Kiefhaber. Apparent two-state tendamistat\n\t\tfolding is a sequential process along a defined route.\n\t\tJ Mol Biol (2001) vol. 306 (2) pp. 
375-386\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([2e4, 0.3480, 20.163, 1.327, 0.3033, 0.2431])\n\t\t# self.constants = (('mui',4.34965),('mif',0.68348),('mfi',0.97966))\n\t\tself.verified = True\n\n\tdef fit_func(self, x, kui, mui, kif, mif, kfi, mfi):\n\t\tkiu = 1.e4\n\t\tmiu = 0.\n\t\tk_ui = kui*np.exp(-mui*x)\n\t\tk_iu = kiu*np.exp(miu*x)\n\t\tk_if = kif*np.exp(-mif*x)\n\t\tk_fi = kfi*np.exp(mfi*x)\n\t\tlam_1 = -(k_ui + k_iu + k_if + k_fi)\n\t\tlam_2 = k_ui * (k_if+k_fi) + k_iu*k_fi\n\t\tk_obs = 0.5 * (-lam_1 - np.sqrt(lam_1**2 - 4*lam_2))\n\t\treturn k_obs\n\n\tdef error_func(self, y):\n\t\treturn np.log(y)\n\n\tdef components(self, x, kui, mui, kif, mif, kfi, mfi):\n\t\tkiu = 1.e4\n\t\tmiu = 0.\n\t\tk_ui = kui*np.exp(-mui*x)\n\t\tk_iu = kiu*np.exp(miu*x)\n\t\tk_if = kif*np.exp(-mif*x)\n\t\tk_fi = kfi*np.exp(mfi*x)\n\t\tk_TS1 = k_ui + (k_fi/kif)*k_iu\n\t\tk_TS2 = (k_ui/k_iu)*k_if + k_fi\n\t\treturn {'kTS1':k_TS1, 'kTS2':k_TS2}\n\n\t@property\n\tdef equation(self):\n\t\treturn r'k_{obs} = 0.5(-A_2 \\pm \\sqrt{A_2^2 - 4A_1}) \\\\ \\text{where}\\\\ A_1 = -(k_{ui} + k_{iu} + k_{if} + k_{fi}) \\\\A_2 = k_{ui}(k_{if} + k_{fi}) + k_{iu}k_{if} \\\\ \\text{and} \\\\k_{ui} = k_{ui}^{H_2O}\\exp(-m_{ui}x) \\\\k_{iu} = k_{iu}^{H_2O}\\exp(-m_{iu}x) \\\\ etc...'\n\n\n\nclass ParallelTwoStateChevron(core.FitModel):\n\t\"\"\" Two state chevron plot.\n\n\tk_{obs} = k_u^{H_2O}\\exp(m_ku*x) + k_u^{H_2O}\\exp(m_kf*x)\n\n\tNotes:\n\t\t[Reference]\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([50., 1.3480, 5e-4, 1., 150., 3.5])\n\n\n\tdef fit_func(self, x, kf_A, mf_A, ku_A, mu_A, kf_B, mf_B):\n\n\t\tif mf_A < 0. or mf_B < 0. or mu_A < 0.:\n\t\t\treturn core.FIT_ERROR(x)\n\n\t\tif kf_A <0. or ku_A <0. or kf_B < 0.:\n\t\t\treturn core.FIT_ERROR(x)\n\n\t\tdeltaG_A = kf_A / ku_A\n\t\tku_B = kf_B / deltaG_A\n\t\tmu_B = np.abs(mf_A + mu_A) - np.abs(mf_B)\n\t\tk_obs_A = kf_A*np.exp(-mf_A*x) + ku_A*np.exp(mu_A*x)\n\t\tk_obs_B = kf_B*np.exp(-mf_B*x) + ku_B*np.exp(mu_B*x)\n\t\tk_obs = k_obs_A + k_obs_B\n\t\treturn k_obs\n\n\tdef error_func(self, y):\n\t\treturn np.log(y)\n\n\tdef components(self, x, kf_A, mf_A, ku_A, mu_A, kf_B, mf_B):\n\t\tdeltaG_A = kf_A / ku_A\n\t\tku_B = kf_B / deltaG_A\n\t\tmu_B = np.abs(mf_A + mu_A) - np.abs(mf_B)\n\t\tk_obs_A = kf_A*np.exp(-mf_A*x) + ku_A*np.exp(mu_A*x)\n\t\tk_obs_B = kf_B*np.exp(-mf_B*x) + ku_B*np.exp(mu_B*x)\n\t\tk_obs = k_obs_A + k_obs_B\n\t\treturn {'kobs_A':k_obs_A, 'kobs_B':k_obs_B}\n\n\t@property\n\tdef equation(self):\n\t\treturn r'k_{obs} = k_u^{H_2O}\\exp(m_{ku}x) + k_u^{H_2O}\\exp(m_{kf}x)'\n\n\n\n\nclass ParallelTwoStateUnfoldingChevron(core.FitModel):\n\t\"\"\" Two state chevron plot.\n\n\tk_{obs} = k_u^{H_2O}\\exp(m_ku*x) + k_u^{H_2O}\\exp(m_kf*x)\n\n\tNotes:\n\t\t[Reference]\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([5e-4, 1., 1e-5, 1.5])\n\n\n\tdef fit_func(self, x, ku_A, mu_A, ku_B, mu_B):\n\n\t\tif mu_A < 0. 
or mu_B < 0.:\n\t\t\treturn core.FIT_ERROR(x)\n\n\t\tk_obs_A = ku_A*np.exp(mu_A*x)\n\t\tk_obs_B = ku_B*np.exp(mu_B*x)\n\t\tk_obs = k_obs_A + k_obs_B\n\t\treturn k_obs\n\n\tdef error_func(self, y):\n\t\treturn np.log(y)\n\n\tdef components(self, x, ku_A, mu_A, ku_B, mu_B):\n\t\tk_obs_A = ku_A*np.exp(mu_A*x)\n\t\tk_obs_B = ku_B*np.exp(mu_B*x)\n\t\tk_obs = k_obs_A + k_obs_B\n\t\treturn {'kobs_A':k_obs_A, 'kobs_B':k_obs_B}\n\n\n\n\nclass TwoStateChevronMovingTransition(core.FitModel):\n\t\"\"\" Two state chevron with moving transition state.\n\tSecond order polynomial.\n\n\tk_u = k_u^{H_2O} * \\exp(m_{ku}*x) * \\exp(m_{ku}^' * x^2)\n\tk_f = k_f^{H_2O} * \\exp(m_{kf}*x) * \\exp(m_{kf}^' * x^2)\n\n\tk_{obs} = k_u + k_f\n\n\tNotes:\n\t\tTernstrom et al. From snapshot to movie: phi analysis\n\t\tof protein folding transition states taken one step\n\t\tfurther.\n\t\tPNAS (1999) vol. 96 (26) pp. 14854-9\n\t\"\"\"\n\tdef __init__(self):\n\t\tcore.FitModel.__init__(self)\n\t\tfit_args = self.fit_func_args\n\t\tself.params = tuple( [(fit_args[i],i) for i in xrange(len(fit_args))] )\n\t\tself.default_params = np.array([50., 1.3480, 5e-4, 1., 1.])\n\t\tself.verified = False\n\n\tdef fit_func(self, x, ku, mu, kf, mf, m_prime):\n\t\tk_obs = ku*(np.exp(mu*x)+np.exp(m_prime*x*x)) + kf*(np.exp(mf*x)+np.exp(m_prime*x*x))\n\t\treturn k_obs\n\n\tdef error_func(self, y):\n\t\treturn np.log(y)\n\n\t@property\n\tdef equation(self):\n\t\treturn r'k_u = k_u^{H_2O} \\cdot \\exp(m_{ku}*x) \\cdot \\exp(m^{\\'} x^2) \\\\ \\\n\t\t\t\tk_f = k_f^{H_2O} \\cdot \\exp(m_{kf}*x) \\cdot \\exp(m^{\\'} x^2) \\\\ \\\n\t\t\t\tk_{obs} = k_u + k_f'\n\n\n\n\n\nif __name__ == \"__main__\":\n\tget_models()\n","sub_path":"pyfolding/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":20686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"241622984","text":"# %%\nimport numpy as np\nimport pandas as pd\nimport os\nfrom sklearn.preprocessing import StandardScaler\n\n# %%\ndg = pd.read_csv('files/match_player_formatted_with_rating.txt', '|')\n\nds = dg.sort_values(by = ['DATE', 'COUNTRIES', 'GROUND', 'PLAYER_RATING'], ascending = [1, 1, 1, 0])\nds = ds.reindex(columns = ['DATE', 'COUNTRIES', 'GROUND', 'TEAM', 'PLAYER_NAME', 'PLAYER_ID', 'PLAYER_RATING'])\n\n# %%\ndef pivot(df, dq, attr, j, index):\n total = 0\n \n for i in range(0, 11):\n total += dq.iloc[i][attr]\n \n df.at[index, f'{attr}.MEAN.{j}'] = total / 11\n\n# %%\n#T1 = 'New Zealand'\n#T2 = 'India'\n#IS_HOST_1 = 0\n#IS_HOST_2 = 0\n#RANK_1 = 1\n#RANK_2 = 3\n#TEMP = 13.6\n\n#%%\nwc = pd.read_csv('data/Cricket_World_cup_fixtures.csv', encoding='cp1252')\ndm = pd.read_csv('files/2019_lineup.csv', ',')\ndm['PLAYER_RATING'] = dg['PLAYER_RATING'].min()\n \ntestset = pd.DataFrame()\nfor index, r0 in wc.iterrows():\n T1 = r0['TEAM.1']\n T2 = r0['TEAM.2']\n IS_HOST_1 = r0['IS_HOST.1']\n IS_HOST_2 = r0['IS_HOST.2']\n RANK_1 = r0['RANK.1']\n RANK_2 = r0['RANK.2']\n TEMP = r0['TEMP.HOST']\n TEMP_1 = r0['TEMP.1']\n TEMP_2 = r0['TEMP.2']\n print(T1, T2, IS_HOST_1, IS_HOST_2, RANK_1, RANK_2)\n #T1 = 'India'\n #T2 = 'South Africa'\n #IS_HOST_1 = 0\n #IS_HOST_2 = 0\n #RANK_1 = 2\n #RANK_2 = 3\n #TEMP = 21\n #TEMP_1 = 32\n #TEMP_2 = 18\n \n dn = dm[(dm['TEAM'] == T1) | (dm['TEAM'] == T2)].copy()\n\n for row in dn.itertuples():\n dd = dg[dg['PLAYER_ID'] == row.PLAYER]\n dn.at[row.Index, 'TEAM'] = 0 if dn.at[row.Index, 'TEAM'] == T1 else 1\n if dd.index.size == 0:\n print('error', row.PLAYER)\n continue\n dn.at[row.Index, 
'PLAYER_RATING'] = dd.iloc[-1]['PLAYER_RATING']\n dn.at[row.Index, 'INNINGS'] = dd.iloc[-1]['INNINGS']\n dn.at[row.Index, 'R_T5'] = dd.iloc[-1]['R_T5']\n dn.at[row.Index, 'R_T10'] = dd.iloc[-1]['R_T10'] \n dn.at[row.Index, 'BAT_AVG_T5'] = dd.iloc[-1]['BAT_AVG_T5']\n dn.at[row.Index, 'BAT_AVG_T10'] = dd.iloc[-1]['BAT_AVG_T10']\n dn.at[row.Index, 'BAT_SR_T5'] = dd.iloc[-1]['BAT_SR_T5']\n dn.at[row.Index, 'BAT_SR_T10'] = dd.iloc[-1]['BAT_SR_T10']\n dn.at[row.Index, 'WIN_T5'] = dd.iloc[-1]['WIN_T5']\n dn.at[row.Index, 'WIN_T10'] = dd.iloc[-1]['WIN_T10']\n dn.at[row.Index, 'O_T5'] = dd.iloc[-1]['O_T5']\n dn.at[row.Index, 'O_T10'] = dd.iloc[-1]['O_T10']\n dn.at[row.Index, 'W_T5'] = dd.iloc[-1]['W_T5']\n dn.at[row.Index, 'W_T10'] = dd.iloc[-1]['W_T10']\n dn.at[row.Index, 'BOWL_ER_T5'] = dd.iloc[-1]['BOWL_ER_T5']\n dn.at[row.Index, 'BOWL_ER_T10'] = dd.iloc[-1]['BOWL_ER_T10']\n dn.at[row.Index, 'BOWL_AVG_T5'] = dd.iloc[-1]['BOWL_AVG_T5']\n dn.at[row.Index, 'BOWL_AVG_T10'] = dd.iloc[-1]['BOWL_AVG_T10']\n dn.at[row.Index, 'BOWL_SR_T5'] = dd.iloc[-1]['BOWL_SR_T5']\n dn.at[row.Index, 'BOWL_SR_T10'] = dd.iloc[-1]['BOWL_SR_T10']\n dn.at[row.Index, 'TEMP_T5'] = abs(dd.iloc[-1]['TEMP_M_T5'] - TEMP)\n dn.at[row.Index, 'TEMP_T10'] = abs(dd.iloc[-1]['TEMP_M_T10'] - TEMP)\n \n dn.fillna(0, inplace = True)\n \n dh = dn.sort_values(by = ['TEAM', 'PLAYER_RATING'], ascending = [1, 0])\n dh = dh[dh['TEAM'] == 0][:11].append(dh[dh['TEAM'] == 1][:11])\n\n ds = dh.sort_values(by = ['PLAYER_RATING'], ascending = [0])\n \n df = pd.DataFrame([[T1, T2, IS_HOST_1, IS_HOST_2, \n RANK_1, RANK_2, TEMP, TEMP_1, TEMP_2]],\n columns = ['TEAM.1','TEAM.2','IS_HOST.1','IS_HOST.2',\n 'RANK.1','RANK.2', 'TEMP.HOST', 'TEMP.1', 'TEMP.2'])\n \n for r1 in df.itertuples():\n for i in range(0, 2):\n dq = dh[dh['TEAM'] == i]\n \n pivot(df, dq, 'PLAYER_RATING', i + 1, r1[0])\n pivot(df, dq, 'INNINGS', i + 1, r1[0])\n pivot(df, dq, 'R_T5', i + 1, r1[0])\n pivot(df, dq, 'R_T10', i + 1, r1[0])\n pivot(df, dq, 'BAT_AVG_T5', i + 1, r1[0])\n pivot(df, dq, 'BAT_AVG_T10', i + 1, r1[0])\n pivot(df, dq, 'BAT_SR_T5', i + 1, r1[0])\n pivot(df, dq, 'BAT_SR_T10', i + 1, r1[0])\n pivot(df, dq, 'WIN_T5', i + 1, r1[0])\n pivot(df, dq, 'WIN_T10', i + 1, r1[0])\n pivot(df, dq, 'O_T5', i + 1, r1[0])\n pivot(df, dq, 'O_T10', i + 1, r1[0])\n pivot(df, dq, 'W_T5', i + 1, r1[0])\n pivot(df, dq, 'W_T10', i + 1, r1[0])\n pivot(df, dq, 'BOWL_AVG_T5', i + 1, r1[0])\n pivot(df, dq, 'BOWL_AVG_T10', i + 1, r1[0])\n pivot(df, dq, 'BOWL_ER_T5', i + 1, r1[0])\n pivot(df, dq, 'BOWL_ER_T10', i + 1, r1[0])\n pivot(df, dq, 'BOWL_SR_T5', i + 1, r1[0])\n pivot(df, dq, 'BOWL_SR_T10', i + 1, r1[0])\n pivot(df, dq, 'TEMP_T5', i + 1, r1[0])\n pivot(df, dq, 'TEMP_T10', i + 1, r1[0])\n testset = testset.append(df)\n\n#%%\ndf = testset.copy()\ndf['diff_rank_t1t2'] = df['RANK.1'] - df['RANK.2']\ndf['diff_rank_t2t1'] = df['RANK.2'] - df['RANK.1']\n\ndf['diff_temp_tHt1'] = df['TEMP.HOST'] - df['TEMP.1']\ndf['diff_temp_tHt2'] = df['TEMP.HOST'] - df['TEMP.2']\n\ndf['diff_PR_t1t2'] = df['PLAYER_RATING.MEAN.1'] - df['PLAYER_RATING.MEAN.2']\ndf['diff_PR_t2t1'] = df['PLAYER_RATING.MEAN.2'] - df['PLAYER_RATING.MEAN.1']\n\ndf['diff_BA_t1t2'] = df['BAT_AVG_T10.MEAN.1'] - df['BAT_AVG_T10.MEAN.2']\ndf['diff_BA_t2t1'] = df['BAT_AVG_T10.MEAN.2'] - df['BAT_AVG_T10.MEAN.1']\n\ndf['diff_BWLA_t1t2'] = df['BOWL_AVG_T10.MEAN.1'] - df['BOWL_AVG_T10.MEAN.2']\ndf['diff_BWLA_t2t1'] = df['BOWL_AVG_T10.MEAN.2'] - df['BOWL_AVG_T10.MEAN.1']\n\n# %%\nX = df[cols]\nX = StandardScaler().fit_transform(X)\n\n# %%\ny = 
clf.predict(X)\npred = list(zip(testset['TEAM.1'], testset['TEAM.2'], y))\npred = pd.DataFrame(pred, columns = ['TEAM.1', 'TEAM.2', 'WIN'])\npred['WIN_R'] = 1 - pred['WIN']\n\n#%%\na = pred.groupby('TEAM.1')['WIN'].sum()\nb = pred.groupby('TEAM.2')['WIN_R'].sum()\n\n#%%\ndf.to_csv('errr.csv')\n","sub_path":"analysis_NG/analyze_prediction.py","file_name":"analyze_prediction.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"642031625","text":"\n# NumPy is the most fundamental library for expressing N-dimensional arrays\n# Used through a Python interface, implemented in C, with excellent computational speed\n# The foundational library for Python data analysis and scientific computing, underpinning Pandas and others\n# Provides direct matrix operations, broadcasting functions, linear algebra and more\n\n# The Pandas library provides simple, easy-to-use data structures and data analysis tools\n# Understand the relationship between data types and indexes: operating on the index is operating on the data\n# Python's main data analysis library, built on top of NumPy\n\n# The SciPy library covers mathematical, scientific and engineering computation\n# Provides a collection of mathematical algorithms and engineering data operations\n# Similar to Matlab; usable for applications such as Fourier transforms and signal processing\n# Python's main scientific computing library, built on top of NumPy\n\nimport numpy\nimport pandas\nimport scipy\n\n\ndef npSum(l1, l2):\n a = numpy.array(l1)\n b = numpy.array(l2)\n c = a**2 + b**3\n return c\n\n\na = [0, 1, 2, 3, 4]\nb = [9, 8, 7, 6, 5]\nprint(npSum(a, b))\n","sub_path":"Python计算生态概览/数据分析常用库.py","file_name":"数据分析常用库.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"557982186","text":"from astropy.table import Table\nfrom astropy.io import fits\nimport numpy\nfrom time import strftime\n\nimport re\n\ndef get_file_data(filename):\n \"\"\"\n Reads a file from the ASAS and returns a list of dictionaries containing all the pairs key-value\n from it. For each dictionary, for the tables, each column is represented as a list on the\n dictionary's key whose name matches the column's name.\n \"\"\"\n document= open(filename,'r')\n data=[]\n pairs={'ORIGIN':filename.replace('/','\\\\'),'FRST_MAG':'None'}\n actuald=None #Will be used to save the magnitude's table\n for line in document:\n if line[0]=='#':\n key= re.match(r\"#.*=\",line)\n if key:\n #It's a key\n key= key.group()[1:-1]\n values= (line.strip()[len(key)+2:]).split(' ')\n values= [x for x in values if x!='']\n values= tuple(values)\n if len(values)==1:\n pairs[key]= values[0]\n else:\n pairs[key]= values\n #If a data block has been completely read, add it to the data.\n if actuald!=None:\n pairs.update(actuald)\n data.append(pairs)\n actuald=None\n pairs={'ORIGIN':filename.replace('/','\\\\'),'FRST_MAG':'None'}\n else:\n #It's a multiple key description\n keys= re.findall(r\"\\s\\S+\",line)\n keys= [x[1:] for x in keys]\n #Check if these are MAGnitudes, if so, add the first one to the dictionary.\n for k in keys:\n if k[:4]==\"MAG_\":\n pairs['FRST_MAG']= k\n break\n #Restart the current dictionary\n actuald={}\n for k in keys:\n actuald[k]=[]\n else:\n #Extract the values and append them to the dictionary of multiple keys.\n values= re.findall(r\"\\S+[\\s$]\",line)\n values= [x[:-1] for x in values]\n #Try conversion to float, and if it doesn't work, append values as they are.\n for i in range(len(values)):\n actuald[keys[i]].append(values[i])\n pairs.update(actuald)\n data.append(pairs)\n for dictionary in data:\n for key in dictionary:\n val= dictionary[key]\n #Try the conversion of columns to float\n if type(val)==list:\n try: dictionary[key]= [float(x) for x in val]\n except ValueError as er: pass\n return data\n\n\ndef get_files_data(filenames):\n \"\"\"\n The same as get_file_data but receives multiple files.\n \"\"\"\n dict_list=[]\n for fip in filenames:\n dict_list= dict_list+ get_file_data(fip)\n return dict_list\n\ndef 
save_to_fits(dictionary_list,filename,type_to_format={float:\"E\",str:\"20A\"}):\n \"\"\"\n Saves all the dictionaries of the list onto a FITS file.\n \"\"\"\n hdus= fits.HDUList()\n #Creating the primary hdu to contain the meta\n primary_hdu = fits.PrimaryHDU()\n primary_hdu.header['history']= \"From the ASAS records, parsed by ACALIB ASAS parser the \"+str(strftime(\"%d/%m/%Y\")+\" (dd/mm/yyyy)\")\n hdus.append(primary_hdu)\n #Creating the bintables HDUS for each dictionary\n for dictionary in dictionary_list:\n columns= []\n for key in dictionary:\n val= dictionary[key]\n if type(val)==list:\n #Goes to the table.\n columns.append( fits.Column(name=key, array=val, format=type_to_format[type(val[0])] ))\n column_definitions = fits.ColDefs(columns)\n tbhdu = fits.BinTableHDU.from_columns(column_definitions)\n for key in dictionary:\n val= dictionary[key]\n if not type(val)==list:\n #Goes to the HDU's header\n if type(val)!=tuple:\n tbhdu.header[key]= val\n else:\n val= tuple([''.join(x) for x in ' '.join(val).split(';')])\n tbhdu.header[key]= val\n hdus.append(tbhdu)\n\n output_file= open(filename,\"w\")\n hdus.writeto(output_file)\n output_file.close()\n\n","sub_path":"varstarscan/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"383857232","text":"import sys\nprint(sys.version)\n\ndef isPrime(number):\n foundFactor = False\n endRange = 1\n if number % 2 == 0:\n endRange = int(number / 2) + 1\n else:\n endRange = int((number + 1) / 2) + 1\n\n for x in range(2,endRange,1):\n if number % x == 0:\n foundFactor = True\n break\n\n if foundFactor == True:\n return False\n else:\n return True\n\ndef isVoodooPrime(number):\n foundNumber = False\n if isPrime(number) == True:\n primeString = str(1 / number)\n print(primeString)\n if primeString.find(str(number)) >= 0:\n return True\n else:\n return False\n else:\n return False\n\nfor i in range(1,30,1):\n print(str(i) + \":\")\n print(isVoodooPrime(i))\n","sub_path":"VoodooPrime.py","file_name":"VoodooPrime.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"373661486","text":"# coding: UTF-8\r\n\"\"\"\r\nScript: Prog1/damier\r\n\"\"\"\r\n\r\n# Imports\r\nimport turtle\r\n\r\n# Fonctions\r\ndef carre(x, y, taille, couleur):\r\n turtle.speed(10)\r\n turtle.up()\r\n turtle.goto(x, y)\r\n turtle.down()\r\n turtle.pencolor(couleur)\r\n turtle.fillcolor(couleur)\r\n turtle.begin_fill()\r\n for i in range(4):\r\n turtle.forward(taille)\r\n turtle.right(90)\r\n i += 1\r\n turtle.end_fill()\r\n turtle.up()\r\n return None\r\n\r\ndef ligne(x, y, taille, couleurs):\r\n turtle.speed(10)\r\n turtle.up()\r\n turtle.goto(x, y)\r\n turtle.down()\r\n for i in range(5):\r\n carre(x, y, taille, couleurs[0])\r\n turtle.forward(taille)\r\n x += taille\r\n carre(x, y, taille, couleurs[1])\r\n x += taille\r\n return None\r\n\r\ndef deux_lignes(x, y, taille, couleurs):\r\n turtle.speed(10)\r\n turtle.up()\r\n turtle.goto(x, y)\r\n turtle.down()\r\n x2 = x\r\n y2 = y - taille\r\n ligne(x, y, taille, couleurs)\r\n for i in range(5):\r\n carre(x2, y2, taille, couleurs[1])\r\n turtle.forward(taille)\r\n x2 += taille\r\n carre(x2, y2, taille, couleurs[0])\r\n x2 += taille\r\n return None\r\n\r\ndef damier(x, y, taille, couleurs):\r\n turtle.speed(10)\r\n for i in range(5):\r\n deux_lignes(x, y, taille, couleurs)\r\n y = y - taille * 2\r\n return 
None\r\n\r\n# Programme principal\r\ndef main():\r\n damier(-200, 100, 25, ['pink', 'green'])\r\n turtle.exitonclick()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n# Fin\r\n","sub_path":"damier.py","file_name":"damier.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"427073496","text":"\r\n\r\n## Classification Tree on Balanced Dataset\r\n## CART Model on IRIS Data\r\n## Three species of iris flower setosa, versicolor, virginica\r\nfrom sklearn.datasets import load_iris\r\n\r\niris = load_iris()\r\n\r\nimport pydotplus\r\n\r\n## Get the Features and Labels\r\nprint(iris.feature_names)\r\nprint(iris.target_names)\r\nprint(iris.target)\r\n\r\n\r\ntype(iris.data)\r\nprint(iris.data)\r\n\r\n\r\nX = iris.data\r\ny = iris.target\r\n\r\ntype(X)\r\ntype(y)\r\n# create training and testing vars\r\nfrom sklearn.cross_validation import train_test_split\r\nX_train,X_test,y_train,y_test = train_test_split(\r\n X,y,test_size=50,\r\n random_state = 42)\r\n\r\nprint(X_train.shape, y_train.shape)\r\nprint(X_test.shape, y_test.shape)\r\n\r\nimport numpy as np\r\ny_freq = np.bincount(y_train)\r\ny_val = np.nonzero(y_freq)[0]\r\nnp.vstack((y_val,y_freq[y_val])).T\r\n\r\n## Building the Classifier\r\n## Importing the Classification Tree Package\r\nfrom sklearn import tree\r\n\r\n## Creating the Classifier Object\r\nclf = tree.DecisionTreeClassifier()\r\n\r\n## Fitting the Classification Tree on Training Dataset\r\nclf = clf.fit(X_train, y_train)\r\n\r\n\r\n## Visualize the tree\r\n\r\nimport io\r\nfrom sklearn.tree import export_graphviz\r\nfrom scipy import misc\r\nfrom matplotlib import pyplot as plt\r\nf = io.StringIO()\r\ndot_data = export_graphviz(clf, out_file = f,\r\n feature_names=iris.feature_names)\r\npydot_graph = pydotplus.graph_from_dot_data(f.getvalue())\r\nimport graphviz\r\n## The below command requires graphviz to be installed on your machine\r\n## Path setting be done to graphviz bin folder\r\n## Link to download graphviz \r\n## https://graphviz.gitlab.io/_pages/Download/Download_windows.html\r\npydot_graph.write_png(\"decision_tree.png\")\r\nimg = misc.imread(\"decision_tree.png\")\r\nplt.rcParams[\"figure.figsize\"]=(20,20)\r\nplt.imshow(img)\r\n\r\n##f.getvalue()\r\n#dot_data = tree.export_graphviz(clf,out_file = None)\r\n#graph = graphviz.Source(dot_data)\r\n#graph.render(\"iris\")\r\n\r\n## Predict based on the Classifier\r\npred_y_test = clf.predict(X_test)\r\npred_y_test\r\n\r\n## Let us see the classification accuracy of our model\r\nfrom sklearn.metrics import accuracy_score\r\nscore = accuracy_score(y_test, pred_y_test)\r\nscore\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nconfusion_matrix(y_test, pred_y_test)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nCreated on Sun Aug 6 23:42:04 2017\r\n@author: Rajesh Jakhotia\r\n@Company: K2 Analytics Finishing School Pvt. 
Ltd\r\n@Email: ar.jakhotia@k2analytics.co.in\r\n@Website: k2analytics.co.in\r\n\"\"\"\r\n## Classification Tree on Unbalanced Dataset\r\nimport os\r\nimport pandas as pd\r\n\r\n#Set the working directory\r\n#os.chdir(\"D:\\K2Analytics\\Datafile\")\r\n\r\n#Load the Dataset\r\nCTDF_dev = pd.read_csv(\"DEV_SAMPLE.csv\")\r\nCTDF_holdout = pd.read_csv(\"HOLDOUT_SAMPLE.csv\")\r\n\r\nprint( len(CTDF_dev), len(CTDF_holdout))\r\nCTDF_dev.head()\r\n\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#Data Preprocessing\r\n#Splitting into features and response variables\r\nX = CTDF_dev[['Age', 'Gender', 'Balance', 'Occupation',\r\n 'No_OF_CR_TXNS', 'AGE_BKT', 'SCR', 'Holding_Period']]\r\n\r\n#Categorical Variable to Numerical Variables\r\nX_train = pd.get_dummies(X)\r\nX_train.columns\r\n\r\n\r\ny_train = CTDF_dev[\"Target\"]\r\n\r\nprint (type(X_train) , type(y_train))\r\n\r\n#Decision Tree\r\n#Loading the library\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n#Setting the parameter\r\nclf = DecisionTreeClassifier(criterion = \"gini\" , \r\n min_samples_split = 100,\r\n min_samples_leaf = 10,\r\n max_depth = 50)\r\n\r\n#Calling the fit function to built the tree\r\nclf.fit(X_train,y_train)\r\n\r\n\r\nimport pydot\r\nfrom sklearn.tree import tree\r\nfrom sklearn.tree import export_graphviz\r\nfrom sklearn.externals.six import StringIO \r\ndot_data = StringIO()\r\nfeature_list = list(X_train.columns.values)\r\nexport_graphviz(clf, \r\n out_file = dot_data, \r\n feature_names = feature_list)\r\ngraph=pydot.graph_from_dot_data(dot_data.getvalue())\r\ngraph[0].write_pdf(\".\\classification_tree_output.pdf\")\r\n\r\n\r\n\r\nNodes = pd.DataFrame(clf.tree_.__getstate__()[\"nodes\"])\r\nNodes\r\n\r\nfeature_importance = pd.DataFrame([X_train.columns,\r\n clf.tree_.compute_feature_importances()])\r\nfeature_importance.T \r\n\r\n\r\n\r\n## Let us see how good is the model\r\npred_y_train = clf.predict(X_train )\r\npred_y_train\r\n\r\n## Let us see the classification accuracy of our model\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.metrics import auc\r\nscore = accuracy_score(y_train, pred_y_train)\r\nscore\r\n\r\ny_train_prob = clf.predict_proba(X_train)\r\n## AUC\r\nfpr, tpr, thresholds = roc_curve(y_train, y_train_prob[:,1])\r\nauc(fpr, tpr)\r\n\r\n## Let us see how good is the model\r\nX_holdout = CTDF_holdout[['Age', 'Gender', 'Balance', 'Occupation',\r\n 'No_OF_CR_TXNS', 'AGE_BKT', 'SCR', 'Holding_Period']]\r\nX_test = pd.get_dummies(X_holdout)\r\ny_test = CTDF_holdout[\"Target\"]\r\n\r\n\r\npred_y_test = clf.predict(X_test)\r\nscore_h = accuracy_score(y_test, pred_y_test)\r\nscore_h\r\n\r\ny_test_prob = clf.predict_proba(X_test)\r\nfpr, tpr, thresholds = roc_curve(y_test, y_test_prob[:,1])\r\nauc(fpr, tpr)\r\n\r\n\r\ny_freq = np.bincount(y_train)\r\ny_val = np.nonzero(y_freq)[0]\r\nnp.vstack((y_val,y_freq[y_val])).T\r\n\r\n#Cross validation function\r\nfrom sklearn.cross_validation import cross_val_score\r\nscores = cross_val_score(clf, X_train , y_train, cv = 10, scoring='roc_auc')\r\nscores.mean()\r\nscores.std()\r\n\r\ny_train_prob = clf.predict_proba(X_train)\r\nfpr, tpr, thresholds = roc_curve(y_train, y_train_prob[:,1])\r\nauc(fpr, tpr)\r\n\r\n\r\ny_test_prob = clf.predict_proba(X_test)\r\nfpr, tpr, thresholds = roc_curve(y_test, y_test_prob[:,1])\r\nauc(fpr, tpr)\r\n\r\n\r\n## Tuning the Classifier using GridSearchCV\r\nfrom sklearn.grid_search import GridSearchCV\r\nhelp(GridSearchCV)\r\n\r\nparam_dist 
= {\"criterion\": [\"gini\",\"entropy\"],\r\n \"max_depth\": np.arange(3,10),\r\n }\r\n\r\ntree = DecisionTreeClassifier(min_samples_split = 100,\r\n min_samples_leaf = 10)\r\n\r\ntree_cv = GridSearchCV(tree, param_dist, cv = 10, \r\n scoring = 'roc_auc', verbose = 100)\r\n\r\ntree_cv.fit(X_train,y_train)\r\n\r\n## Building the model using best combination of parameters\r\nprint(\"Tuned Decision Tree parameter : {}\".format(tree_cv.best_params_))\r\n\r\nclassifier = tree_cv.best_estimator_\r\n\r\nclassifier.fit(X_train,y_train)\r\n\r\n\r\n\r\n#predicting probabilities\r\ny_train_prob = classifier.predict_proba(X_train)\r\nfpr, tpr, thresholds = roc_curve(y_train, y_train_prob[:,1])\r\nauc_d = auc(fpr, tpr)\r\nauc_d\r\ny_test_prob = classifier.predict_proba(X_test)\r\nfpr, tpr, thresholds = roc_curve(y_test, y_test_prob[:,1])\r\nauc_h = auc(fpr, tpr)\r\nauc_h\r\n\r\n## Rank Ordering\r\nPrediction = classifier.predict_proba(X_train)\r\nCTDF_dev[\"prob_score\"] = Prediction[:,1]\r\n\r\n#scoring step\r\n#decile code\r\ndef deciles(x):\r\n decile = pd.Series(index=[0,1,2,3,4,5,6,7,8,9])\r\n for i in np.arange(0.1,1.1,0.1):\r\n decile[int(i*10)]=x.quantile(i)\r\n def z(x):\r\n if x 1: \n counter += collatz(3*tmp + 1)\n cache[n] = counter\n return counter\n\nans = 0\nmax_len = 0\nfor i in range(1, 1000000):\n tmp = collatz(i)\n if tmp > max_len:\n (ans, max_len) = (i, tmp)\n\nprint(ans)\n","sub_path":"problem_014.py","file_name":"problem_014.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"352983789","text":"\nimport os\n\nimport pygame as pg\nfrom pygame.locals import *\n\nimport agent, event\n\ncolor = {\n 'gray': (100, 100, 100),\n 'white': (255, 255, 255),\n 'light_green': (20, 60, 20),\n 'danger': (150, 50, 50)\n}\n\n\nhelp = \"\"\"\n CMPSC360 Project GoldMiner Demo\n *** I: Intelligent agent mode\n *** V: View what state it is in each cell\n *** R: Reset the board\n *** H: Display help information\n *** Q: Quit the game\n *** Enter: Next step\n *** ->: turn left\n *** <-: turn right\n Press H to hide the help information \n \n Have fun \\(^o^)/\\(^o^)/\\(^o^)/\n\"\"\"\n\n\nstatus_font = (os.path.join('helpInfoFont', 'Pacifico.ttf'), 22)\n\n\ndef load_image(name):\n fullname = os.path.join('images', name)\n image = pg.image.load(fullname)\n image = image.convert()\n image.set_colorkey(image.get_at((0, 0)), RLEACCEL)\n return image, image.get_rect()\n\n\nclass MainFrame:\n\n def __init__(self, event_controller):\n self.event_controller = event_controller\n self.event_controller.register_listener(self)\n\n pg.init()\n self.screen = pg.display.set_mode((769, 920), pg.RESIZABLE)\n pg.display.set_caption('GoldMiner (360 project game)')\n self.background = pg.Surface(self.screen.get_size())\n self.background.convert()\n self.background.fill(color[\"white\"])\n\n self.screen.blit(self.background, (0, 0))\n pg.display.update()\n\n self.back_sprites = pg.sprite.RenderUpdates()\n self.front_sprites = pg.sprite.RenderUpdates()\n\n self.sectors = {}\n self.player_sector = None\n self.view_all = False\n\n self.help_display = HelpDisplay()\n self.help_display.rect.center = self.background.get_rect().center\n\n def app_start(self):\n dx, dy = (192, 192)\n x, y = (3, 4)\n rect = pg.Rect(2 + 192 * 3, -192 + 2, 188, 188)\n for count in xrange(16):\n if count % 4 == 0:\n x -= 3\n y -= 1\n rect = rect.move(-3 * dx, dy)\n else:\n x += 1\n rect = rect.move(dx, 0)\n new_sector = Sector(self.back_sprites)\n 
new_sector.index = (x, y)\n new_sector.rect = rect\n self.sectors[(x, y)] = new_sector\n\n\n self.player = Player()\n\n ev = event.GenerateRequest()\n self.event_controller.post(ev)\n\n def player_moveto(self, pos):\n self.player_sector = self.sectors[pos]\n self.player.moveto = self.player_sector.rect.center\n\n def help(self):\n if not self.front_sprites.has(self.help_display):\n self.help_display.add(self.front_sprites)\n else:\n self.help_display.remove(self.front_sprites)\n\n\n def player_forward(self, ev):\n if not self.front_sprites.has(self.player):\n self.player.add(self.front_sprites)\n self.player.update_facing(agent.facing_list['right'])\n self.player_moveto(ev.pos)\n self.sectors[ev.pos].visit()\n self.front_sprites.update()\n self.redraw()\n\n def player_turn(self, ev):\n self.player.update_facing(ev.facing)\n\n\n\n def found_danger(self, ev):\n self.sectors[ev.pos].set_danger()\n\n def view(self, ev):\n if self.view_all:\n self.view_all = False\n else:\n self.view_all = True\n\n for s in self.sectors.values():\n if not s.visited:\n s.view(self.view_all)\n\n def world_built(self, ev):\n for key, sector in self.sectors.items():\n item = ev.world[key]\n for x in range(5):\n if item[x] == 1:\n thing = agent.map_list[x].lower()\n sector.things.append(thing)\n\n def reset_world(self, ev):\n for sector in self.sectors.values():\n sector.visited = False\n sector.danger = False\n sector.image.fill(color['gray'])\n sector.things = []\n self.player.remove(self.front_sprites)\n\n ev = event.GenerateRequest()\n self.event_controller.post(ev)\n\n def redraw(self):\n # Draw everything\n self.back_sprites.clear(self.screen, self.background)\n self.front_sprites.clear(self.screen, self.background)\n\n self.back_sprites.update()\n self.front_sprites.update()\n\n dirty_rects1 = self.back_sprites.draw(self.screen)\n dirty_rects2 = self.front_sprites.draw(self.screen)\n\n dirty_rects = dirty_rects1 + dirty_rects2\n pg.display.update(dirty_rects)\n\n def notify(self, ev):\n if isinstance(ev, event.Tick):\n self.redraw()\n elif isinstance(ev, event.AppStart):\n self.app_start()\n elif isinstance(ev, event.Reset):\n self.reset_world(ev)\n elif isinstance(ev, event.WorldBuilt):\n self.world_built(ev)\n elif isinstance(ev, event.PlayerForward):\n self.player_forward(ev)\n elif isinstance(ev, event.PlayerTurn):\n self.player_turn(ev)\n\n elif isinstance(ev, event.View):\n self.view(ev)\n elif isinstance(ev, event.Help):\n self.help(ev)\n elif isinstance(ev, event.FoundDanger):\n self.found_danger(ev)\n\n\nclass HelpDisplay(pg.sprite.Sprite):\n \"\"\"Help information\"\"\"\n\n def __init__(self):\n pg.sprite.Sprite.__init__(self)\n self.image = pg.Surface((500, 580))\n self.image.set_alpha(255 * 0.6)\n self.image.fill(color['white'])\n self.rect = self.image.get_rect()\n self.text = help\n\n self.draw_text()\n\n def draw_text(self):\n fo = pg.font.Font(*status_font)\n\n prevpos = None\n for line in self.text.split('\\n'):\n textr = fo.render(line, 1, color['danger'])\n textrpos = textr.get_rect()\n textrpos.left = self.image.get_rect().left\n if prevpos:\n textrpos.top = prevpos.bottom\n else:\n textrpos.top = self.image.get_rect().top\n prevpos = textrpos\n self.image.blit(textr, textrpos)\n\n\nclass Sector(pg.sprite.Sprite):\n\n def __init__(self, group=None):\n pg.sprite.Sprite.__init__(self, group)\n self.image = pg.Surface((180, 180))\n self.image.fill(color['gray'])\n\n self.index = None\n self.visited = False\n self.danger = False\n self.view = False\n self.things = []\n\n def 
draw_things(self):\n if self.danger:\n self.image.fill(color['danger'])\n for t in self.things:\n self.draw_img(t)\n\n def clear_things(self):\n if self.danger:\n self.image.fill(color['danger'])\n else:\n self.image.fill(color['gray'])\n\n def view(self, view_flag):\n self.view = view_flag\n if view_flag:\n self.draw_things()\n else:\n self.clear_things()\n\n def set_danger(self):\n if not self.danger:\n self.danger = True\n self.image.fill(color['danger'])\n if self.view:\n self.draw_things()\n\n def draw_img(self, s):\n image, rect = load_image('%s.png' % s)\n rect.center = self.image.get_rect().center\n\n self.image.blit(image, rect)\n\n def visit(self):\n if not self.visited:\n self.visited = True\n self.image.fill((180, 180, 180))\n self.draw_things()\n\n def update(self):\n pass\n\n\n\nclass Player(pg.sprite.Sprite):\n\n def __init__(self):\n pg.sprite.Sprite.__init__(self)\n self.image = pg.Surface((30, 30))\n self.image.fill((180, 180, 180))\n self.rect = self.image.get_rect()\n\n self.moveto = None\n self.facing = None\n\n def update_facing(self, facing=None):\n\n def draw_facing(image, rect):\n if self.facing == 0:\n rect.midtop = self.image.get_rect().midtop\n elif self.facing == 1:\n rect.midright = self.image.get_rect().midright\n elif self.facing == 2:\n rect.midbottom = self.image.get_rect().midbottom\n elif self.facing == 3:\n rect.midleft = self.image.get_rect().midleft\n self.image.blit(image, rect)\n\n # clear the old facing line\n if self.facing is not None:\n image = pg.Surface((30, 30))\n image.fill((180, 180, 180))\n rect = image.get_rect()\n draw_facing(image, rect)\n self.facing = facing\n image, rect = load_image('facing_%s.png' % self.facing)\n draw_facing(image, rect)\n\n def update(self):\n if self.moveto:\n self.rect.center = self.moveto\n self.moveto = None\n","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":8708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"398672108","text":"from pymongo import *\nimport data.Pipelines as pl\nfrom matplotlib import pyplot as plt\nimport re as r\nimport operator\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport data.constants as con\nimport os\n\n# sns.set(rc={'figure.figsize': (21.7, 11.00)})\n\n\ndef ensure_dir(file_path):\n directory = os.path.dirname(file_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\npd.set_option('display.width', 1000)\ndb = MongoClient('mongodb://localhost:27017/').LeagueCrawler\nthreshold = 0.1\n\nfor _id in con.ADC_CHAMPIONS_DICT.keys():\n itemsets = db.itemsets_adc_pro.find({\"championId\": _id})\n champion = con.ADC_CHAMPIONS_DICT[_id]\n path = \"Viz_Pro/\" + champion + \"/\"\n PATCHES = [\"7.1-7.3\", \"7.4-7.8\", \"7.9-7.13\", \"7.14-7.18\"]\n\n ensure_dir(path)\n\n itemsets: pd.DataFrame = pd.DataFrame(list(itemsets))\n\n # itemsets[\"patch\"] = itemsets[\"gameVersion\"].apply(lambda row: r.match(\"\\d.\\d+\", row).group(0))\n itemsets[\"patch_grp\"] = np.where(itemsets['patch'].isin([\"7.1\", \"7.2\", \"7.3\"]), PATCHES[0],\n np.where(itemsets['patch'].isin([\"7.5\", \"7.6\", \"7.7\", \"7.8\"]), PATCHES[1],\n np.where(itemsets['patch'].isin([\"7.9\", \"7,10\", \"7.11\", \"7.12\", \"7.13\"]),\n PATCHES[2],\n np.where(itemsets['patch'].isin([\"7.14\", \"7.15\", \"7.16\", \"7.17\",\n \"7.18\"]), PATCHES[3], \"none\")\n )\n )\n )\n itemsets[\"core_items\"] = \\\n itemsets.apply(lambda row:\n tuple(map(\n lambda x: con.ADC_TIER_3_ITEMS_DICT[x],\n filter(lambda x: x 
in con.ADC_TIER_3_ITEMS_LIST, row[\"items\"])\n )),\n axis=1)\n itemsets[\"num_core_items\"] = itemsets.apply(lambda row: len(row['core_items']), axis=1)\n itemsets[\"two_core_items\"] = itemsets[\"core_items\"].apply(lambda x: True if len(x) >= 2 else False)\n itemsets[\"core_items_2\"] = itemsets[\"core_items\"].apply(lambda x: x[:2] if len(x) >= 2 else None)\n itemsets[\"three_core_items\"] = itemsets[\"core_items\"].apply(lambda x: True if len(x) >= 3 else False)\n itemsets[\"core_items_3\"] = itemsets[\"core_items\"].apply(lambda x: x[:3] if len(x) >= 3 else None)\n itemsets[\"four_core_items\"] = itemsets[\"core_items\"].apply(lambda x: True if len(x) >= 4 else False)\n itemsets[\"core_items_4\"] = itemsets[\"core_items\"].apply(lambda x: x[:4] if len(x) >= 4 else None)\n\n df = itemsets\n\n gamesCount_2_items = df[df['two_core_items']].groupby(['championId', 'patch_grp']).size().reset_index(\n name='gamesPlayed_2')\n gamesCount_3_items = df[df['three_core_items']].groupby(['championId', 'patch_grp']).size().reset_index(\n name='gamesPlayed_3')\n gamesCount_4_items = df[df['four_core_items']].groupby(['championId', 'patch_grp']).size().reset_index(\n name='gamesPlayed_4')\n\n # First two items\n result_2_items = df[df[\"num_core_items\"] >= 2] \\\n .groupby([\"championId\", \"patch_grp\", \"core_items_2\"]).core_items_2 \\\n .count() \\\n .reset_index(name=\"2_items_count\")\n\n result_2 = pd.merge(result_2_items, gamesCount_2_items, on=['championId', 'patch_grp'])\n result_2['freq'] = result_2['2_items_count'] / result_2['gamesPlayed_2']\n num_games_2 = sum(itemsets[\"two_core_items\"])\n\n # sns.set(font_scale=0.6)\n df2 = result_2[(result_2['freq'] > threshold)][['patch_grp', 'core_items_2', 'freq']]\n g2, ax = plt.subplots(figsize=(22, 10))\n g2 = sns.FacetGrid(df2,\n col='patch_grp', col_wrap=5, col_order=PATCHES)\n g2 = (g2.map(sns.barplot, 'freq', 'core_items_2', # 'platformId', hue_order=[\"EUW1\", \"NA1\", \"BR1\", \"KR\"],\n # palette=sns.color_palette(\"pastel\"),\n order=sorted(df2.core_items_2.unique(), key=operator.itemgetter(0))).add_legend())\n # g2.fig.subplots_adjust(top=0.9)\n g2.fig.suptitle(champion + \"'s Itemsets - Games: \" + str(num_games_2)+\", Threshold:\" + str(threshold), fontsize=20)\n g2.fig.tight_layout()\n g2.savefig(path + \"2_items.svg\")\n\n # alternatively:\n # g2 = sb.factorplot('freq', 'core_items_2', 'platformId',\n # data=result_2[(result_2['freq'] > 0.02)][['platformId', 'patch_grp', 'core_items_2', 'freq']],\n # kind='bar',\n # col='patch_grp',\n # col_wrap=5,\n # col_order=PATCHES,\n # palette=sb.color_palette(\"pastel\"))\n\n # First three items\n result_3_items = df[df[\"num_core_items\"] >= 3] \\\n .groupby([\"championId\", \"patch_grp\", \"core_items_3\"]).core_items_3 \\\n .count() \\\n .reset_index(name=\"2_items_count\")\n\n result_3 = pd.merge(result_3_items, gamesCount_3_items, on=['championId', 'patch_grp'])\n result_3['freq'] = result_3['2_items_count'] / result_3['gamesPlayed_3']\n num_games_3 = sum(itemsets[\"three_core_items\"])\n\n # sns.set(font_scale=1.3)\n df3 = result_3[(result_3['freq'] > threshold)][['patch_grp', 'core_items_3', 'freq']]\n g3, ax = plt.subplots(figsize=(22, 10))\n g3 = sns.FacetGrid(df3,\n col='patch_grp', col_wrap=5, col_order=PATCHES)\n g3 = (g3.map(sns.barplot, 'freq', 'core_items_3', # 'platformId', hue_order=[\"EUW1\", \"NA1\", \"BR1\", \"KR\"],\n # palette=sns.color_palette(\"pastel\"),\n order=sorted(df3.core_items_3.unique(), key=operator.itemgetter(0))\n ).add_legend())\n # 
g3.fig.subplots_adjust(top=0.9)\n g3.fig.suptitle(champion + \" Itemsets - Games: \" + str(num_games_3)+\", Threshold:\" + str(threshold), fontsize=20)\n g3.fig.tight_layout()\n g3.savefig(path + \"3_items.svg\")\n\n # First four items\n result_4_items = df[df[\"num_core_items\"] >= 4] \\\n .groupby([\"championId\", \"patch_grp\", \"core_items_4\"]).core_items_4 \\\n .count() \\\n .reset_index(name=\"4_items_count\")\n\n result_4 = pd.merge(result_4_items, gamesCount_4_items, on=['championId', 'patch_grp'])\n result_4['freq'] = result_4['4_items_count'] / result_4['gamesPlayed_4']\n num_games_4 = sum(itemsets[\"four_core_items\"])\n\n sns.set(font_scale=0.6)\n df4 = result_4[(result_4['freq'] > threshold)][['patch_grp', 'core_items_4', 'freq']]\n g4, ax = plt.subplots(figsize=(21.7, 11.00))\n g4 = sns.FacetGrid(df4,\n col='patch_grp', col_wrap=5, col_order=PATCHES)\n g4 = (g4.map(sns.barplot, 'freq', 'core_items_4', # 'platformId', hue_order=[\"EUW1\", \"NA1\", \"BR1\", \"KR\"],\n # palette=sns.color_palette(\"pastel\"),\n order=sorted(df4.core_items_4.unique(), key=operator.itemgetter(0))).add_legend())\n g4.fig.subplots_adjust(top=0.9)\n # g4.fig.suptitle(champion + \"'s Itemsets - Games: \" + str(num_games_4)+\", Threshold:\" + str(threshold), fontsize=20)\n # g4.tight_layout()\n g4.savefig(path + \"4_items.svg\")\n","sub_path":"data/Itemsets_pro.py","file_name":"Itemsets_pro.py","file_ext":"py","file_size_in_byte":7204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"76781171","text":"#!/usr/bin/env python\n\n#create a 3 line file\n#Write a python program that reads this file and prints out the lines of t he file\n#write a python program that writes a different file\n#close this new file and re-open it and append to it.\n#Check your code into GitHub\n\n\n\nmyfile = open(\"readme.txt\", \"r\")\ncontents = myfile.readlines()\nprint (contents)\nmyfile.close()\n\nmyfile2 = open(\"create2.txt\", \"w\")\nmyfile2.write(\"Sienna Rocks\\n\")\nmyfile2.close()\n\n\nappend_file = open(\"create2.txt\", \"a\")\nappend_file.write(\"appended\")\nappend_file.close()\n\n","sub_path":"day1/files_ex1.py","file_name":"files_ex1.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"596643418","text":"# topics = [\"动态规划\"]\n\nfrom typing import List\n\n\nclass Solution:\n \"\"\"\n 0-1 背包(二维)\n \"\"\"\n\n def findMaxForm(self, strs: List[str], m: int, n: int) -> int:\n # dp[i][j] 表示不超过 i 个 0 和 j 个 1 的最长子集\n dp: List[List[int]] = [[0] * (n + 1) for _ in range(m + 1)]\n\n for k in range(len(strs)):\n c0 = strs[k].count('0')\n c1 = strs[k].count('1')\n for i in range(m + 1)[::-1]:\n for j in range(n + 1)[::-1]:\n if k == 0:\n dp[i][j] = 0 if (c0 > i or c1 > j) else 1\n elif i >= c0 and j >= c1:\n dp[i][j] = max(dp[i][j], dp[i - c0][j - c1] + 1)\n\n return dp[m][n]\n","sub_path":"algorithms/[474]一和零/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"475522691","text":"\"\"\"\nGiven an array of size N containing only 0s, 1s, and 2s;\n sort the array in ascending order.\nExample 1:\nInput:\nN = 5\narr[]= {0 2 1 2 0}\nOutput:\n0 0 1 2 2\nExplanation:\n0s 1s and 2s are segregated\ninto ascending order.\n\"\"\"\n\n\ndef sort012(arr, n):\n c0 = 0\n c1 = 0\n c2 = 0\n for ele in arr:\n if ele == 0:\n c0 += 1\n elif ele == 1:\n c1 += 1\n else:\n 
c2 += 1\n i = 0\n while c0 != 0:\n arr[i] = 0\n i += 1\n c0 -= 1\n while c1 != 0:\n arr[i] = 1\n i += 1\n c1 -= 1\n while c2 != 0:\n arr[i] = 2\n i += 1\n c2 -= 1\n print(arr)\n\n\ndef sort012_opti(arr, n):\n start = 0\n mid = 0\n end = n - 1\n\n while mid <= end:\n if arr[mid] == 0:\n arr[start], arr[mid] = arr[mid], arr[start]\n start = start + 1\n mid = mid + 1\n elif arr[mid] == 1:\n mid = mid + 1\n else:\n arr[mid], arr[end] = arr[end], arr[mid]\n end = end - 1\n print(arr)\n\n\nsort012_opti([0, 1, 1, 2, 0, 1, 1], 7)\n","sub_path":"pyalgos/arrays/sort012.py","file_name":"sort012.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"298756189","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 1 17:28:04 2019\n\n@author: YUSS\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 27/5/2019\nInterceptor V2\n@author: I. \nThis ML challenge was created by SAMLA (National Electronic Warfare, Cyber & Intelligence Research Center) - a national research lab at Rafael http://portal.rafael.co.il/mlchallenge2019/Documents/samla.html\n\n\n\nThe goal of the game:\n Getting highest score in 100 games each running for 1000 steps.\n The player have access to 3 functions:\n \n Init(): This function initializes the game. It should be called before each game.\n \n Game_step(action_button): This function performs an action as described:\n action_button = 0: Change turret angle one step left\n action_button = 1: Do nothing in the current game step\n action_button = 2: Change turret angle one step right\n action_button = 3: Fire\n \n This function returns several variables:\n r_locs: Location of each rocket in the game (x,y)\n i_locs: Location of each interceptor in the game (x,y)\n c_locs: Location of each city in the game (x, width)\n ang: Turret angle\n score: Current player score\n \n Draw(): This function displays current game state (slows down your program. 
Not a must)\n \n Score is as follows: \n Rocket hits city: -15 points\n Rocket hits open field: -1 point\n Firing an interceptor: -1 point\n Intercepting a rocket: +4 points\n \nIn order to play, do the following:\n***********************************\n \nfrom Interceptor_V2 import Init, Draw, Game_step\n\nInit()\nfor stp in range(1000):\n action_button = *** Insert your logic here: 0,1,2 or 3 ***\n r_locs, i_locs, c_locs, ang, score = Game_step(action_button)\n Draw()\n\n*************************************\n\nDon't forget to play by the rules described in the website.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nfrom mrcnn.config import Config\nfrom mrcnn.model import MaskRCNN\n\n\n\nclass RocketConfig(Config):\n NAME = \"rocket\"\n IMAGES_PER_GPU = 1 \n NUM_CLASSES = 2\n# STEPS_PER_EPOCH = 1000\n\nconfig = RocketConfig()\nconfig.IMAGE_SHAPE = [192, 384, 3]\nconfig.IMAGE_MIN_DIM = 192\nconfig.IMAGE_MAX_DIM = 384\nconfig.IMAGE_RESIZE_MODE = 'none'\nconfig.USE_MINI_MASK = False\nconfig.display() \n\nmodel = MaskRCNN(mode='inference', model_dir='./', config=config)\nmodel.load_weights('./mask_rcnn_rocket_0070.h5', by_name=True)\n\n\nclass World():\n width = 10000 # [m]\n height = 4000 # [m]\n dt = 0.2 # [sec]\n time = 0 # [sec]\n score = 0\n reward_city = -15\n reward_open = -1\n reward_fire = -1\n reward_intercept = 4\n g = 9.8 # Gravity [m/sec**2]\n fric = 5e-7 # Air friction [Units of Science]\n rocket_prob = 1 # expected rockets per sec\n\n\nclass Turret():\n x = -2000 # [m]\n y = 0 # [m]\n x_hostile = 4800\n y_hostile = 0\n ang_vel = 30 # Turret angular speed [deg/sec]\n ang = 0 # Turret angle [deg]\n v0 = 800 # Initial speed [m/sec]\n prox_radius = 150 # detonation proximity radius [m]\n reload_time = 1.5 # [sec]\n last_shot_time = -3 # [sec]\n \n def update(self, action_button):\n if action_button == 0:\n self.ang = self.ang - self.ang_vel * world.dt\n if self.ang < -90: self.ang = -90\n \n if action_button == 1:\n pass\n \n if action_button == 2:\n self.ang = self.ang + self.ang_vel * world.dt \n if self.ang > 90: self.ang = 90\n \n if action_button == 3:\n if world.time - self.last_shot_time > self.reload_time:\n Interceptor()\n self.last_shot_time = world.time # [sec]\n\n \nclass Interceptor():\n def __init__(self):\n self.x = turret.x\n self.y = turret.y\n self.vx = turret.v0 * np.sin(np.deg2rad(turret.ang))\n self.vy = turret.v0 * np.cos(np.deg2rad(turret.ang))\n world.score = world.score + world.reward_fire\n interceptor_list.append(self)\n \n def update(self):\n self.v_loss = (self.vx ** 2 + self.vy ** 2) * world.fric * world.dt\n self.vx = self.vx * (1 - self.v_loss)\n self.vy = self.vy * (1 - self.v_loss) - world.g * world.dt\n self.x = self.x + self.vx * world.dt\n self.y = self.y + self.vy * world.dt \n if self.y < 0:\n Explosion(self.x, self.y)\n interceptor_list.remove(self)\n if np.abs(self.x) > world.width / 2:\n interceptor_list.remove(self)\n \n \nclass Rocket():\n def __init__(self, world):\n self.x = turret.x_hostile # [m] \n self.y = turret.y_hostile # [m]\n self.v0 = 700 + np.random.rand() * 300 # [m/sec] \n self.ang = -88 + np.random.rand() * 68 # [deg] \n self.vx = self.v0 * np.sin(np.deg2rad(self.ang))\n self.vy = self.v0 * np.cos(np.deg2rad(self.ang))\n rocket_list.append(self)\n \n def update(self):\n self.v_loss = (self.vx ** 2 + self.vy ** 2) * world.fric * world.dt\n self.vx = self.vx * (1 - self.v_loss)\n self.vy = self.vy * (1 - self.v_loss) - world.g * world.dt\n self.x = self.x + self.vx * world.dt\n self.y = 
self.y + self.vy * world.dt\n\n\nclass City():\n def __init__(self, x1, x2, width):\n self.x = np.random.randint(x1, x2) # [m]\n self.width = width # [m]\n city_list.append(self)\n self.img = np.zeros((200, 800))\n for b in range(60):\n h = np.random.randint(30, 180)\n w = np.random.randint(30, 80)\n x = np.random.randint(1, 700)\n self.img[0:h, x:x+w] = np.random.rand()\n self.img = np.flipud(self.img)\n \n \nclass Explosion():\n def __init__(self,x,y):\n self.x = x\n self.y = y\n self.size = 500\n self.duration = 0.4 # [sec]\n self.verts1 = (np.random.rand(30 ,2)- 0.5) * self.size\n self.verts2 = (np.random.rand(20 ,2)- 0.5) * self.size / 2\n self.verts1[:,0] = self.verts1[:,0] + x\n self.verts1[:,1] = self.verts1[:,1] + y\n self.verts2[:,0] = self.verts2[:,0] + x\n self.verts2[:,1] = self.verts2[:,1] + y\n self.hit_time = world.time\n explosion_list.append(self)\n \n def update(self):\n if world.time - self.hit_time > self.duration:\n explosion_list.remove(self)\n \n\ndef Check_interception():\n for intr in interceptor_list:\n for r in rocket_list:\n if ((r.x - intr.x)**2 + (r.y - intr.y)**2)**0.5 < turret.prox_radius:\n rocket_list.remove(r)\n Explosion(intr.x, intr.y)\n if intr in interceptor_list: interceptor_list.remove(intr)\n world.score = world.score + world.reward_intercept\n\n\ndef Check_ground_hit():\n for r in rocket_list:\n if r.y < 0:\n city_hit = False\n for c in city_list:\n if np.abs(r.x - c.x) < c.width:\n city_hit = True\n if city_hit == True:\n world.score = world.score + world.reward_city\n else:\n world.score = world.score + world.reward_open\n Explosion(r.x, r.y)\n rocket_list.remove(r)\n\n\ndef Draw():\n plt.cla()\n plt.rcParams['axes.facecolor'] = 'black'\n for r in rocket_list:\n plt.plot(r.x, r.y,'.y')\n for intr in interceptor_list:\n plt.plot(intr.x, intr.y,'or')\n C1 = plt.Circle((intr.x, intr.y), radius=turret.prox_radius, linestyle='--', color='gray', fill=False)\n ax = plt.gca()\n ax.add_artist(C1)\n for c in city_list:\n plt.imshow(c.img, extent=[c.x-c.width/2, c.x+c.width/2, 0, c.img.shape[0]])\n plt.set_cmap('bone')\n for e in explosion_list:\n P1 = plt.Polygon(e.verts1, True, color='yellow')\n P2 = plt.Polygon(e.verts2, True, color='red')\n ax = plt.gca()\n ax.add_artist(P1)\n ax.add_artist(P2)\n plt.plot(turret.x, turret.y,'oc', markersize=12)\n plt.plot([turret.x, turret.x + 100*np.sin(np.deg2rad(turret.ang))],\n [turret.y, turret.y + 100*np.cos(np.deg2rad(turret.ang))],'c', linewidth = 3)\n plt.plot(turret.x_hostile, turret.y_hostile,'or', markersize=12)\n plt.axes().set_aspect('equal')\n plt.axis([-world.width / 2, world.width / 2, 0, world.height])\n #plt.title('Score: ' + str(world.score))\n \n plt.xticks([])\n plt.yticks([])\n \n plt.draw()\n \n plt.gcf().savefig('./im.jpg', bbox_inches = 'tight', pad_inches = 0) \n n_img = cv2.imread('./im.jpg') \n os.remove('./im.jpg') \n \n plt.pause(0.001)\n return n_img\n\n \n\ndef Init():\n global world, turret, rocket_list, interceptor_list, city_list, explosion_list\n world = World()\n rocket_list = []\n interceptor_list = []\n turret = Turret()\n city_list = []\n explosion_list = []\n City(-world.width * 0.5 + 400, -world.width * 0.25 - 400, 800)\n City(-world.width * 0.25 + 400, -400, 800)\n plt.rcParams['axes.facecolor'] = 'black'\n \n \ndef Game_step(action_button):\n \n world.time = world.time + world.dt \n rand_in = np.random.rand() \n if rand_in < world.rocket_prob * world.dt:\n Rocket(world)\n \n for r in rocket_list:\n r.update()\n \n for intr in interceptor_list:\n intr.update()\n \n for e in 
explosion_list:\n e.update()\n \n turret.update(action_button)\n Check_interception()\n Check_ground_hit()\n \n r_locs = np.zeros(shape=(len(rocket_list), 2))\n for ind in range(len(rocket_list)):\n r_locs[ind, :] = [rocket_list[ind].x, rocket_list[ind].y]\n \n i_locs = np.zeros(shape=(len(interceptor_list), 2))\n for ind in range(len(interceptor_list)):\n i_locs[ind, :] = [interceptor_list[ind].x, interceptor_list[ind].y]\n \n c_locs = np.zeros(shape=(len(city_list), 2))\n for ind in range(len(city_list)):\n c_locs[ind, :] = [city_list[ind].x, city_list[ind].width]\n \n return r_locs, i_locs, c_locs, turret.ang, world.score\n\n\ndef Create_state(n_img):\n\n #prepare image for prediction\n gray_im = cv2.cvtColor(n_img, cv2.COLOR_BGR2GRAY)\n gray_im = gray_im[:135, 4:]\n pad = np.zeros((192, 384)).astype('uint8')\n pad[29:-28, 25:-25] = gray_im\n color_im = cv2.cvtColor(pad, cv2.COLOR_GRAY2BGR)\n #state\n state=np.zeros((192,384))\n #predict\n y_h = model.detect([color_im])[0]\n #rois (bounding boxes)\n for y_min,x_min,y_max,x_max in y_h[\"rois\"]:\n #take the center of each rocket (calculate middle of two points)\n# center_y,center_x=(y_min+y_max)/2,(x_min+x_max)/2\n #add one to rocket's center in state mat\n state[y_min:y_max+1, x_min:x_max+1] = 1\n# state[center_y,center_x]=1\n \n return state\n \n \n#def start(iters):\n# Init()\n# np.random.seed(1)\n# table=list()\n## state = env.observation_space\n# for stp in range(iters):\n# action_button = np.random.randint(0,4) \n## action_button = RockeAgent.act(state) \n## --> agent(state) --> a\n# r_locs, i_locs, c_locs, ang, score = Game_step(action_button)\n# n_img=Draw()\n# state=Create_state(n_img)\n# table.append([state,action_button,score])\n \n#start(15)\n \n\n\"\"\"\nTESTING CONVERSATION\nim = cv2.imread('./train/85.jpg')\ngray_im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\ngray_im = gray_im[:135, 4:]\npad = np.zeros((192, 384)).astype('uint8')\npad[29:-28, 25:-25] = gray_im\n\ncolor = cv2.cvtColor(pad, cv2.COLOR_GRAY2BGR)\nplt.imshow(color)\n\nim = plt.imread('./train/crop/85.jpg')\nplt.imshow(im)\n\"\"\"\n\n","sub_path":"interceptor/envs/check_real_time.py","file_name":"check_real_time.py","file_ext":"py","file_size_in_byte":11747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"302270521","text":"from django.core.management.base import BaseCommand, CommandError\nfrom menuFinder.models import DiningHall\n\nclass Command(BaseCommand):\n \n def handle(self, *args, **options):\n dining_hall_info = [['104West!', 'west'], ['Rose House Dining Room', 'west'], ['Keeton House Dining Room', 'west'],\n ['Jansen\\'s Dining Room at Bethe House', 'west'], ['Cook House Dining Room', 'west'],\n ['Becker House Dining Room', 'west'], ['Okenshields Dining Room', 'central'],\n ['Robert Purcell Marketplace Eatery', 'north'], ['Risley Dining Room', 'north'],\n ['North Star Dining Room', 'north']]\n \n DiningHall.objects.all().delete()\n\n for arr in dining_hall_info:\n DiningHall.objects.create(hall_name=arr[0], campus_location=arr[1])\n\n","sub_path":"diningAppV2/menuFinder/management/commands/setup_halls.py","file_name":"setup_halls.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"164871434","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 1 10:34:46 2016\n\n@author: Admin_1\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef pca(x):\n print(x.shape)\n mu = np.mean(x, 
axis=1)\n print(mu)\n xmf = np.vstack(mu) - x\n xcov = 1/x.shape[1]*np.dot(xmf, np.transpose(xmf))\n w, v = np.linalg.eigh(xcov)\n w_sorted = np.sort(w)[::-1]\n v_sorted = v[:, w_sorted.argsort()]\n\n\n return mu, xmf, xcov, w_sorted, v_sorted\n\nnum1 = np.array([0,1,0,0,1,0,0,1,0,0,1,0,0,1,0,1,1,1,0,0,1,1,1,1,1,0,0,\n 1,1,1,1,1,1,0,0,1,0,1,1,0,0,1,1,1,1,1,0,1,1,0,1,1,1,1,\n 0,0,1,0,0,1,1,1,1,1,0,0,1,1,1,0,0,1,1,1,1,1,1,1,1,0,0,\n 1,1,1,1,0,1,1,1,1,1,1,1,0,0,1,0,0,1,0,0,1,0,0,1,1,1,1,\n 1,0,1,1,1,1,1,0,1,1,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,1,\n 1,1,1,1,0,1,1,0,1,1,0,1,1,1,1]).reshape((10 ,15))\n\nplt.figure(1)\nfor i in range(num1.shape[0]):\n plt.subplot(2, 5, i+1)\n # b1 = np.flipud(np.transpose(num[i,:].reshape((5,3))))\n b1 = np.flipud(np.transpose(num1[i, ::-1].reshape((5, 3))))\n plt.imshow(b1.T, origin='lower', cmap=plt.cm.gray_r,\n interpolation='nearest')\n plt.xticks([])\n plt.yticks([])\n\nplt.subplot(2, 5, 3)\nplt.title('Ziffernmuster')\n\nmu, xmf, xcov, w, v = pca(np.transpose(num1))\n\nplt.figure(2)\nplt.imshow(np.flipud(np.transpose(mu[::-1].reshape((5, 3)))).T, origin='lower',\n cmap=plt.cm.gray_r, interpolation='nearest')\nplt.xticks([])\nplt.yticks([])\nplt.title('Mittelwertbild')\n\nplt.figure(3)\nfor i in range(v.shape[1]):\n plt.subplot(3, 5, i+1)\n b1 = np.flipud(np.transpose((-1)*v[::-1, i].reshape((5, 3))))\n plt.imshow(b1.T, origin='lower', cmap=plt.cm.gray_r,\n interpolation='nearest')\n plt.xticks([])\n plt.yticks([])\nV = v[:, 0:14]\nX1 = np.dot(np.transpose(V), xmf)\nrec = np.vstack(mu)+np.dot(V, X1)\n\nplt.figure(4)\nfor i in range(rec.shape[1]):\n plt.subplot(2, 5, i+1)\n # b1 = np.flipud(np.transpose(num[i,:].reshape((5,3))))\n b1 = np.flipud(np.transpose(rec[::-1, i].reshape((5, 3))))\n plt.imshow(b1.T, origin='lower', cmap=plt.cm.gray_r,\n interpolation='nearest')\n plt.xticks([])\n plt.yticks([])\n","sub_path":"aktuell/dsv/me_pca.py","file_name":"me_pca.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"144212696","text":"from argparse import ArgumentParser\nimport sys\nimport re\nimport requests\n\nparser = ArgumentParser(description=\"Run a SQL command to create a table\")\nparser.add_argument('sql_file', type=str, help='Path to the .sql file')\nparser.add_argument('--database-name', type=str, default=\"\", help='name of the database')\n\nargs = parser.parse_args()\n\ndb_name = args.database_name\n\nif db_name:\n result = requests.post(\n \"http://localhost:8123\",\n params={\"query\": f\"CREATE DATABASE IF NOT EXISTS {db_name}\"},\n )\n try:\n result.raise_for_status()\n except requests.exceptions.HTTPError as exc:\n raise Exception(result.text) from exc\n\nwith open(args.sql_file) as fp:\n query = fp.read()\n\nm = re.search(r\"table (\\S*)\", query)\ntable_name = m.group(1)\n\nif db_name:\n query = query.replace(f\"create table {table_name}\", f\"create table {db_name}.{table_name}\")\n\n from_db = f\"FROM {db_name}\"\nelse:\n from_db = \"\"\n\ntables = requests.post(\"http://localhost:8123\", params={\"query\": f\"SHOW TABLES {from_db}\"}).text.split(\"\\n\")\nif table_name in tables:\n print(f\"Table `{table_name}` already exists, not continuing\")\n sys.exit()\n\nresult = requests.post(\n \"http://localhost:8123\",\n params={\"query\": query},\n)\ntry:\n result.raise_for_status()\nexcept requests.exceptions.HTTPError as exc:\n raise Exception(result.text) from 
exc\n","sub_path":"create_table.py","file_name":"create_table.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"422333739","text":"def orientation(a, b, c):\r\n det = (b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x)\r\n return sign(det)\r\n\r\n\r\ndef sign(a):\r\n if a == 0:\r\n return 0\r\n if a > 0:\r\n return 1\r\n if a < 0:\r\n return -1\r\n\r\n\r\ndef do_line_segments_without_endpoints_intersect(a, b, c, d):\r\n o1 = orientation(a, b, c)\r\n o2 = orientation(a, b, d)\r\n o3 = orientation(c, d, a)\r\n o4 = orientation(c, d, b)\r\n\r\n if o1 == 0 and o2 == 0 and o3 == 0 and o4 == 0: # all colinear\r\n if a.x == b.x == c.x == d.x:\r\n sorted_a = min(a.x, b.x)\r\n sorted_b = max(a.x, b.x)\r\n sorted_c = min(c.x, d.x)\r\n sorted_d = max(c.x, d.x)\r\n elif a.y == b.y == c.y == d.y:\r\n return False\r\n else:\r\n sorted_a = min(a.y, b.y)\r\n sorted_b = max(a.y, b.y)\r\n sorted_c = min(c.y, d.y)\r\n sorted_d = max(c.y, d.y)\r\n\r\n return sorted_c < sorted_a < sorted_d \\\r\n or sorted_c < sorted_b < sorted_d \\\r\n or sorted_a < sorted_c < sorted_b \\\r\n or sorted_a < sorted_d < sorted_b \\\r\n or sorted_a == sorted_c \\\r\n or sorted_b == sorted_d\r\n\r\n if o1 == 0 or o2 == 0 or o3 == 0 or o4 == 0:\r\n return False\r\n\r\n return o1 != o2 and o3 != o4\r\n\r\n\r\n","sub_path":"PLDijkstraPathfindingVisualization/Pathfinding/Math.py","file_name":"Math.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"411677842","text":"nums = list(map(int, input().split()))\r\ny_size = nums[0] # кол-во строк, т.е. длина вертикали\r\nx_size = nums[1] # кол-во столбцов, т.е. длина горизонтали\r\ntab = [[0] * x_size for _ in range(y_size)]\r\n\r\nindex = 0; x = 1; y = -1\r\n\r\nwhile True:\r\n\twhile x > 0 and y < y_size -1:\r\n\t\ty += 1; x -= 1\r\n\t\tindex += 1; tab[y][x] = index\r\n\tif x == 0 and y < y_size - 1:\r\n\t\ty += 1 # вправо если дошли до верхней границы \r\n\telse:\r\n\t\tif y == y_size - 1 and x == x_size - 1:\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tx += 1 #вниз если дошли до правой границы\r\n\tindex += 1; tab[y][x] = index\r\n\r\n\tif x == x_size -1 and y == y_size -1:\r\n\t\tbreak\r\n\r\n\twhile y > 0 and x < x_size - 1:\r\n\t\ty -= 1; x += 1\r\n\t\tindex += 1; tab[y][x] = index\r\n\tif y == 0 and x < x_size - 1:\r\n\t\tx += 1\r\n\telse:\r\n\t\tif y == y_size - 1 and x == x_size - 1:\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\ty += 1\r\n\tindex += 1; tab[y][x] = index\r\n\r\nfor line in tab:\r\n\tprint(' '.join(map(str, line)))","sub_path":"LB/LabRab9/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"186647287","text":"from pdbfixer import PDBFixer\nfrom simtk.openmm.app import PDBFile\n\n\ndef fix_pdb(input_file, output_file):\n \"\"\"\n\n .. 
note::\n Set ``export OPENMM_CPU_THREADS=1`` if you want this function\n to use only one thead.\n \"\"\"\n fixer = PDBFixer(filename=input_file)\n fixer.findMissingResidues()\n # Replace non-standard residues with their standard equiv.\n # (But if atoms have to be added, they get added using the `.addMissingAtoms()` command)\n fixer.findNonstandardResidues()\n fixer.replaceNonstandardResidues()\n # Find missing heavy atoms\n fixer.findMissingAtoms()\n fixer.addMissingAtoms()\n # fixer.removeHeterogens(True)\n fixer.addMissingHydrogens(7.0)\n with open(output_file, 'w') as ofh:\n PDBFile.writeFile(fixer.topology, fixer.positions, ofh, keepIds=True)\n","sub_path":"ascommon/omnia_tools/openmm_tools.py","file_name":"openmm_tools.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"306577246","text":"import argparse\n\n# ================================================\n# Parse data input and output directories\n# ================================================\ndef parse_args():\n\n # Parse arguments\n parser = argparse.ArgumentParser(description='Run CNN Segmenter for 4D flow MRIs.')\n parser.add_argument('--train', dest='train', action='store_true', help='run training')\n parser.add_argument('--inference', dest='train', action='store_false', help='run inference')\n parser.add_argument('--config', type=str, default='config/cnn_segmenter_neerav.json', help='Directory containing MRI data set') # default='system/cnn_segmenter_neerav.json'\n parser.add_argument('--model', type=str, default='experiments/unet_neerav.json', help='Directory containing model configuration')\n\n # training arguments\n parser.add_argument('--training-input', type=str, help='Training input directory (for training only)')\n parser.add_argument('--training-output', type=str, help='Training output directory (for training only)')\n\n # inference arguments\n parser.add_argument('--inference-input', type=str, help='Input FlowMRI (for inference only)')\n parser.add_argument('--inference-output', type=str, help='Output SegmentedFlowMRI (for inference only)')\n\n # debug arguments\n parser.add_argument('--debug_server', type=str, help='Socket address (hostname:port) of Pycharm debug server')\n\n return parser.parse_args()\n\nargs = parse_args()\n\n","sub_path":"args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"94192451","text":"# Import libraries needed\nfrom configparser import ConfigParser\nfrom pathlib import Path\n\nimport tweepy\n\n# Path to the config file with the keys make sure not to commit this file\nCONFIG_FILE = Path.cwd() / \"config.cfg\"\n\nconfig = ConfigParser()\nconfig.read(CONFIG_FILE)\n\n# Authenticate to Twitter\nauth = tweepy.OAuthHandler(\n config.get(\"twitter\", \"consumer_key\"), config.get(\"twitter\", \"consumer_secret\")\n)\nauth.set_access_token(\n config.get(\"twitter\", \"access_token\"), config.get(\"twitter\", \"access_token_secret\")\n)\n\n# Create Twitter API object\ntwitter = tweepy.API(auth)\n\n# let's collect some of the tweets in your public timeline\npublic_tweets = twitter.home_timeline()\n\nfor tweet in public_tweets:\n print()\n print(tweet.text)\n","sub_path":"solutions/etl-basic/connect_twitter.py","file_name":"connect_twitter.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} 
{"seq_id":"594956228","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport os\nimport argparse\nimport sys\nimport json\nimport csv\n\n# adapted from https://gist.github.com/genekogan/ebd77196e4bf0705db51f86431099e57\n\n# CHANGES MADE:\n# changed to accommodate python 3 (instead of 2)\n# cleaned up\n# create directory if not exist\n# download multiple search terms into subdirectories - intended of TF training purposes\n\ndef get_soup(url,header):\n return BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,headers=header)),'html.parser')\n\n\ndef main(args):\n parser = argparse.ArgumentParser(description='Scrape Google images')\n # parser.add_argument('-s', '--search', default='eye', type=str, help='search term')\n parser.add_argument('-i', '--search_file', default=os.path.join(os.getcwd(), 'additional', 'search_file.csv'), type=str, help='image search term file')\n parser.add_argument('-n', '--num_images', default=10, type=int, help='number of images to save')\n parser.add_argument('-d', '--directory', default=os.path.join(os.getcwd(), 'images'), type=str, help='save directory')\n\n args = parser.parse_args()\n # query = args.search\n query = args.search_file\n max_images = args.num_images\n save_directory = args.directory\n\n if not os.path.exists(save_directory):\n os.makedirs(save_directory)\n\n if not os.path.isfile(query):\n raise ValueError('Image search term file does not exist. Please specify an existing file.')\n else:\n with open(query) as csvfile:\n queryreader = csv.DictReader(csvfile)\n for row in queryreader:\n img_class = row['class']\n img_phrase = row['search']\n row_search = img_phrase.split()\n row_search = '_'.join(row_search)\n img_phrase = img_phrase.split()\n img_phrase = '+'.join(img_phrase)\n url = \"https://www.google.co.in/search?q=\"+img_phrase+\"&source=lnms&tbm=isch\"\n header = {'User-Agent':\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\"}\n soup = get_soup(url, header)\n\n class_save_dir = os.path.join(save_directory, img_class)\n\n if not os.path.isdir(class_save_dir):\n os.makedirs(class_save_dir)\n\n img_links = []\n\n for a in soup.find_all(\"div\", {\"class\":\"rg_meta\"}):\n img_link = json.loads(a.text)[\"ou\"]\n img_format = json.loads(a.text)[\"ity\"]\n img_links.append((img_link, img_format))\n\n for i, (img_link, img_format) in enumerate(img_links[0:max_images]):\n try:\n with urllib.request.urlopen(img_link) as response:\n raw_img = response.read()\n\n if len(img_format) == 0:\n f = open(os.path.join(class_save_dir, row_search + \"_\" + str(i) + \".jpg\"), 'wb')\n else :\n f = open(os.path.join(class_save_dir, row_search + \"_\" + str(i) + \".\" + img_format), 'wb')\n\n f.write(raw_img)\n f.close()\n\n except Exception as e:\n print(\"could not load : {}\".format(img_link))\n print(e)\n\n\nif __name__ == '__main__':\n from sys import argv\n try:\n main(argv)\n except KeyboardInterrupt:\n pass\n sys.exit()","sub_path":"src/image_scraper/google_scrape.py","file_name":"google_scrape.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"298416548","text":"from sys import stdin, stdout\n# import math\n# import random\n# def func(N,x,co):\nfor T in range(int(input())):\n N,x = list(map(int,stdin.readline().split()))\n co = list(map(int,stdin.readline().split()))\n co = sorted(co)\n co_val = [1] * N\n # print(co)\n days = 0\n position = 0\n n = 0\n # Find the initial 
Position\n for i in range(len(co)):\n if co[i] == x:\n position = i+1\n days += i+1\n x += x\n break\n if co[i] > x:\n if i > 0 and 2*co[i-1] >= x:\n x = 2 * co[i-1]\n position = i-1\n days += i - 1\n else :\n days += i\n position = i\n break\n # print(position)\n for pos in range(position,N):\n while co_val[pos]:\n print(f'value: {co[pos]}, x: {x}, days: {days}')\n if co[pos] <= x:\n co_val[pos] = 0\n days += 1\n x = 2 * min(x,co[pos])\n \n # print(x)\n print(days)\n\n# if __name__ == '__main__':\n# for i in range(1):\n# N = 20\n# l1 = [random.choice(range(1, 50)) for _ in range(N)]\n# x = random.choice(range(1,50))\n# print(f'x:{x}, N : {N}')\n# # print(l1)\n# func(N,x,l1)","sub_path":"DRCHEF.py","file_name":"DRCHEF.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"350550578","text":"import os\nfrom math import ceil\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\n\n\ndef load_image(file_name, dtype='float32', gray=False):\n \"\"\"\n Loads an image as an numpy array\n :param file_name:\n :param trans:\n :return:\n \"\"\"\n img = Image.open(file_name)\n img.load()\n if gray:\n img = img.convert('L')\n data = np.asarray(img, dtype=dtype)\n\n return data\n\n\n\ndef show_histogram(img):\n plt.hist(img.ravel(), 256, [0, 256]);\n plt.show()\n\n\n\ndef divide_img(img, bound, min=0, max=1):\n less = img < bound\n greater = img >= bound\n new_img = img.copy()\n new_img[less] = min\n new_img[greater] = max\n return new_img","sub_path":"image_processing/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"335126956","text":"from impl.matrices import (\n positive_definite_matrix\n)\nfrom impl.cholesky import cholesky_decomposition\nfrom impl.lineq_solver import solve_cholesky\nimport numpy as np\nimport visual as vs\n\n\ndef impl_vs_numpy(m_size):\n M = positive_definite_matrix(size=m_size)\n L = cholesky_decomposition(M)\n L_NP = np.linalg.cholesky(M)\n\n mv = vs.MatrixVisualizer(inputs=((M, L, L.T, np.dot(L, L.T)),\n (M, L_NP, L_NP.T, np.dot(L_NP, L_NP.T))),\n labels=((\"Initial Matrix\",\n \"Lower triangular - L\",\n \"Transpose - L'\",\n \"Recomposed matrix (L*L')\"),\n (\"Initial Matrix\",\n \"Numpy - Lower triangular - L\",\n \"Numpy - Transpose - L'\",\n \"Recomposed matrix (L*L')\")),\n map_type=\"flag\")\n mv.show(rows=2, cols=4)\n\n\ndef impl_vs_numpy_timeit(m_size):\n matrix = positive_definite_matrix(size=m_size)\n funcs = [cholesky_decomposition, np.linalg.cholesky]\n labels = (\"Implemented Cholesky\", \"Numpy Cholesky\")\n tv = vs.TimeVisualizer(funcs=funcs,\n data=matrix,\n labels=labels,\n iterations=200)\n tv.show()\n\n\ndef cholesky_fail(m_size):\n matrix = rand_matrix(m_size)\n try:\n cholesky_decomposition(matrix)\n except Exception as e:\n print(e)\n\n mv = vs.MatrixVisualizer(inputs=matrix,\n labels=\"Random matrix\",\n map_type=\"plasma\")\n mv.show()\n\n\ndef cholesky_solve(m_size):\n matrix = [[4, 12, -16], [12, 37, -43], [-16, -43, 98]]\n coef = np.array([2, 5, 3]).reshape((3, 1))\n L = cholesky_decomposition(matrix)\n x = solve_cholesky(matrix, coef)\n mv = vs.MatrixVisualizer(inputs=(matrix, L, L.T, x),\n labels=(\"Matrix\", \"Lower triangular\",\n \"Transpose - L'\", \"X solution\"),\n map_type=\"plasma\")\n mv.show(rows=1, cols=4)\n print(\"Matrix A\")\n print(matrix)\n print(\"B coefficient: \")\n print(coef)\n 
print(\"X solution: \")\n print(x)\n","sub_path":"examples/cholesky_example.py","file_name":"cholesky_example.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"611298399","text":"from django.test import override_settings\n\nfrom ozpcenter.scripts import sample_data_generator as data_gen\nfrom tests.ozp.cases import APITestCase\nfrom tests.ozpcenter.helper import APITestHelper\n\n\n@override_settings(ES_ENABLED=False)\nclass IntentApiTest(APITestCase):\n\n @classmethod\n def setUpTestData(cls):\n data_gen.run()\n\n def setUp(self):\n pass\n\n def test_get_intent_list(self):\n url = '/api/intent/'\n response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)\n\n actions = [i['action'] for i in response.data]\n self.assertTrue('/application/json/view' in actions)\n self.assertTrue(response.data[0]['icon'] is not None)\n self.assertTrue(response.data[0]['media_type'] is not None)\n self.assertTrue(response.data[0]['label'] is not None)\n\n def test_get_intent(self):\n url = '/api/intent/1/'\n response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)\n\n self.assertEqual(response.data['action'], '/application/json/view')\n\n def test_create_intent(self):\n url = '/api/intent/'\n data = {'action': '/application/test',\n 'media_type': 'vnd.ozp-intent-v1+json.json', 'label': 'test',\n 'icon': {'id': 1, 'security_marking': 'UNCLASSIFIED'}}\n\n response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=201)\n self.assertEqual(response.data['action'], '/application/test')\n\n def test_update_intent(self):\n url = '/api/intent/1/'\n action = '/application/json/viewtest'\n media_type = 'vnd.ozp-intent-v2+json.json'\n label = 'mylabel'\n data = {'action': action,\n 'media_type': media_type, 'label': label,\n 'icon': {'id': 1, 'security_marking': 'UNCLASSIFIED'}}\n\n response = APITestHelper.request(self, url, 'PUT', data=data, username='bigbrother', status_code=200)\n\n self.assertEqual(response.data['action'], action)\n self.assertEqual(response.data['label'], label)\n self.assertEqual(response.data['media_type'], media_type)\n\n def test_delete_intent(self):\n url = '/api/intent/1/'\n APITestHelper.request(self, url, 'DELETE', username='bigbrother', status_code=204)\n","sub_path":"tests/ozpcenter_api/test_api_intent.py","file_name":"test_api_intent.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"568522711","text":"from django.urls import path,include\nfrom .views import *\napp_name='user'\n\n\n\n\n\nurlpatterns = [\n # path('frame/',frame),\n path('login/',login),\n path('home1/',home1),\n path('register/',register),\n path('options/',options),\n path('pay/',pay),\n\n]\n","sub_path":"ShoppingOperation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"575608102","text":"import sys\n\nimport os\nimport zipfile\nimport tempfile\nimport sched, time\nimport numpy as np\n\nfrom b3w import B3W\nfrom glob import glob\nfrom osgeo import gdal\nfrom shutil import rmtree\nfrom subprocess import Popen, PIPE, STDOUT\nfrom typing import Any, List, Tuple, Set\n\nfrom sentinel import Config\nfrom sentinel import DataHub\nfrom sentinel import Polygons\nfrom utils import get_environment, print_snapshots, 
remove\n\n\n# Main loop period (seconds)\nINTERVAL: int = 10#0\n\n\ndef check_in_aws(s3: B3W, prefix: str, depth: int = 1) -> Set[str]:\n objects: Set[str] = set() #List[str] = []\n try:\n for s3o in s3.ls(prefix):\n if prefix:\n s3o = s3o.replace(prefix.strip('/'), '').strip('/')\n if not s3o:\n continue\n objects.add('/'.join(s3o.split('/')[:depth]))\n except Exception as e:\n #TODO: debug and handle different kinds of exceptions\n raise e\n\n return objects\n\n\ndef get_from_aws(s3: B3W, prefix: str,\n path: str = '/dev/shm/gps/input'\n ) -> List[str]:\n files: List[str] = []\n try:\n #objects = s3.ls(prefix)\n #print(f\"DEBUG: objects = {objects}\")\n extensions = ('.geojson', '.shp', '.shx', '.dbf', '.prj', '.cpg')\n for s3o in s3.ls(prefix):\n #print(f\"DEBUG: get_from_aws s3o = {s3o}\")\n if prefix:\n filename = s3o.replace(prefix.strip('/'), '').strip('/')\n else:\n filename = s3o\n if not filename.endswith(extensions):\n continue\n #filename = os.path.join(path, *filename.lstrip('/').split('/'))\n filename = os.path.join(path, *filename.split('/'))\n #print(f\"DEBUG: {s3o} -> {filename}\")\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n s3.get(s3o, filename)\n files.append(filename)\n except Exception as e:\n #TODO: debug and handle different kinds of exceptions\n raise e\n\n return files\n\n\ndef put_to_aws(s3: B3W, prefix: str,\n path: str = '/dev/shm/gps/output'\n ) -> List[str]:\n objects: List[str] = []\n try:\n path = os.path.normpath(path)\n for filename in glob(os.path.join(path, '**'), recursive=True):\n if not os.path.isfile(filename):\n continue\n s3o = os.path.relpath(filename, path)\n if prefix:\n s3o = '/'.join([prefix.strip('/'), *s3o.split(os.path.sep)])\n else:\n s3o = '/'.join([*s3o.split(os.path.sep)])\n #print(f\"DEBUG: '{filename}' -> '{s3o}'\")\n s3.put(filename, s3o, force=True)\n except Exception as e:\n #TODO: debug and handle different kinds of exceptions\n raise e\n\n return objects\n\n\ndef remove_from_aws(s3: B3W, prefix: str) -> List[str]:\n # TODO: remove this function (DEBUG only)\n objects: List[str] = []\n try:\n #objects = s3.ls(prefix)\n #print(f\"DEBUG: objects to remove = {objects}\")\n for s3o in s3.ls(prefix):\n s3._B3W__s3r.Object(s3._B3W__bucket_name, s3o).delete()\n print(f\"DEBUG: removed S3 object '{s3o}'\")\n objects.append(s3o)\n except Exception as e:\n print(f\"Error removing S3 object:\\n{e}\")\n\n return objects\n\n\ndef sync_with_aws(\n s3: B3W, prefix: str, data_hub: DataHub,\n snapshot: Any, path: str = '/dev/shm/gps/data'\n ) -> str:\n filename: str = None\n try:\n data_hub.config.output = os.path.join(path, snapshot.uuid)\n os.makedirs(data_hub.config.output, exist_ok=True)\n objects = s3.ls('/'.join([prefix, snapshot.uuid]))\n print(f\"DEBUG: S3 objects = {objects}\")\n #if len(objects) > 0:\n for s3o in objects:\n #s3o = s3o.replace(prefix, '').lstrip('/')\n if s3o.strip('/').endswith(snapshot.uuid):\n continue\n filename = os.path.join(path, s3o.replace(prefix, '').lstrip('/'))\n print(f\"DEBUG: syncing '{s3o}' -> '{filename}'\")\n s3.get(s3o, filename)\n break\n if not filename:\n data_hub.download(snapshot)\n #print(f\"DEBUG: data_hub.config.output = {data_hub.config.output}\")\n for filename in glob(os.path.join(data_hub.config.output, '*')):\n #print(f\"DEBUG: filename = {filename}\")\n #if len(filename) < 1:\n # raise FileNotFoundError(f\"failed to download {snapshot.uuid}!\")\n #else:\n # filename = filename[0]\n s3o = filename.replace(path, '').lstrip(os.path.sep)\n # TODO: check filename against 
snapshot.uuid\n s3o = '/'.join([prefix, s3o])\n print(f\"DEBUG: syncing '{filename}' -> '{s3o}'\")\n s3.put(filename, s3o)\n break\n if not filename:\n print(f\"failed to download {snapshot.uuid}!\")\n data_hub.config.output = None # TODO: devise something smart\n except FileNotFoundError as e:\n print(f\"Failure: {e}\")\n except Exception as e:\n #TODO: debug and handle different kinds of exceptions\n raise e\n\n return filename\n\n\ndef set_debug_aws() -> None:\n s3_id, s3_key, s3_bucket, s3_input, s3_output, s3_sync = get_environment()\n path_input, path_output = ('/dev/shm/gps/input', '/dev/shm/gps/output')\n path_data = '/dev/shm/gps/data'\n #print(f\"DEBUG:\\n\\t{'S3_ID'.ljust(16)}{s3_id}\"\n # f\"\\n\\t{'S3_KEY'.ljust(16)}{s3_key}\"\n # f\"\\n\\t{'S3_BUCKET'.ljust(16)}{s3_bucket}\"\n # f\"\\n\\t{'S3_INPUT'.ljust(16)}{s3_input}\"\n # f\"\\n\\t{'S3_OUTPUT'.ljust(16)}{s3_output}\"\n # f\"\\n\\t{'S3_SYNC'.ljust(16)}{s3_sync}\")\n s3 = B3W(s3_bucket, s3_id, s3_key)\n # DEBUG: put test data to S3\n remove_from_aws(s3, s3_input + '/test')\n remove_from_aws(s3, s3_output + '/test')\n for filename in glob(os.path.join('data', '*')):\n #s3.put('data/geotiff.xml', '/'.join([s3_input, 'test', 'geotiff.xml']))\n s3o = '/'.join([s3_input, 'test', os.path.basename(filename)])\n s3.put(filename, s3o)\n print(f\"DEBUG: '{filename}' -> '{s3o}'\")\n #print(s3.ls(s3_input))\n #sys.exit(0)\n\n return None\n\n\ndef process_sentinel1(filename, path_output, area, shapes):\n title = os.path.splitext(os.path.basename(filename))[0]\n options_warp = {\n 'format': 'GTiff',\n 'dstSRS': 'EPSG:32640',\n 'creationOptions': ['COMPRESS=DEFLATE'],\n 'xRes': 40,\n 'yRes': 40\n }\n with tempfile.TemporaryDirectory() as path_temp:\n with zipfile.ZipFile(filename, 'r') as archive:\n archive.extractall(path_temp)\n path_safe = glob(os.path.join(path_temp, f\"*.SAFE\"))[0]\n dataset = gdal.Open(path_safe, gdal.GA_ReadOnly)\n subsets = dataset.GetSubDatasets()\n datasets = {}\n for i, p in enumerate(['HH', 'HV', 'RGB']):\n print(f\"Reading {subsets[i][1]}...\")\n datasets[p] = gdal.Open(subsets[i][0], gdal.GA_ReadOnly)\n filenames = []\n name_area = os.path.splitext(os.path.basename(area))[0]\n print(f\"Warping polarizations...\")\n for name, source in datasets.items():\n #\n # Prepare filenames and paths\n #\n for shape in shapes:\n if shape:\n name_shape = os.path.basename(shape)\n name_shape = os.path.splitext(name_shape)[0]\n data_prefix = f\"{name_area}_{name_shape}\"\n options_cutline = {'cutlineDSName': shape,\n 'cropToCutline': True}\n else:\n data_prefix = f\"{name_area}\"\n options_cutline = {}\n data_output = os.path.join(path_output, data_prefix)\n if not name in ['RGB', 'INV']:\n data_output = os.path.join(data_output, name.lower())\n os.makedirs(data_output, exist_ok=True)\n print(f\"{data_output.replace(path_output, '')}\")\n destination = f\"{os.path.join(data_output, title)}.tiff\"\n filenames.append(destination)\n gdal.Warp(destination, source, **options_warp,\n **options_cutline)\n else:\n # Basic RGB processing\n driver_mem = gdal.GetDriverByName('MEM')\n memoset = driver_mem.CreateCopy('', source, 0)\n band_hh = memoset.GetRasterBand(1)\n image_hh = band_hh.ReadAsArray()\n mask_hh = image_hh == 0\n image_hh = np.ma.array(image_hh, mask=mask_hh,\n dtype=np.float32)\n del mask_hh\n band_hv = memoset.GetRasterBand(2)\n image_hv = band_hv.ReadAsArray()\n mask_hv = image_hv == 0\n image_hv = np.ma.array(image_hv, mask=mask_hv,\n dtype=np.float32)\n del mask_hv\n stats_hh = 
(image_hh.mean().astype(np.float32),\n image_hh.std().astype(np.float32))\n stats_hv = (image_hv.mean().astype(np.float32),\n image_hv.std().astype(np.float32))\n image_hh = np.ma.tanh(image_hh / (stats_hh[0] + 2 *\n stats_hh[1]))\n image_hv = np.ma.tanh(image_hv / (stats_hv[0] + 2 *\n stats_hv[1]))\n image_ratio = image_hh / image_hv\n stats_ratio = (image_ratio.mean().astype(np.float32),\n image_ratio.std().astype(np.float32))\n image_ratio = image_ratio / image_ratio.max()\n image_negative = (np.float32(1) -\n np.ma.tanh(image_hh / image_hv))\n # Convert to byte type\n image_hh = (image_hh * 254 + 1).astype(np.uint8)\n image_hv = (image_hv * 254 + 1).astype(np.uint8)\n image_ratio = (image_ratio * 254 + 1).astype(np.uint8)\n image_negative = (image_negative * 254 + 1)\\\n .astype(np.uint8)\n # Write channels to the MEM dataset\n memoset.AddBand()\n band_ex = memoset.GetRasterBand(3)\n band_ex.SetColorInterpretation(gdal.GCI_BlueBand)\n band_hh.WriteArray(image_hh)\n band_hh.SetColorInterpretation(gdal.GCI_RedBand)\n band_hv.WriteArray(image_hv)\n band_hv.SetColorInterpretation(gdal.GCI_GreenBand)\n # Create ratio band (HH, HV, HH/HV)\n band_ex.WriteArray(image_ratio)\n band_ex.SetMetadata({'POLARISATION': 'HH/HV',\n 'SWATH': 'EW'})\n path_ratio = os.path.join(data_output, 'ratio')\n os.makedirs(path_ratio, exist_ok=True)\n print(f\"{path_ratio.replace(path_output, '')}\")\n destination = f\"{os.path.join(path_ratio, title)}.tiff\"\n filenames.append(destination)\n gdal.Warp(destination, memoset, **options_warp,\n outputType=gdal.GDT_Byte, **options_cutline)\n # Create negative band (HH, HV, 1 - HH/HV)\n band_ex.WriteArray(image_negative)\n band_ex.SetMetadata({'POLARISATION': '1 - HH/HV',\n 'SWATH': 'EW'})\n path_negative = os.path.join(data_output, 'negative')\n os.makedirs(path_negative, exist_ok=True)\n print(f\"{path_negative.replace(path_output, '')}\")\n destination = f\"{os.path.join(path_negative, title)}.tiff\"\n filenames.append(destination)\n gdal.Warp(destination, memoset, **options_warp,\n outputType=gdal.GDT_Byte, **options_cutline)\n print(f\"Done!\")\n return filenames\n\n\ndef process_sentinel2(filename, path_output, area, shapes):\n title = os.path.splitext(os.path.basename(filename))[0]\n dataset = gdal.Open(filename, gdal.GA_ReadOnly)\n subsets = dataset.GetSubDatasets()\n assert len(subsets) > 0, f\"no sub datasets found!\"\n dataset = gdal.Open(subsets[0][0], gdal.GA_ReadOnly)\n #print(f\"{snapshot.title} -->\")\n print(f\"Reading {subsets[0][1][:1].lower()}\",\n f\"{subsets[0][1][1:]}\", sep='')\n image = dataset.ReadAsArray()\n if image.ndim < 3:\n image = image[None, ...]\n image = np.moveaxis(image[:3, ...], 0, -1) # CHW -> HWC\n image = image.astype(np.float32)\n print(f\"Calculating optimal histogram...\")\n clip = image.mean().astype(np.float32) * np.float32(2)\n image = image / clip\n image = np.tanh(image)\n # Apply gamma correction here (TODO)\n image = (image * 254 + 1).round().astype(np.uint8)\n # Apply nodata mask here (TODO)\n image = np.moveaxis(image, -1, 0) # HWC -> CHW\n print(f\"Applying histogram...\")\n tempset = gdal.GetDriverByName('MEM')\\\n .CreateCopy('', dataset, 0)\n for i in range(image.shape[0]):\n band = tempset.GetRasterBand(i + 1)\n band.WriteArray(image[i].astype(np.uint16))\n del band\n print(f\"Writing to temporary file...\")\n with tempfile.TemporaryDirectory() as path_temp:\n temp = os.path.join(path_temp, 'temp.tiff')\n gdal.Translate(temp, tempset,\n creationOptions=['COMPRESS=DEFLATE'],\n format='GTiff', bandList=[1, 2, 
3],\n outputType=gdal.GDT_Byte)\n del tempset\n #\n # Prepare filenames and paths\n #\n filenames = []\n name_area = os.path.splitext(os.path.basename(area))[0]\n for shape in shapes:\n if shape:\n name_shape = os.path.basename(shape)\n name_shape = os.path.splitext(name_shape)[0]\n data_prefix = f\"{name_area}_{name_shape}\"\n options = {'cutlineDSName': shape,\n 'cropToCutline': True}\n else:\n data_prefix = f\"{name_area}\"\n options = {}\n data_output = os.path.join(path_output,\n #data_name,\n data_prefix)\n os.makedirs(data_output, exist_ok=True)\n destination = f\"{os.path.join(data_output, title)}.tiff\"\n filenames.append(destination)\n gdal.Warp(destination, temp, **options)\n print(f\"Done!\")\n return filenames\n\n\ndef main(periodic: sched.scheduler) -> None:\n # Set working variables\n s3_id, s3_key, s3_bucket, s3_input, s3_output, s3_sync = get_environment()\n path_input, path_output = ('/dev/shm/gps/input', '/dev/shm/gps/output')\n path_data = '/dev/shm/gps/data'\n\n #print(f\"\\n=== Started input processing cycle ===\\n\")\n s3 = B3W(s3_bucket, s3_id, s3_key)\n\n # Get input files from S3\n files_input = get_from_aws(s3, s3_input, path_input)\n #print(\"DEBUG: input files -->\")\n #print(\"\\n\".join([f\"DEBUG: {filename}\" for filename in files_input]))\n objects_output = check_in_aws(s3, s3_output, depth=1)\n #print(\"DEBUG: output sets -->\")\n #print(\"\\n\".join([f\"DEBUG: {name}\" for name in objects_output]))\n # DEBUG: list sync objects in S3, remove output test set\n #objects_sync = check_in_aws(s3, s3_sync) # don't uncomment - dangerous!\n #print(\"DEBUG: sync objects -->\")\n #print(\"\\n\".join([f\"DEBUG: {name}\" for name in objects_sync]))\n\n # Initialize Copernicus Open Data Access Hub search object\n config = Config.load('config.yaml')\n data_hub = DataHub(config, limit=1000)\n\n # Cycle through all the data input sets: a set may contain multiple\n # input areas and shapes to process. Result will be a snapshot that is\n # cut with each shape (if any)\n for data_input in glob(os.path.join(path_input, '*')):\n if not os.path.isdir(data_input):\n #print(f\"DEBUG: '{data_input}' is not a valid data input!\")\n #print(\"TODO: unzip archived input sets...\")\n continue\n data_name = os.path.basename(data_input)\n #print(f\"DEBUG: 'data_input' basename = {data_name}\")\n if data_name in objects_output:\n #print(f\"Output set for '{data_input}' already exists. 
Skipping...\")\n continue\n #print(f\"DEBUG: input directory --->\\n{os.listdir(data_input)}\\n\")\n areas = glob(os.path.join(data_input, '*.geojson'))\n shapes = glob(os.path.join(data_input, '*.shp'))\n #print(f\"DEBUG: shapes = {shapes}\")\n if not shapes:\n shapes.append(None)\n for area in areas:\n try:\n print(f\"\\n=== Processing '{area}' ===\\n\")\n polygon, properties = Polygons.read_geojson(area)\n except Exception as e:\n print(f\"Failed to read '{area}'!\\n{str(e)}\")\n continue\n #print(f\"DEBUG:\\n{polygon}\")\n\n # Set config key (search area)\n #print(f\"DEBUG: config.search -->\\n{config.search}\")\n search = config.search.copy()\n search.update(properties)\n #config.search[\"footprint\"] = f\"\\\"Intersects({polygon})\\\"\"\n #print(f\"DEBUG: config.search -->\\n{config.search}\")\n #print(f\"Config 'search' section:\\n{config.search}\")\n\n snapshots = data_hub.search(search, area=polygon)\n snapshots = sorted(snapshots,\n key=lambda item: item.begin_position)\n\n print(f\"\\n=== {len(snapshots)} snapshots found ===\\n\")\n # print_snapshots(snapshots) # DEBUG\n # break # DEBUG\n\n print(f\"\\n=== Processing snapshots and shapes ===\\n\")\n for index, snapshot in enumerate(snapshots):\n filename = sync_with_aws(s3, s3_sync, data_hub, snapshot,\n path_data)\n if not filename:\n print(f\"'\\n{snapshot.uuid}' not synced. Skipping...\")\n continue\n else:\n print(f\"\\n{index:8d}: {snapshot.title}\")\n try:\n # Process each superposition of an area and a shape\n #\n # Process a snapshot\n #\n #print(f\"DEBUG: search keys = {search.keys()}\")\n path_target = os.path.join(path_output, data_name)\n #print(f\"DEBUG: path_data = '{path_data}'\")\n if search['platformName'] == 'Sentinel-2':\n filenames = process_sentinel2(filename, path_target,\n area, shapes)\n elif search['platformName'] == 'Sentinel-1':\n filenames = process_sentinel1(filename, path_target,\n area, shapes)\n else:\n filenames = []\n print(f\"NOT IMPLEMENTED: {snapshot.title}\",\n f\"{config.search['platformName']}\")\n #print(f\"DEBUG: exporting '{data_prefix}' to S3 -->\")\n # Put processing result (for each output set) to S3\n result = put_to_aws(s3, s3_output, path_output) # result...\n for outfile in filenames:\n remove(outfile) # all files (TODO: file or directory)\n except Exception as e:\n print(f\"FAILED: {e}\")\n raise e\n remove(filename) # remove snapshot\n #break # DEBUG: the first snapshot only\n print(f\"\\n=== Done snapshots for '{area}' ===\\n\")\n # Clean up output set (there should remain only logs)\n try:\n rmtree(os.path.join(path_output, data_name)) # data output - prefix\n except FileNotFoundError as e:\n pass\n # Clean up\n for path in (path_data, path_input, path_output):\n try:\n #print(f\"DEBUG: removing {path}\")\n rmtree(path)\n except FileNotFoundError as e:\n pass\n\n #print(f\"\\n=== Completed input processing cycle ===\\n\")\n periodic.enter(INTERVAL, 1, main, (periodic,))\n\n return None\n\n\nif __name__ == '__main__':\n if os.getenv('GPS_DEBUG', '').lower() in ['1', 'y', 'on', 'yes', 'true']:\n set_debug_aws()\n periodic = sched.scheduler(time.time, time.sleep)\n periodic.enter(INTERVAL, 1, main, (periodic,))\n periodic.run()\n","sub_path":"service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"519519209","text":"class Stack(object):\n def __init__(self, initial_size=10):\n self.arr = [0 for _ in range(initial_size)]\n self.next_index = 0\n 
self.num_elements = 0\n\n def push(self, value):\n if self.next_index == len(self.arr):\n print('Out of space! Increasing array capacity...')\n self._handle_stack_capacity_full()\n\n self.arr[self.next_index] = value\n self.next_index += 1\n self.num_elements += 1\n\n def _handle_stack_capacity_full(self):\n old_arr = self.arr\n\n self.arr = [0 for _ in range(2*len(old_arr))]\n for index, element in enumerate(old_arr):\n self.arr[index] = element\n\n def size(self):\n return self.num_elements\n\n def is_empty(self):\n if self.num_elements == 0:\n return True\n else:\n return False\n\n def pop(self):\n if self.is_empty():\n self.next_index = 0\n return None\n\n self.arr[self.num_elements - 1] = 0\n self.num_elements -= 1\n self.next_index -= 1\n return self.arr[self.next_index]\n\nfoo = Stack(5)\nfoo.push(1)\nfoo.push(2)\nfoo.push(3)\nfoo.push(4)\nfoo.push(5)\nfoo.push(6)\nprint(foo.arr)\nfoo.pop()\nfoo.push(-4)\nprint(foo.arr)\n# print(foo.size())\n# print(foo.is_empty())\n\n","sub_path":"data-structures/stacks-and-queues/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"216289025","text":"from __future__ import absolute_import, division, print_function\n# LIBTBX_SET_DISPATCHER_NAME phenix.map_box\n\nimport mmtbx.utils\nimport mmtbx.model\nfrom mmtbx.refinement import print_statistics\nimport iotbx.pdb\nimport libtbx.phil\nfrom libtbx.utils import Sorry\nimport os, sys\nfrom iotbx import reflection_file_utils\nfrom iotbx.file_reader import any_file\nfrom cctbx import maptbx\nfrom scitbx.matrix import col\nfrom six.moves import zip\n\nmaster_phil = libtbx.phil.parse(\"\"\"\n include scope libtbx.phil.interface.tracking_params\n pdb_file = None\n .type = path\n .help = Optional model file used to define region to be cut out\n .short_caption = Model file (optional)\n .style = file_type:pdb bold input_file\n map_coefficients_file = None\n .type = path\n .help = Input map coefficients file (alternative to ccp4 map file)\n .short_caption = Map coefficients\n .style = file_type:hkl bold input_file process_hkl child:map_labels:label\n label = None\n .type = str\n .short_caption = Map labels\n .help = Labels for map coefficients file\n .style = renderer:draw_map_arrays_widget parent:file_name:map_coefficients_file\n ccp4_map_file = None\n .help = Input map file (CCP4/mrc format).\n .short_caption = Input map file\n .type = str\n target_ncs_au_file = None\n .help = File with model indicating which au to choose in extract_unique\n .short_caption = Input target ncs au file\n .type = str\n half_map_list = None\n .type = strings\n .help = Half maps (extract_unique only). Supply file names \\\n separated by spaces\n .short_caption = Half maps (extract_unique only)\n selection = all\n .type = str\n .help = Atom selection to be applied to input PDB file\n .short_caption = Atom selection (optional)\n .input_size = 400\n selection_radius = 3.0\n .type = float\n .help = Atoms within selection_radius of a selected atom model will be\\\n kept as part of the selection.\n .short_caption = Selection radius\n box_cushion = 3.0\n .type = float\n .help = If mask_atoms is False, a box of density will be cut out around\\\n the input model (after selections are applied to the model). \\\n The size of the box of density will be box_cushion bigger than \\\n the model. 
Box cushion also applied if density_select is set.\n .short_caption = Box cushion\n\n mask_atoms=False\n .type=bool\n .help = Set map values to 0 outside molecular mask\n .short_caption = Mask atoms\n\n mask_atoms_atom_radius = 3.\n .type=float\n .help = Radius for masking around atoms\n .short_caption = Mask atoms atom radius\n\n value_outside_atoms = None\n .type = str\n .help = Set to 'mean' to make average value same inside and outside mask.\n .short_caption = Value outside atoms\n resolution_factor = 1./4\n .type = float\n .help = Resolution factor for calculation of map coefficients\n .short_caption = Resolution factor\n map_scale_factor = None\n .type = float\n .help = Scale factor to apply to map\n .short_caption = Map scale factor\n scale_max = 99999\n .type = float\n .help = Maximum value of amplitudes for output mtz file. If None, apply\\\n volume scaling\n .short_caption = Scale max\n resolution = None\n .type = float\n .help = Resolution for calculation of output map coefficients. Default is \\\n based on the gridding of the map (and may be higher-resolution than\\\n you want).\n .short_caption = Resolution\n output_format = xplor *mtz *ccp4\n .type=choice(multi=True)\n .help = Output format(s) for boxed map. Note that mtz format is only\\\n available if keep_origin=False or keep_map_size=True. (These \\\n are the cases where the map is cut down to size and placed \\\n at the origin or there is a full unit cell of data.)\n .short_caption = Output format\n\n output_file_name_prefix=None\n .type = str\n .help = Prefix for output file names. Default is name of the pdb file \\\n without the \".pdb\" suffix.\n .short_caption = Output file name prefix\n\n mask_select = False\n .type = bool\n .help = Select boundaries (min,max in x,y,z) based on auto-mask\n .short_caption = Mask select\n\n density_select = False\n .type = bool\n .help = Select boundaries based on where density is located.\n .short_caption = Density select\n\n density_select_threshold = 0.05\n .type = float\n .help = Choose region where density is this fraction of maximum or greater\n .short_caption = density_select threshold\n\n get_half_height_width = True\n .type = bool\n .help = Use 4 times half-width at half-height as estimate of max size \\\n in density_select\n .short_caption = Use half-height width\n\n symmetry = None\n .type = str\n .help = Optional symmetry (e.g., D7, I, C2) to be used if extract_unique\\\n is set. Alternative to symmetry_file. To find symmetry \\\n automatically specify symmetry=ALL.\n .short_caption = Symmetry\n symmetry_file = None\n .type = path\n .help = Symmetry file.\\\n Symmetry or symmetry_file required if extract_unique=True. \\\n May be a \\\n Phenix .ncs_spec file or BIOMTR records or a resolve ncs file.\n .short_caption = Symmetry file\n\n sequence_file = None\n .type = path\n .help = Sequence file (any standard format). Can be unique part or \\\n all copies. Used in identification of unique part of map \\\n and in masking with mask_select\n .short_caption = Sequence file (optional)\n\n molecular_mass = None\n .type = float\n .help = Molecular mass of object in map in Da (i.e., 33000 for 33 Kd).\\\n Used in identification \\\n of unique part of map and in masking by mask_select.\n .short_caption = Molecular mass (optional)\n\n solvent_content = None\n .type = float\n .help = Optional fraction of volume of map that is empty. 
\\\n Used in identification \\\n of unique part of map and in masking by mask_select\n .short_caption = Solvent content\n\n extract_unique = False\n .type = bool\n .help = Extract unique part of map. Requires either symmetry_file or \\\n symmetry and\\\n either sequence file or molecular mass to be supplied. If chain \\\n type is not protein it should be set as well.\n .short_caption = Extract unique\n\n box_buffer = 5\n .type = int\n .help = Padding around unique region in extract_unique\n .short_caption = Padding around unique region\n\n soft_mask_extract_unique = True\n .type = bool\n .help = Create soft mask at edges of extract_unique box (feather map into \\\n edge of box). Uses resolution as mask_radius\n .short_caption = Soft mask in extract unique\n\n mask_expand_ratio = 1\n .type = int\n .help = Mask expansion relative to resolution for extract_unique\n .short_caption = Mask expand ratio\n\n regions_to_keep = None\n .type = int\n .short_caption = Regions to keep\n .help = You can specify a limit to the number of regions to keep\\\n when generating the asymmetric unit of density.\n\n keep_low_density = True\n .type = bool\n .help = Get remainder (weak density) with extract_unique.\n .short_caption = Get remainder\n\n\n chain_type = None *PROTEIN DNA RNA\n .type = choice\n .help = Chain type. Only used if extract_unique is set. Has minor effect \\\n in setting thresholds for identification of molecular region.\\\n Use None if there is a mixture.\n .short_caption = Chain type\n\n soft_mask = False\n .type=bool\n .help = Use Gaussian mask in mask_atoms and on outside surface of box\n .short_caption = Soft mask\n\n soft_mask_radius=3\n .type=float\n .help = Gaussian mask smoothing radius\n .short_caption = Soft mask radius\n\n lower_bounds = None\n .type = ints\n .help = Lower bounds for cut out box. You can specify them directly.\\\n NOTE: lower and upper bounds refer to grid points after shifting \\\n the map to place the origin at (0,0,0). To refer to absolute \\\n values specify bounds_are_absolute=True.\n .short_caption = Lower bounds\n\n upper_bounds = None\n .type = ints\n .help = Upper bounds for cut out box. You can specify them directly.\\\n NOTE: lower and upper bounds refer to grid points after shifting \\\n the map to place the origin at (0,0,0). To refer to absolute \\\n values specify bounds_are_absolute=True.\n .short_caption = Upper bounds\n\n bounds_are_absolute = False\n .type = bool\n .help = Define lower and upper bounds as absolute. \\\n NOTE: lower and upper bounds refer to grid points after shifting \\\n the map to place the origin at (0,0,0). To refer to absolute \\\n values specify bounds_are_absolute=True.\n .short_caption = Bounds are absolute\n\n zero_outside_original_map = False\n .type = bool\n .help = If bounds for new map are outside original map, zero all points\\\n outside of original map\n .short_caption = Zero outside original map\n keep_map_size = False\n .type=bool\n .help = Keep original map gridding (do not cut anything out). \\\n Use to apply soft_mask and/or mask_atoms keeping same map size.\n .short_caption = Keep map size\n\n keep_origin = True\n .type=bool\n .help = Write out map, map_coefficients, and model \\\n with origin in original location. \\\n If False, shift the origin to (0,0,0). 
\\\n NOTE: to cut out a part of a map, shift the origin to (0,0,0),\\\n and make a new small map use keep_origin=False\\\n keep_input_unit_cell_and_grid=False\n .short_caption = Keep origin\n\n keep_input_unit_cell_and_grid = True\n .type = bool\n .help = Keep the input unit_cell dimensions and unit_cell_grid. \\\n If False, use the dimensions and grid of the cut out box as the \\\n unit cell map_box dimensions and grid.\\\n NOTE: to cut out a part of a map, shift the origin to (0,0,0),\\\n and make a new small map set keep_origin=False and \\\n keep_input_unit_cell_and_grid=False\n .short_caption = Keep input unit cell and grid\n\n output_unit_cell = None\n .type = floats\n .help = You can specify the unit cell for your map with 3 numbers. \\\n This should normally\\\n not be necessary. It can be used to fix a map that has the \\\n wrong unit cell.\n .short_caption = Output unit cell\n .expert_level = 3\n\n output_unit_cell_grid = None\n .type = ints\n .help = You can specify the grid (3 integers) corresponding to the \\\n output unit cell. \\\n This can be used to specify the full grid for the unit cell. \\\n if output_unit_cell is not specified, new unit cell parameters\\\n will be generated to maintain the grid spacing.\n .short_caption = Output unit cell grid\n .expert_level = 3\n\n output_origin_grid_units = None\n .type = ints\n .help = You can specify the origin of your output map. Normally you \\\n should use keep_origin=True or False to specify your origin \\\n but if you want to move it to a specific grid point you can do that.\\\n .short_caption = Output origin\n .expert_level = 3\n\n output_origin_match_this_file = None\n .type = path\n .help = As output_origin_grid_units, but use origin from this file\n .short_caption = File with origin info\n\n bounds_match_this_file = None\n .type = path\n .help = Take the lower and upper bounds from this map file and apply them \\\n to the input map file.\n .short_caption = File with bounds to match\n\n output_external_origin = None\n .type = floats\n .help = Write ORIGIN record to map file (this is an external origin \\\n used to specify relationship to external files such as model files.\\\n Three floating point numbers (A).\n .short_caption = output external origin\n\n restrict_map_size = False\n .type=bool\n .help = Do not go outside original map boundaries\n .short_caption = Restrict map size\n\n ignore_symmetry_conflicts = False\n .type=bool\n .help = Ignore unit cell from model if it conflicts with the map.\n .short_caption = Ignore symmetry conflicts\n\n output_ccp4_map_mean = None\n .type = float\n .help = Choose mean and SD of output CCP4 map\n .short_caption = Mean of output CCP4 map\n\n output_ccp4_map_sd = None\n .type = float\n .help = Choose mean and SD of output CCP4 map\n .short_caption = SD of output CCP4 map\n\n output_map_labels = None\n .type = str\n .multiple = True\n .help = Add this label to output map\n .short_caption = Add label\n\n gui\n .help = \"GUI-specific parameter required for output directory\"\n {\n output_dir = None\n .type = path\n .style = output_dir\n }\n\"\"\", process_includes=True)\n\nmaster_params = master_phil\n\ndef remove_element(text_list,element=None):\n new_text_list=[]\n for x in text_list:\n if x != element:\n new_text_list.append(x)\n return new_text_list\n\ndef run(args, crystal_symmetry=None,\n ncs_object=None,\n pdb_hierarchy=None,\n map_data=None,\n mask_data=None,\n half_map_data_list=None,\n half_map_labels_list=None,\n lower_bounds=None,\n upper_bounds=None,\n 
write_output_files=True,\n log=None):\n h = \"phenix.map_box: extract box with model and map around selected atoms\"\n if(log is None): log = sys.stdout\n print_statistics.make_header(h, out=log)\n default_message=\"\"\"\\\n\n%s.\n\nUsage:\n phenix.map_box model.pdb map_coefficients.mtz selection=\"chain A and resseq 1:10\"\n\nor\n\n phenix.map_box map.ccp4 density_select=True\n\nParameters:\"\"\"%h\n if(len(args) == 0 and not pdb_hierarchy):\n print(default_message)\n master_phil.show(prefix=\" \")\n return\n\n # Process inputs ignoring symmetry conflicts just to get the value of\n # ignore_symmetry_conflicts...\n\n inputs = mmtbx.utils.process_command_line_args(args = args,\n cmd_cs=crystal_symmetry,\n master_params = master_phil,\n suppress_symmetry_related_errors=True)\n params = inputs.params.extract()\n\n # Now process inputs for real and write a nice error message if necessary.\n try:\n inputs = mmtbx.utils.process_command_line_args(args = args,\n cmd_cs=crystal_symmetry,\n master_params = master_phil,\n suppress_symmetry_related_errors=params.ignore_symmetry_conflicts)\n except Exception as e:\n if str(e).find(\"symmetry mismatch \")>1:\n raise Sorry(str(e)+\"\\nTry 'ignore_symmetry_conflicts=True'\")\n else:\n raise e\n\n params = inputs.params.extract()\n master_phil.format(python_object=params).show(out=log)\n\n # Overwrite params with parameters in call if available\n if lower_bounds:\n params.lower_bounds=lower_bounds\n if upper_bounds:\n params.upper_bounds=upper_bounds\n\n # PDB file\n if params.pdb_file and not inputs.pdb_file_names and not pdb_hierarchy:\n inputs.pdb_file_names=[params.pdb_file]\n if(len(inputs.pdb_file_names)!=1 and not params.density_select and not\n params.mask_select and not\n pdb_hierarchy and not params.keep_map_size and not params.upper_bounds\n and not params.extract_unique and not params.bounds_match_this_file):\n raise Sorry(\"PDB file is needed unless extract_unique, \"+\n \"density_select, mask_select, keep_map_size \\nor bounds are set .\")\n if (len(inputs.pdb_file_names)!=1 and not pdb_hierarchy and \\\n (params.mask_atoms )):\n raise Sorry(\"PDB file is needed for mask_atoms\")\n if params.soft_mask and (not params.resolution) and \\\n (len(inputs.pdb_file_names)!=1 and not pdb_hierarchy):\n raise Sorry(\"Need resolution for soft_mask without PDB file\")\n if ((params.density_select or params.mask_select) and params.keep_map_size):\n raise Sorry(\"Cannot set both density_select/mask_select and keep_map_size\")\n if ((params.density_select or params.mask_select) and params.upper_bounds):\n raise Sorry(\"Cannot set both density_select/mask_select and bounds\")\n if (params.keep_map_size and params.upper_bounds):\n raise Sorry(\"Cannot set both keep_map_size and bounds\")\n if (params.upper_bounds and not params.lower_bounds):\n raise Sorry(\"Please set lower_bounds if you set upper_bounds\")\n if (params.extract_unique):\n if (not params.resolution):\n raise Sorry(\"Please set resolution for extract_unique\")\n if (not params.symmetry) and (not params.symmetry_file) and \\\n (not ncs_object):\n raise Sorry(\n \"Please supply a symmetry file or symmetry for extract_unique (you \"+\n \"\\ncan try symmetry=ALL if you do not know your symmetry or \"+\n \"symmetry=C1 if \\nthere is none)\")\n from mmtbx.ncs.ncs import ncs\n ncs_object=ncs()\n ncs_object.set_unit_ncs()\n\n if params.keep_input_unit_cell_and_grid and (\n (params.output_unit_cell_grid is not None ) or\n (params.output_unit_cell is not None ) ):\n raise Sorry(\"If you set 
keep_input_unit_cell_and_grid then you cannot \"+\\\n \"set \\noutput_unit_cell_grid or output_unit_cell\")\n\n if (write_output_files) and (\"mtz\" in params.output_format) and (\n (params.keep_origin) and (not params.keep_map_size)):\n print(\"\\nNOTE: Skipping write of mtz file as keep_origin=True and \\n\"+\\\n \"keep_map_size is False\\n\")\n params.output_format=remove_element(params.output_format,element='mtz')\n\n if (write_output_files) and (\"mtz\" in params.output_format) and (\n (params.extract_unique)):\n print(\"\\nNOTE: Skipping write of mtz file as extract_unique=True\\n\")\n params.output_format=remove_element(params.output_format,element='mtz')\n\n\n if params.output_origin_match_this_file or params.bounds_match_this_file:\n if params.output_origin_match_this_file:\n fn=params.output_origin_match_this_file\n if params.bounds_match_this_file:\n raise Sorry(\"Cannot match origin and bounds at same time\")\n else:\n fn=params.bounds_match_this_file\n if not params.ccp4_map_file:\n raise Sorry(\n \"Need to specify your input file with ccp4_map_file=xxx if you use \"+\n \"output_origin_match_this_file=xxxx or bounds_match_this_file=xxxx\")\n\n af = any_file(fn)\n if (af.file_type == 'ccp4_map'):\n origin=af.file_content.data.origin()\n if params.output_origin_match_this_file:\n params.output_origin_grid_units=origin\n print(\"Origin of (%s,%s,%s) taken from %s\" %(\n origin[0],origin[1],origin[2],fn))\n else:\n all=af.file_content.data.all()\n params.lower_bounds=origin\n print(\"Lower bounds of (%s,%s,%s) taken from %s\" %(\n params.lower_bounds[0],params.lower_bounds[1],\n params.lower_bounds[2],fn))\n params.upper_bounds=list(col(origin)+col(all)-col((1,1,1)))\n print(\"upper bounds of (%s,%s,%s) taken from %s\" %(\n params.upper_bounds[0],params.upper_bounds[1],\n params.upper_bounds[2],fn))\n params.bounds_are_absolute=True\n else:\n raise Sorry(\"Unable to interpret %s as map file\" %(fn))\n\n if params.output_origin_grid_units is not None and params.keep_origin:\n params.keep_origin=False\n print(\"Setting keep_origin=False as output_origin_grid_units is set\")\n print_statistics.make_sub_header(\"pdb model\", out=log)\n if len(inputs.pdb_file_names)>0:\n pdb_inp = iotbx.pdb.input(file_name=inputs.pdb_file_names[0])\n pdb_hierarchy = pdb_inp.construct_hierarchy()\n if pdb_hierarchy:\n pdb_atoms = pdb_hierarchy.atoms()\n pdb_atoms.reset_i_seq()\n else:\n pdb_hierarchy=None\n # Map or map coefficients\n map_coeff = None\n input_unit_cell_grid=None\n input_unit_cell=None\n input_map_labels=None\n if (not map_data):\n # read first mtz file\n if ( (len(inputs.reflection_file_names) > 0) or\n (params.map_coefficients_file is not None) ):\n # file in phil takes precedent\n if (params.map_coefficients_file is not None):\n if (len(inputs.reflection_file_names) == 0):\n inputs.reflection_file_names.append(params.map_coefficients_file)\n else:\n inputs.reflection_file_names[0] = params.map_coefficients_file\n map_coeff = reflection_file_utils.extract_miller_array_from_file(\n file_name = inputs.reflection_file_names[0],\n label = params.label,\n type = \"complex\",\n log = log)\n if not crystal_symmetry: crystal_symmetry=map_coeff.crystal_symmetry()\n fft_map = map_coeff.fft_map(resolution_factor=params.resolution_factor)\n fft_map.apply_sigma_scaling()\n map_data = fft_map.real_map_unpadded()\n map_or_map_coeffs_prefix=os.path.basename(\n inputs.reflection_file_names[0][:-4])\n # or read CCP4 map\n elif ( (inputs.ccp4_map is not None) or\n (params.ccp4_map_file is not None) ):\n 
if (params.ccp4_map_file is not None):\n af = any_file(params.ccp4_map_file)\n if (af.file_type == 'ccp4_map'):\n inputs.ccp4_map = af.file_content\n inputs.ccp4_map_file_name = params.ccp4_map_file\n print_statistics.make_sub_header(\"CCP4 map\", out=log)\n ccp4_map = inputs.ccp4_map\n ccp4_map.show_summary(prefix=\" \",out=log)\n if not crystal_symmetry: crystal_symmetry=ccp4_map.crystal_symmetry()\n map_data = ccp4_map.data #map_data()\n input_unit_cell_grid=ccp4_map.unit_cell_grid\n input_unit_cell=ccp4_map.unit_cell_parameters\n input_map_labels=ccp4_map.get_labels()\n\n if inputs.ccp4_map_file_name.endswith(\".ccp4\"):\n map_or_map_coeffs_prefix=os.path.basename(\n inputs.ccp4_map_file_name[:-5])\n else:\n map_or_map_coeffs_prefix=os.path.basename(\n inputs.ccp4_map_file_name[:-4])\n else: # have map_data\n map_or_map_coeffs_prefix=None\n\n if params.half_map_list and (not half_map_data_list):\n if not params.extract_unique:\n raise Sorry(\"Can only use half_map_with extract_unique\")\n print (\"Reading half-maps\",params.half_map_list)\n half_map_data_list=[]\n half_map_labels_list=[]\n for fn in params.half_map_list:\n print(\"Reading half map from %s\" %(fn),file=log)\n af = any_file(fn)\n print_statistics.make_sub_header(\"CCP4 map\", out=log)\n h_ccp4_map = af.file_content\n h_ccp4_map.show_summary(prefix=\" \",out=log)\n h_map_data = h_ccp4_map.data\n half_map_data_list.append(h_map_data)\n half_map_labels_list.append(h_ccp4_map.get_labels())\n\n if params.map_scale_factor:\n print(\"Applying scale factor of %s to map data on read-in\" %(\n params.map_scale_factor))\n map_data=map_data*params.map_scale_factor\n\n if params.output_origin_grid_units is not None:\n origin_to_match=tuple(params.output_origin_grid_units)\n else:\n origin_to_match=None\n\n if origin_to_match:\n sc=[]\n for x,o,a in zip(crystal_symmetry.unit_cell().parameters()[:3],\n origin_to_match,\n map_data.all()):\n sc.append(-x*o/a)\n shift_cart_for_origin_to_match=tuple(sc)\n else:\n origin_to_match=None\n shift_cart_for_origin_to_match=None\n\n\n\n if crystal_symmetry and not inputs.crystal_symmetry:\n inputs.crystal_symmetry=crystal_symmetry\n\n # final check that map_data exists\n if(map_data is None):\n raise Sorry(\"Map or map coefficients file is needed.\")\n\n if len(inputs.pdb_file_names)>0:\n output_prefix=os.path.basename(inputs.pdb_file_names[0])[:-4]\n else:\n output_prefix=map_or_map_coeffs_prefix\n\n if not pdb_hierarchy: # get an empty hierarchy\n from cctbx.array_family import flex\n pdb_hierarchy=iotbx.pdb.input(\n source_info='',lines=flex.split_lines('')).construct_hierarchy()\n xray_structure = pdb_hierarchy.extract_xray_structure(\n crystal_symmetry=inputs.crystal_symmetry)\n xray_structure.show_summary(f=log)\n #\n if not params.selection: params.selection=\"all\"\n selection = pdb_hierarchy.atom_selection_cache().selection(\n string = params.selection)\n if selection.size():\n print_statistics.make_sub_header(\"atom selection\", out=log)\n print(\"Selection string: selection='%s'\"%params.selection, file=log)\n print(\" selects %d atoms from total %d atoms.\"%(selection.count(True),\n selection.size()), file=log)\n sites_cart_all = xray_structure.sites_cart()\n sites_cart = sites_cart_all.select(selection)\n selection = xray_structure.selection_within(\n radius = params.selection_radius,\n selection = selection)\n\n if not ncs_object:\n from mmtbx.ncs.ncs import ncs\n ncs_object=ncs()\n if params.symmetry_file:\n ncs_object.read_ncs(params.symmetry_file,log=log)\n print(\"Total of %s 
operators read\" %(ncs_object.max_operators()), file=log)\n if not ncs_object or ncs_object.max_operators()<1:\n print(\"No symmetry available\", file=log)\n if ncs_object:\n n_ops=max(1,ncs_object.max_operators())\n else:\n n_ops=1\n\n # Get sequence if extract_unique is set\n sequence=None\n if params.extract_unique or params.mask_select:\n if params.sequence_file:\n if n_ops > 1: # get unique part of sequence\n remove_duplicates=True\n else:\n remove_duplicates=False\n from iotbx.bioinformatics import get_sequences\n sequence=(\" \".join(get_sequences(file_name=params.sequence_file,\n remove_duplicates=remove_duplicates)))\n\n if params.chain_type in ['None',None]: params.chain_type=None\n if sequence and not params.molecular_mass:\n # get molecular mass from sequence\n from iotbx.bioinformatics import text_from_chains_matching_chain_type\n if params.chain_type in [None,'PROTEIN']:\n n_protein=len(text_from_chains_matching_chain_type(\n text=sequence,chain_type='PROTEIN'))\n else:\n n_protein=0\n if params.chain_type in [None,'RNA']:\n n_rna=len(text_from_chains_matching_chain_type(\n text=sequence,chain_type='RNA'))\n else:\n n_rna=0\n if params.chain_type in [None,'DNA']:\n n_dna=len(text_from_chains_matching_chain_type(\n text=sequence,chain_type='DNA'))\n else:\n n_dna=0\n params.molecular_mass=n_ops*(n_protein*110+(n_rna+n_dna)*330)\n print(\"\\nEstimate of molecular mass is %.0f \" %(params.molecular_mass), file=log)\n if params.density_select or params.mask_select:\n print_statistics.make_sub_header(\n \"Extracting box around selected density and writing output files\", out=log)\n else:\n print_statistics.make_sub_header(\n \"Extracting box around selected atoms and writing output files\", out=log)\n #\n if params.value_outside_atoms=='mean':\n print(\"\\nValue outside atoms mask will be set to mean inside mask\", file=log)\n if params.get_half_height_width and params.density_select:\n print(\"\\nHalf width at half height will be used to id boundaries\", file=log)\n\n if params.soft_mask and sites_cart_all.size()>0:\n print(\"\\nSoft mask will be applied to model-based mask\", file=log)\n elif params.soft_mask:\n print (\"\\nSoft mask will be applied to outside of map box\",file=log)\n if params.keep_map_size:\n print(\"\\nEntire map will be kept (not cutting out region)\", file=log)\n if params.restrict_map_size:\n print(\"\\nOutput map will be within input map\", file=log)\n if params.lower_bounds and params.upper_bounds:\n print(\"Bounds for cut out map are (%s,%s,%s) to (%s,%s,%s)\" %(\n tuple(list(params.lower_bounds)+list(params.upper_bounds))), file=log)\n\n if mask_data:\n mask_data=mask_data.as_double()\n box = mmtbx.utils.extract_box_around_model_and_map(\n xray_structure = xray_structure,\n map_data = map_data.as_double(),\n mask_data = mask_data,\n box_cushion = params.box_cushion,\n selection = selection,\n mask_select = params.mask_select,\n density_select = params.density_select,\n threshold = params.density_select_threshold,\n get_half_height_width = params.get_half_height_width,\n mask_atoms = params.mask_atoms,\n soft_mask = params.soft_mask,\n soft_mask_radius = params.soft_mask_radius,\n mask_atoms_atom_radius = params.mask_atoms_atom_radius,\n value_outside_atoms = params.value_outside_atoms,\n keep_map_size = params.keep_map_size,\n restrict_map_size = params.restrict_map_size,\n lower_bounds = params.lower_bounds,\n upper_bounds = params.upper_bounds,\n bounds_are_absolute = params.bounds_are_absolute,\n zero_outside_original_map = 
params.zero_outside_original_map,\n extract_unique = params.extract_unique,\n target_ncs_au_file = params.target_ncs_au_file,\n regions_to_keep = params.regions_to_keep,\n box_buffer = params.box_buffer,\n soft_mask_extract_unique = params.soft_mask_extract_unique,\n mask_expand_ratio = params.mask_expand_ratio,\n keep_low_density = params.keep_low_density,\n chain_type = params.chain_type,\n sequence = sequence,\n solvent_content = params.solvent_content,\n molecular_mass = params.molecular_mass,\n resolution = params.resolution,\n ncs_object = ncs_object,\n symmetry = params.symmetry,\n half_map_data_list = half_map_data_list,\n )\n\n ph_box = pdb_hierarchy.select(selection)\n ph_box.adopt_xray_structure(box.xray_structure_box)\n box.hierarchy=ph_box\n\n if params.mask_select:\n print(\"\\nSolvent content used in mask_select: %.3f \" %(\n box.get_solvent_content()),file=log)\n if (inputs and\n inputs.crystal_symmetry and inputs.ccp4_map and\n inputs.crystal_symmetry.unit_cell().parameters() and\n inputs.ccp4_map.unit_cell_parameters ) and (\n inputs.crystal_symmetry.unit_cell().parameters() !=\n inputs.ccp4_map.unit_cell_parameters):\n print(\"\\nNOTE: Input CCP4 map is only part of unit cell:\", file=log)\n print(\"Full unit cell ('unit cell parameters'): \"+\\\n \"(%.1f, %.1f, %.1f, %.1f, %.1f, %.1f) A\" %tuple(\n inputs.ccp4_map.unit_cell_parameters), file=log)\n print(\"Size of CCP4 map 'map unit cell': \"+\\\n \"(%.1f, %.1f, %.1f, %.1f, %.1f, %.1f) A\" %tuple(\n inputs.crystal_symmetry.unit_cell().parameters()), file=log)\n print(\"Full unit cell as grid units: (%s, %s, %s)\" %(\n inputs.ccp4_map.unit_cell_grid), file=log)\n print(\"Map unit cell as grid units: (%s, %s, %s)\" %(\n map_data.all()), file=log)\n\n box.unit_cell_parameters_from_ccp4_map=inputs.ccp4_map.unit_cell_parameters\n box.unit_cell_parameters_deduced_from_map_grid=\\\n inputs.crystal_symmetry.unit_cell().parameters()\n\n else:\n box.unit_cell_parameters_from_ccp4_map=None\n box.unit_cell_parameters_deduced_from_map_grid=None\n\n\n\n if box.pdb_outside_box_msg:\n print(box.pdb_outside_box_msg, file=log)\n\n # NOTE: box object is always shifted to place origin at (0,0,0)\n\n # NOTE ON ORIGIN SHIFTS: The shifts are applied locally here. The box\n # object is not affected and always has the origin at (0,0,0)\n # output_box is copy of box with shift_cart corresponding to the output\n # files. Normally this is the same as the original shift_cart. However\n # if user has specified a new output origin it will differ.\n\n # For output files ONLY:\n # keep_origin==False leave origin at (0,0,0)\n # keep_origin==True: we shift everything back to where it was,\n # output_origin_grid_units=10,10,10: output origin is at (10,10,10)\n\n # ncs_object is original\n # box.ncs_object is shifted by shift_cart\n # output_box.ncs_object is shifted back by -new shift_cart\n\n # Additional note on output unit_cell and grid_units.\n # The ccp4-style output map can specify the unit cell and grid units\n # corresponding to that cell. This can be separate from the origin and\n # number of grid points in the map as written. 
If specified, write these\n # out to the output ccp4 map and also use this unit cell for writing\n # any output PDB files\n\n from copy import deepcopy\n output_box=deepcopy(box) # won't use box below here except to return it\n\n\n print(\"\\nBox cell dimensions: (%.2f, %.2f, %.2f) A\" %(\n box.box_crystal_symmetry.unit_cell().parameters()[:3]), file=log)\n\n if box.shift_cart:\n print(\"Working origin moved from grid position of\"+\\\n \": (%d, %d, %d) to (0,0,0) \" %(\n tuple(box.origin_shift_grid_units(reverse=True))), file=log)\n print(\"Working origin moved from coordinates of:\"+\\\n \" (%.2f, %.2f, %.2f) A to (0,0,0)\\n\" %(\n tuple(-col(box.shift_cart))), file=log)\n\n if (params.keep_origin):\n print(\"\\nRestoring original position for output files\", file=log)\n print(\"Origin will be at grid position of\"+\\\n \": (%d, %d, %d) \" %(\n tuple(box.origin_shift_grid_units(reverse=True))), file=log)\n print(\"\\nOutput files will be in same location as original\", end=' ', file=log)\n if not params.keep_map_size:\n print(\"just cut out.\", file=log)\n else:\n print(\"keeping entire map\", file=log)\n print(\"Note that output maps are only valid in the cut out region.\\n\", file=log)\n\n else:\n if origin_to_match:\n output_box.shift_cart=shift_cart_for_origin_to_match\n if params.output_origin_grid_units:\n print(\"Output map origin to be shifted to match target\", file=log)\n print(\"Placing origin at grid point (%s, %s, %s)\" %(\n origin_to_match)+\"\\n\"+ \\\n \"Final coordinate shift for output files: (%.2f,%.2f,%.2f) A\\n\" %(\n tuple(col(output_box.shift_cart)-col(box.shift_cart))), file=log)\n elif box.shift_cart:\n output_box.shift_cart=(0,0,0) # not shifting back\n print(\"Final origin will be at (0,0,0)\", file=log)\n print(\"Final coordinate shift for output files: (%.2f,%.2f,%.2f) A\\n\" %(\n tuple(col(output_box.shift_cart)-col(box.shift_cart))), file=log)\n else:\n print(\"\\nOutput files are in same location as original and origin \"+\\\n \"is at (0,0,0)\\n\", file=log)\n\n print(\"\\nBox grid: (%s, %s, %s) \" %(output_box.map_box.all()),file=log)\n ph_output_box_output_location = ph_box.deep_copy()\n if output_box.shift_cart: # shift coordinates and NCS back by shift_cart\n # NOTE output_box.shift_cart could be different than box.shift_cart if\n # there is a target position for the origin and it is not the same as the\n # original origin.\n sites_cart = output_box.shift_sites_cart_back(\n output_box.xray_structure_box.sites_cart())\n xrs_offset = ph_output_box_output_location.extract_xray_structure(\n crystal_symmetry=output_box.xray_structure_box.crystal_symmetry()\n ).replace_sites_cart(new_sites = sites_cart)\n ph_output_box_output_location.adopt_xray_structure(xrs_offset)\n\n if output_box.ncs_object:\n output_box.ncs_object=output_box.ncs_object.coordinate_offset(\n tuple(-col(output_box.shift_cart)))\n shift_back=True\n else:\n shift_back=False\n\n if params.keep_input_unit_cell_and_grid and \\\n (input_unit_cell_grid is not None) and \\\n (input_unit_cell is not None):\n params.output_unit_cell=input_unit_cell\n params.output_unit_cell_grid=input_unit_cell_grid\n print(\"Setting output unit cell parameters and unit cell grid to\"+\\\n \" match\\ninput map file\", file=log)\n\n if params.output_unit_cell: # Set output unit cell parameters\n from cctbx import crystal\n output_crystal_symmetry=crystal.symmetry(\n unit_cell=params.output_unit_cell, space_group=\"P1\")\n output_unit_cell=output_crystal_symmetry.unit_cell()\n print(\"Output unit cell set to: %.2f, 
%.2f, %.2f, %.2f, %.2f, %.2f)\" %tuple(\n output_crystal_symmetry.unit_cell().parameters()), file=log)\n else:\n output_crystal_symmetry=None\n\n # ============= Check/set output unit cell grid and cell parameters =======\n if params.output_unit_cell_grid or output_crystal_symmetry:\n if params.output_unit_cell_grid:\n output_unit_cell_grid=params.output_unit_cell_grid\n else:\n output_unit_cell_grid=output_box.map_box.all()\n print(\"Output unit cell grid set to: (%s, %s, %s)\" %tuple(\n output_unit_cell_grid), file=log)\n\n expected_output_abc=[]\n box_spacing=[]\n output_spacing=[]\n box_abc=output_box.xray_structure_box.\\\n crystal_symmetry().unit_cell().parameters()[:3]\n if output_crystal_symmetry:\n output_abc=output_crystal_symmetry.unit_cell().parameters()[:3]\n else:\n output_abc=[None,None,None]\n for a_box,a_output,n_box,n_output in zip(\n box_abc,\n output_abc,\n output_box.map_box.all(),\n output_unit_cell_grid):\n expected_output_abc.append(a_box*n_output/n_box)\n box_spacing.append(a_box/n_box)\n if output_crystal_symmetry:\n output_spacing.append(a_output/n_output)\n else:\n output_spacing.append(a_box/n_box)\n\n if output_crystal_symmetry: # make sure it is compatible...\n r0=expected_output_abc[0]/output_abc[0]\n r1=expected_output_abc[1]/output_abc[1]\n r2=expected_output_abc[2]/output_abc[2]\n from libtbx.test_utils import approx_equal\n if not approx_equal(r0,r1,eps=0.001) or not approx_equal(r0,r2,eps=0.001):\n print(\"WARNING: output_unit_cell and cell_grid will \"+\\\n \"change ratio of grid spacing.\\nOld spacings: \"+\\\n \"(%.2f, %.2f, %.2f) A \" %(tuple(box_spacing))+\\\n \"\\nNew spacings: (%.2f, %.2f, %.2f) A \\n\" %(tuple(output_spacing)), file=log)\n else:\n output_abc=expected_output_abc\n\n from cctbx import crystal\n output_crystal_symmetry=crystal.symmetry(\n unit_cell=list(output_abc)+[90,90,90], space_group=\"P1\")\n print(\"Output unit cell will be: (%.2f, %.2f, %.2f, %.2f, %.2f, %.2f)\\n\"%(\n tuple(output_crystal_symmetry.unit_cell().parameters())), file=log)\n\n else:\n output_unit_cell_grid = output_box.map_box.all()\n output_crystal_symmetry=output_box.xray_structure_box.crystal_symmetry()\n # ========== Done check/set output unit cell grid and cell parameters =====\n\n if write_output_files:\n # Write PDB file\n if ph_box.overall_counts().n_residues>0:\n\n if(params.output_file_name_prefix is None):\n file_name = \"%s_box.pdb\"%output_prefix\n else: file_name = \"%s.pdb\"%params.output_file_name_prefix\n ph_output_box_output_location.write_pdb_file(file_name=file_name,\n crystal_symmetry = output_crystal_symmetry)\n print(\"Writing boxed PDB with box unit cell to %s\" %(\n file_name), file=log)\n\n # Write NCS file if NCS\n if output_box.ncs_object and output_box.ncs_object.max_operators()>0:\n if(params.output_file_name_prefix is None):\n output_symmetry_file = \"%s_box.ncs_spec\"%output_prefix\n else:\n output_symmetry_file = \"%s.ncs_spec\"%params.output_file_name_prefix\n output_box.ncs_object.format_all_for_group_specification(\n file_name=output_symmetry_file)\n\n print(\"\\nWriting symmetry to %s\" %( output_symmetry_file), file=log)\n\n # Write ccp4 map.\n if(\"ccp4\" in params.output_format):\n if(params.output_file_name_prefix is None):\n file_name = \"%s_box.ccp4\"%output_prefix\n else: file_name = \"%s.ccp4\"%params.output_file_name_prefix\n from iotbx.mrcfile import create_output_labels\n if params.extract_unique:\n program_name='map_box using extract_unique'\n limitations=[\"extract_unique\"]\n else:\n program_name='map_box'\n 
limitations=[]\n labels=create_output_labels(program_name=program_name,\n input_file_name=inputs.ccp4_map_file_name,\n input_labels=input_map_labels,\n limitations=limitations,\n output_labels=params.output_map_labels)\n output_box.write_ccp4_map(file_name=file_name,\n output_crystal_symmetry=output_crystal_symmetry,\n output_mean=params.output_ccp4_map_mean,\n output_sd=params.output_ccp4_map_sd,\n output_unit_cell_grid=output_unit_cell_grid,\n shift_back=shift_back,\n output_map_labels=labels,\n output_external_origin=params.output_external_origin)\n print(\"Writing boxed map \"+\\\n \"to CCP4 formatted file: %s\"%file_name, file=log)\n if not params.half_map_list:\n params.half_map_list=[]\n if not output_box.map_box_half_map_list:\n output_box.map_box_half_map_list=[]\n if not half_map_labels_list:\n half_map_labels_list=len(output_box.map_box_half_map_list)*[None]\n for hm,labels,fn in zip(\n output_box.map_box_half_map_list,\n half_map_labels_list,\n params.half_map_list): # half maps matching\n labels=create_output_labels(program_name=program_name,\n input_file_name=fn,\n input_labels=labels,\n limitations=limitations,\n output_labels=params.output_map_labels)\n hm_fn=\"%s_box.ccp4\" %( \".\".join(os.path.basename(fn).split(\".\")[:-1]))\n output_box.write_ccp4_map(file_name=hm_fn,\n map_data=hm,\n output_crystal_symmetry=output_crystal_symmetry,\n output_mean=params.output_ccp4_map_mean,\n output_sd=params.output_ccp4_map_sd,\n output_unit_cell_grid=output_unit_cell_grid,\n shift_back=shift_back,\n output_map_labels=labels,\n output_external_origin=params.output_external_origin)\n print (\"Writing boxed half map to: %s \" %(hm_fn),file=log)\n\n # Write xplor map. Shift back if keep_origin=True\n if(\"xplor\" in params.output_format):\n if(params.output_file_name_prefix is None):\n file_name = \"%s_box.xplor\"%output_prefix\n else: file_name = \"%s.xplor\"%params.output_file_name_prefix\n output_box.write_xplor_map(file_name=file_name,\n output_crystal_symmetry=output_crystal_symmetry,\n output_unit_cell_grid=output_unit_cell_grid,\n shift_back=shift_back,)\n print(\"Writing boxed map \"+\\\n \"to X-plor formatted file: %s\"%file_name, file=log)\n\n # Write mtz map coeffs. 
Shift back if keep_origin=True\n if(\"mtz\" in params.output_format):\n if(params.output_file_name_prefix is None):\n file_name = \"%s_box.mtz\"%output_prefix\n else: file_name = \"%s.mtz\"%params.output_file_name_prefix\n\n print(\"Writing map coefficients \"+\\\n \"to MTZ file: %s\"%file_name, file=log)\n if(map_coeff is not None):\n d_min = map_coeff.d_min()\n elif params.resolution is not None:\n d_min = params.resolution\n else:\n d_min = maptbx.d_min_from_map(map_data=output_box.map_box,\n unit_cell=output_box.xray_structure_box.unit_cell())\n output_box.map_coefficients(d_min=d_min,\n scale_max=params.scale_max,\n resolution_factor=params.resolution_factor, file_name=file_name,\n shift_back=shift_back)\n\n print(file=log)\n return box\n\n# =============================================================================\n# GUI-specific class for running command\nfrom libtbx import runtime_utils\nfrom wxGUI2 import utils\n\ndef validate_params(params):\n return True\n\nclass launcher(runtime_utils.target_with_save_result):\n def run(self):\n utils.safe_makedirs(self.output_dir)\n os.chdir(self.output_dir)\n result = run(args=self.args, log=sys.stdout)\n return result\n\n# =============================================================================\n\nif (__name__ == \"__main__\"):\n run(args=sys.argv[1:])\n","sub_path":"mmtbx/command_line/map_box.py","file_name":"map_box.py","file_ext":"py","file_size_in_byte":43085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"363990961","text":"import os\nimport pandas\nimport urllib\n\nFREEMONT_URL = 'https://data.seattle.gov/api/views/47yq-6ugv/rows.csv?accessType=DOWNLOAD'\n\ndef get_freemont_data(filename = 'Freemont.csv', url = FREEMONT_URL, force_download = False):\n if force_download or not os.path.exists(filename):\n urllib.request.urlretrieve(url, filename)\n \n data = pandas.read_csv(filename, index_col = 'Date')\n \n try:\n data.index = pandas.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')\n except TypeError:\n data.index = pandas.to_datetime(data.index)\n \n data.columns = ['Total', 'East', 'West']\n \n return(data)\n","sub_path":"jupyterworkflow/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"288599941","text":"import argparse\nimport collections\nimport imblearn\nimport joblib\nimport json\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport skrebate\nimport sklearn\nimport sys\nimport xgboost\nimport warnings\nfrom imblearn import under_sampling, over_sampling, combine\nfrom scipy.io import mmread\nfrom mlxtend import classifier, regressor\nfrom sklearn.base import clone\nfrom sklearn import (cluster, compose, decomposition, ensemble,\n feature_extraction, feature_selection,\n gaussian_process, kernel_approximation, metrics,\n model_selection, naive_bayes, neighbors,\n pipeline, preprocessing, svm, linear_model,\n tree, discriminant_analysis)\nfrom sklearn.exceptions import FitFailedWarning\nfrom sklearn.model_selection._validation import _score, cross_validate\nfrom sklearn.model_selection import _search, _validation\n\nfrom galaxy_ml.utils import (SafeEval, get_cv, get_scoring, load_model,\n read_columns, try_get_attr, get_module)\n\n\n_fit_and_score = try_get_attr('galaxy_ml.model_validations', '_fit_and_score')\nsetattr(_search, '_fit_and_score', _fit_and_score)\nsetattr(_validation, '_fit_and_score', _fit_and_score)\n\nN_JOBS = 
int(__import__('os').environ.get('GALAXY_SLOTS', 1))\nCACHE_DIR = './cached'\nNON_SEARCHABLE = ('n_jobs', 'pre_dispatch', 'memory', '_path',\n 'nthread', 'callbacks')\nALLOWED_CALLBACKS = ('EarlyStopping', 'TerminateOnNaN', 'ReduceLROnPlateau',\n 'CSVLogger', 'None')\n\n\ndef _eval_search_params(params_builder):\n search_params = {}\n\n for p in params_builder['param_set']:\n search_list = p['sp_list'].strip()\n if search_list == '':\n continue\n\n param_name = p['sp_name']\n if param_name.lower().endswith(NON_SEARCHABLE):\n print(\"Warning: `%s` is not eligible for search and was \"\n \"omitted!\" % param_name)\n continue\n\n if not search_list.startswith(':'):\n safe_eval = SafeEval(load_scipy=True, load_numpy=True)\n ev = safe_eval(search_list)\n search_params[param_name] = ev\n else:\n # Have `:` before search list, asks for estimator evaluatio\n safe_eval_es = SafeEval(load_estimators=True)\n search_list = search_list[1:].strip()\n # TODO maybe add regular express check\n ev = safe_eval_es(search_list)\n preprocessings = (\n preprocessing.StandardScaler(), preprocessing.Binarizer(),\n preprocessing.MaxAbsScaler(),\n preprocessing.Normalizer(), preprocessing.MinMaxScaler(),\n preprocessing.PolynomialFeatures(),\n preprocessing.RobustScaler(), feature_selection.SelectKBest(),\n feature_selection.GenericUnivariateSelect(),\n feature_selection.SelectPercentile(),\n feature_selection.SelectFpr(), feature_selection.SelectFdr(),\n feature_selection.SelectFwe(),\n feature_selection.VarianceThreshold(),\n decomposition.FactorAnalysis(random_state=0),\n decomposition.FastICA(random_state=0),\n decomposition.IncrementalPCA(),\n decomposition.KernelPCA(random_state=0, n_jobs=N_JOBS),\n decomposition.LatentDirichletAllocation(\n random_state=0, n_jobs=N_JOBS),\n decomposition.MiniBatchDictionaryLearning(\n random_state=0, n_jobs=N_JOBS),\n decomposition.MiniBatchSparsePCA(\n random_state=0, n_jobs=N_JOBS),\n decomposition.NMF(random_state=0),\n decomposition.PCA(random_state=0),\n decomposition.SparsePCA(random_state=0, n_jobs=N_JOBS),\n decomposition.TruncatedSVD(random_state=0),\n kernel_approximation.Nystroem(random_state=0),\n kernel_approximation.RBFSampler(random_state=0),\n kernel_approximation.AdditiveChi2Sampler(),\n kernel_approximation.SkewedChi2Sampler(random_state=0),\n cluster.FeatureAgglomeration(),\n skrebate.ReliefF(n_jobs=N_JOBS),\n skrebate.SURF(n_jobs=N_JOBS),\n skrebate.SURFstar(n_jobs=N_JOBS),\n skrebate.MultiSURF(n_jobs=N_JOBS),\n skrebate.MultiSURFstar(n_jobs=N_JOBS),\n imblearn.under_sampling.ClusterCentroids(\n random_state=0, n_jobs=N_JOBS),\n imblearn.under_sampling.CondensedNearestNeighbour(\n random_state=0, n_jobs=N_JOBS),\n imblearn.under_sampling.EditedNearestNeighbours(\n random_state=0, n_jobs=N_JOBS),\n imblearn.under_sampling.RepeatedEditedNearestNeighbours(\n random_state=0, n_jobs=N_JOBS),\n imblearn.under_sampling.AllKNN(random_state=0, n_jobs=N_JOBS),\n imblearn.under_sampling.InstanceHardnessThreshold(\n random_state=0, n_jobs=N_JOBS),\n imblearn.under_sampling.NearMiss(\n random_state=0, n_jobs=N_JOBS),\n imblearn.under_sampling.NeighbourhoodCleaningRule(\n random_state=0, n_jobs=N_JOBS),\n imblearn.under_sampling.OneSidedSelection(\n random_state=0, n_jobs=N_JOBS),\n imblearn.under_sampling.RandomUnderSampler(\n random_state=0),\n imblearn.under_sampling.TomekLinks(\n random_state=0, n_jobs=N_JOBS),\n imblearn.over_sampling.ADASYN(random_state=0, n_jobs=N_JOBS),\n imblearn.over_sampling.RandomOverSampler(random_state=0),\n 
imblearn.over_sampling.SMOTE(random_state=0, n_jobs=N_JOBS),\n imblearn.over_sampling.SVMSMOTE(random_state=0, n_jobs=N_JOBS),\n imblearn.over_sampling.BorderlineSMOTE(\n random_state=0, n_jobs=N_JOBS),\n imblearn.over_sampling.SMOTENC(\n categorical_features=[], random_state=0, n_jobs=N_JOBS),\n imblearn.combine.SMOTEENN(random_state=0),\n imblearn.combine.SMOTETomek(random_state=0))\n newlist = []\n for obj in ev:\n if obj is None:\n newlist.append(None)\n elif obj == 'all_0':\n newlist.extend(preprocessings[0:35])\n elif obj == 'sk_prep_all': # no KernalCenter()\n newlist.extend(preprocessings[0:7])\n elif obj == 'fs_all':\n newlist.extend(preprocessings[7:14])\n elif obj == 'decomp_all':\n newlist.extend(preprocessings[14:25])\n elif obj == 'k_appr_all':\n newlist.extend(preprocessings[25:29])\n elif obj == 'reb_all':\n newlist.extend(preprocessings[30:35])\n elif obj == 'imb_all':\n newlist.extend(preprocessings[35:54])\n elif type(obj) is int and -1 < obj < len(preprocessings):\n newlist.append(preprocessings[obj])\n elif hasattr(obj, 'get_params'): # user uploaded object\n if 'n_jobs' in obj.get_params():\n newlist.append(obj.set_params(n_jobs=N_JOBS))\n else:\n newlist.append(obj)\n else:\n sys.exit(\"Unsupported estimator type: %r\" % (obj))\n\n search_params[param_name] = newlist\n\n return search_params\n\n\ndef main(inputs, infile_estimator, infile1, infile2,\n outfile_result, outfile_object=None,\n outfile_weights=None, groups=None,\n ref_seq=None, intervals=None, targets=None,\n fasta_path=None):\n \"\"\"\n Parameter\n ---------\n inputs : str\n File path to galaxy tool parameter\n\n infile_estimator : str\n File path to estimator\n\n infile1 : str\n File path to dataset containing features\n\n infile2 : str\n File path to dataset containing target values\n\n outfile_result : str\n File path to save the results, either cv_results or test result\n\n outfile_object : str, optional\n File path to save searchCV object\n\n outfile_weights : str, optional\n File path to save model weights\n\n groups : str\n File path to dataset containing groups labels\n\n ref_seq : str\n File path to dataset containing genome sequence file\n\n intervals : str\n File path to dataset containing interval file\n\n targets : str\n File path to dataset compressed target bed file\n\n fasta_path : str\n File path to dataset containing fasta file\n \"\"\"\n warnings.simplefilter('ignore')\n\n with open(inputs, 'r') as param_handler:\n params = json.load(param_handler)\n\n # conflict param checker\n if params['outer_split']['split_mode'] == 'nested_cv' \\\n and params['save'] != 'nope':\n raise ValueError(\"Save best estimator is not possible for nested CV!\")\n\n if not (params['search_schemes']['options']['refit']) \\\n and params['save'] != 'nope':\n raise ValueError(\"Save best estimator is not possible when refit \"\n \"is False!\")\n\n params_builder = params['search_schemes']['search_params_builder']\n\n with open(infile_estimator, 'rb') as estimator_handler:\n estimator = load_model(estimator_handler)\n estimator_params = estimator.get_params()\n\n # store read dataframe object\n loaded_df = {}\n\n input_type = params['input_options']['selected_input']\n # tabular input\n if input_type == 'tabular':\n header = 'infer' if params['input_options']['header1'] else None\n column_option = (params['input_options']['column_selector_options_1']\n ['selected_column_selector_option'])\n if column_option in ['by_index_number', 'all_but_by_index_number',\n 'by_header_name', 'all_but_by_header_name']:\n c = 
params['input_options']['column_selector_options_1']['col1']\n else:\n c = None\n\n df_key = infile1 + repr(header)\n df = pd.read_csv(infile1, sep='\\t', header=header,\n parse_dates=True)\n loaded_df[df_key] = df\n\n X = read_columns(df, c=c, c_option=column_option).astype(float)\n # sparse input\n elif input_type == 'sparse':\n X = mmread(open(infile1, 'r'))\n\n # fasta_file input\n elif input_type == 'seq_fasta':\n pyfaidx = get_module('pyfaidx')\n sequences = pyfaidx.Fasta(fasta_path)\n n_seqs = len(sequences.keys())\n X = np.arange(n_seqs)[:, np.newaxis]\n for param in estimator_params.keys():\n if param.endswith('fasta_path'):\n estimator.set_params(\n **{param: fasta_path})\n break\n else:\n raise ValueError(\n \"The selected estimator doesn't support \"\n \"fasta file input! Please consider using \"\n \"KerasGBatchClassifier with \"\n \"FastaDNABatchGenerator/FastaProteinBatchGenerator \"\n \"or having GenomeOneHotEncoder/ProteinOneHotEncoder \"\n \"in pipeline!\")\n\n elif input_type == 'refseq_and_interval':\n path_params = {\n 'data_batch_generator__ref_genome_path': ref_seq,\n 'data_batch_generator__intervals_path': intervals,\n 'data_batch_generator__target_path': targets\n }\n estimator.set_params(**path_params)\n n_intervals = sum(1 for line in open(intervals))\n X = np.arange(n_intervals)[:, np.newaxis]\n\n # Get target y\n header = 'infer' if params['input_options']['header2'] else None\n column_option = (params['input_options']['column_selector_options_2']\n ['selected_column_selector_option2'])\n if column_option in ['by_index_number', 'all_but_by_index_number',\n 'by_header_name', 'all_but_by_header_name']:\n c = params['input_options']['column_selector_options_2']['col2']\n else:\n c = None\n\n df_key = infile2 + repr(header)\n if df_key in loaded_df:\n infile2 = loaded_df[df_key]\n else:\n infile2 = pd.read_csv(infile2, sep='\\t',\n header=header, parse_dates=True)\n loaded_df[df_key] = infile2\n\n y = read_columns(\n infile2,\n c=c,\n c_option=column_option,\n sep='\\t',\n header=header,\n parse_dates=True)\n if len(y.shape) == 2 and y.shape[1] == 1:\n y = y.ravel()\n if input_type == 'refseq_and_interval':\n estimator.set_params(\n data_batch_generator__features=y.ravel().tolist())\n y = None\n # end y\n\n optimizer = params['search_schemes']['selected_search_scheme']\n optimizer = getattr(model_selection, optimizer)\n\n # handle gridsearchcv options\n options = params['search_schemes']['options']\n\n if groups:\n header = 'infer' if (options['cv_selector']['groups_selector']\n ['header_g']) else None\n column_option = (options['cv_selector']['groups_selector']\n ['column_selector_options_g']\n ['selected_column_selector_option_g'])\n if column_option in ['by_index_number', 'all_but_by_index_number',\n 'by_header_name', 'all_but_by_header_name']:\n c = (options['cv_selector']['groups_selector']\n ['column_selector_options_g']['col_g'])\n else:\n c = None\n\n df_key = groups + repr(header)\n if df_key in loaded_df:\n groups = loaded_df[df_key]\n\n groups = read_columns(\n groups,\n c=c,\n c_option=column_option,\n sep='\\t',\n header=header,\n parse_dates=True)\n groups = groups.ravel()\n options['cv_selector']['groups_selector'] = groups\n\n splitter, groups = get_cv(options.pop('cv_selector'))\n options['cv'] = splitter\n options['n_jobs'] = N_JOBS\n primary_scoring = options['scoring']['primary_scoring']\n options['scoring'] = get_scoring(options['scoring'])\n if options['error_score']:\n options['error_score'] = 'raise'\n else:\n options['error_score'] = 
np.NaN\n if options['refit'] and isinstance(options['scoring'], dict):\n options['refit'] = primary_scoring\n if 'pre_dispatch' in options and options['pre_dispatch'] == '':\n options['pre_dispatch'] = None\n\n # del loaded_df\n del loaded_df\n\n # handle memory\n memory = joblib.Memory(location=CACHE_DIR, verbose=0)\n # cache iraps_core fits could increase search speed significantly\n if estimator.__class__.__name__ == 'IRAPSClassifier':\n estimator.set_params(memory=memory)\n else:\n # For iraps buried in pipeline\n for p, v in estimator_params.items():\n if p.endswith('memory'):\n # for case of `__irapsclassifier__memory`\n if len(p) > 8 and p[:-8].endswith('irapsclassifier'):\n # cache iraps_core fits could increase search\n # speed significantly\n new_params = {p: memory}\n estimator.set_params(**new_params)\n # security reason, we don't want memory being\n # modified unexpectedly\n elif v:\n new_params = {p: None}\n estimator.set_params(**new_params)\n # For now, 1 CPU is suggested for irapsclassifier\n elif p.endswith('n_jobs'):\n new_params = {p: 1}\n estimator.set_params(**new_params)\n # for security reason, types of callbacks are limited\n elif p.endswith('callbacks'):\n for cb in v:\n cb_type = cb['callback_selection']['callback_type']\n if cb_type not in ALLOWED_CALLBACKS:\n raise ValueError(\n \"Prohibited callback type: %s!\" % cb_type)\n\n param_grid = _eval_search_params(params_builder)\n searcher = optimizer(estimator, param_grid, **options)\n\n # do nested split\n split_mode = params['outer_split'].pop('split_mode')\n # nested CV, outer cv using cross_validate\n if split_mode == 'nested_cv':\n outer_cv, _ = get_cv(params['outer_split']['cv_selector'])\n\n if options['error_score'] == 'raise':\n rval = cross_validate(\n searcher, X, y, scoring=options['scoring'],\n cv=outer_cv, n_jobs=N_JOBS, verbose=0,\n error_score=options['error_score'])\n else:\n warnings.simplefilter('always', FitFailedWarning)\n with warnings.catch_warnings(record=True) as w:\n try:\n rval = cross_validate(\n searcher, X, y,\n scoring=options['scoring'],\n cv=outer_cv, n_jobs=N_JOBS,\n verbose=0,\n error_score=options['error_score'])\n except ValueError:\n pass\n for warning in w:\n print(repr(warning.message))\n\n keys = list(rval.keys())\n for k in keys:\n if k.startswith('test'):\n rval['mean_' + k] = np.mean(rval[k])\n rval['std_' + k] = np.std(rval[k])\n if k.endswith('time'):\n rval.pop(k)\n rval = pd.DataFrame(rval)\n rval = rval[sorted(rval.columns)]\n rval.to_csv(path_or_buf=outfile_result, sep='\\t',\n header=True, index=False)\n else:\n if split_mode == 'train_test_split':\n train_test_split = try_get_attr(\n 'galaxy_ml.model_validations', 'train_test_split')\n # make sure refit is chosen\n # this could be True for sklearn models, but not the case for\n # deep learning models\n if not options['refit'] and \\\n not all(hasattr(estimator, attr)\n for attr in ('config', 'model_type')):\n warnings.warn(\"Refit is changed to `True` for nested \"\n \"validation!\")\n setattr(searcher, 'refit', True)\n split_options = params['outer_split']\n\n # splits\n if split_options['shuffle'] == 'stratified':\n split_options['labels'] = y\n X, X_test, y, y_test = train_test_split(X, y, **split_options)\n elif split_options['shuffle'] == 'group':\n if groups is None:\n raise ValueError(\"No group based CV option was \"\n \"chosen for group shuffle!\")\n split_options['labels'] = groups\n if y is None:\n X, X_test, groups, _ =\\\n train_test_split(X, groups, **split_options)\n else:\n X, X_test, y, y_test, 
groups, _ =\\\n train_test_split(X, y, groups, **split_options)\n else:\n if split_options['shuffle'] == 'None':\n split_options['shuffle'] = None\n X, X_test, y, y_test =\\\n train_test_split(X, y, **split_options)\n # end train_test_split\n\n # shared by both train_test_split and non-split\n if options['error_score'] == 'raise':\n searcher.fit(X, y, groups=groups)\n else:\n warnings.simplefilter('always', FitFailedWarning)\n with warnings.catch_warnings(record=True) as w:\n try:\n searcher.fit(X, y, groups=groups)\n except ValueError:\n pass\n for warning in w:\n print(repr(warning.message))\n\n # no outer split\n if split_mode == 'no':\n # save results\n cv_results = pd.DataFrame(searcher.cv_results_)\n cv_results = cv_results[sorted(cv_results.columns)]\n cv_results.to_csv(path_or_buf=outfile_result, sep='\\t',\n header=True, index=False)\n\n # train_test_split, output test result using best_estimator_\n # or rebuild the trained estimator using weights if applicable.\n else:\n scorer_ = searcher.scorer_\n if isinstance(scorer_, collections.Mapping):\n is_multimetric = True\n else:\n is_multimetric = False\n\n best_estimator_ = getattr(searcher, 'best_estimator_', None)\n if not best_estimator_:\n raise ValueError(\"GridSearchCV object has no \"\n \"`best_estimator_` when `refit`=False!\")\n\n if best_estimator_.__class__.__name__ == 'KerasGBatchClassifier' \\\n and hasattr(estimator.data_batch_generator, 'target_path'):\n test_score = best_estimator_.evaluate(\n X_test, scorer=scorer_, is_multimetric=is_multimetric)\n else:\n test_score = _score(best_estimator_, X_test,\n y_test, scorer_,\n is_multimetric=is_multimetric)\n\n if not is_multimetric:\n test_score = {primary_scoring: test_score}\n for key, value in test_score.items():\n test_score[key] = [value]\n result_df = pd.DataFrame(test_score)\n result_df.to_csv(path_or_buf=outfile_result, sep='\\t',\n header=True, index=False)\n\n memory.clear(warn=False)\n\n if outfile_object:\n best_estimator_ = getattr(searcher, 'best_estimator_', None)\n if not best_estimator_:\n warnings.warn(\"GridSearchCV object has no attribute \"\n \"'best_estimator_', because either it's \"\n \"nested gridsearch or `refit` is False!\")\n return\n\n main_est = best_estimator_\n if isinstance(best_estimator_, pipeline.Pipeline):\n main_est = best_estimator_.steps[-1][-1]\n\n if hasattr(main_est, 'model_') \\\n and hasattr(main_est, 'save_weights'):\n if outfile_weights:\n main_est.save_weights(outfile_weights)\n del main_est.model_\n del main_est.fit_params\n del main_est.model_class_\n del main_est.validation_data\n if getattr(main_est, 'data_generator_', None):\n del main_est.data_generator_\n\n with open(outfile_object, 'wb') as output_handler:\n pickle.dump(best_estimator_, output_handler,\n pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == '__main__':\n aparser = argparse.ArgumentParser()\n aparser.add_argument(\"-i\", \"--inputs\", dest=\"inputs\", required=True)\n aparser.add_argument(\"-e\", \"--estimator\", dest=\"infile_estimator\")\n aparser.add_argument(\"-X\", \"--infile1\", dest=\"infile1\")\n aparser.add_argument(\"-y\", \"--infile2\", dest=\"infile2\")\n aparser.add_argument(\"-O\", \"--outfile_result\", dest=\"outfile_result\")\n aparser.add_argument(\"-o\", \"--outfile_object\", dest=\"outfile_object\")\n aparser.add_argument(\"-w\", \"--outfile_weights\", dest=\"outfile_weights\")\n aparser.add_argument(\"-g\", \"--groups\", dest=\"groups\")\n aparser.add_argument(\"-r\", \"--ref_seq\", dest=\"ref_seq\")\n aparser.add_argument(\"-b\", 
\"--intervals\", dest=\"intervals\")\n aparser.add_argument(\"-t\", \"--targets\", dest=\"targets\")\n aparser.add_argument(\"-f\", \"--fasta_path\", dest=\"fasta_path\")\n args = aparser.parse_args()\n\n main(args.inputs, args.infile_estimator, args.infile1, args.infile2,\n args.outfile_result, outfile_object=args.outfile_object,\n outfile_weights=args.outfile_weights, groups=args.groups,\n ref_seq=args.ref_seq, intervals=args.intervals,\n targets=args.targets, fasta_path=args.fasta_path)\n","sub_path":"tools/sklearn/search_model_validation.py","file_name":"search_model_validation.py","file_ext":"py","file_size_in_byte":24336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"326970383","text":"import os\nimport h5py\nimport cv2\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras.optimizers import SGD\nfrom os.path import join, getsize\nimport sys\nfrom mcc_multiclass import multimcc, confusion_matrix\n\n\ndef load_im2(paths):\n l = []\n for name in paths:\n im2 = cv2.resize(cv2.imread(name), (224, 224)).astype(np.float32)\n im2[:,:,0] -= 103.939\n im2[:,:,1] -= 116.779\n im2[:,:,2] -= 123.68\n im2 = im2.transpose((2,0,1))\n #im2 = np.expand_dims(im2, axis=0)\n #print(im2.shape)\n l.append(im2)\n return l\n\n\ndef dataset_to_parameters(dataset):\n validation_data_dir = (\"/\".join([\"datasets\", dataset.lower()]))\n validation_data_dir += \"/\"\n if len(os.listdir(validation_data_dir)) == 3:\n predict_mcc = True\n else:\n predict_mcc= False\n\n return predict_mcc, validation_data_dir\n\ndef vgg16(weights_path=None, add_fully_connected=True):\n img_width, img_height = 224, 224\n\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))\n\n model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu', 
name='conv5_3'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.5))\n\n\n top_model = None\n if add_fully_connected:\n top_model = Sequential()\n top_model.add(Dense(3, input_dim = model.output_shape[1], activation='sigmoid'))\n\n\n return model, top_model\n #return model\n\n\n\ndef create_validationImg_validationLabel_list(predict_mcc, validation_data_dir):\n validation_images = []\n\n if predict_mcc:\n\n validation_labels = []\n\n val_path_e = validation_data_dir + \"early/\"\n val_path_g = validation_data_dir + \"good/\"\n val_path_l = validation_data_dir + \"late/\"\n val_paths = [val_path_e, val_path_g, val_path_l]\n\n val_filenames_e = os.listdir(val_path_e)\n val_filenames_g = os.listdir(val_path_g)\n val_filenames_l = os.listdir(val_path_l)\n\n for path in val_paths:\n if path == val_path_e:\n for name in val_filenames_e:\n validation_images.append(path + name)\n validation_labels.append([1,0,0])\n elif path == val_path_g:\n for name in val_filenames_g:\n validation_images.append(path + name)\n validation_labels.append([0,1,0])\n elif path == val_path_l:\n for name in val_filenames_l:\n validation_images.append(path + name)\n validation_labels.append([0,0,1])\n\n else:\n validation_images = os.listdir(validation_data_dir)\n validation_images.sort()\n for i in range(len(validation_images)):\n validation_images[i] = validation_data_dir + validation_images[i]\n validation_labels = None\n\n\n return validation_images, validation_labels\n\ndef file_generator(predict_mcc, validation_images, validation_labels, predicted_labels):\n\n lines = []\n predicted_labels_linear = []\n validation_labels_linear = []\n\n for i in range(len(predicted_labels)):\n cls_prob = predicted_labels[i]\n\n predicted_labels_linear.append(np.argmax(cls_prob))\n\n if predicted_labels_linear[i] == 0:\n predicted_label = \"Early\"\n elif predicted_labels_linear[i] == 1:\n predicted_label = \"Good\"\n elif predicted_labels_linear[i] == 2:\n predicted_label = \"Late\"\n\n line = [validation_images[i], predicted_label, str(round(cls_prob[0],3)),\n str(round(cls_prob[1],3)), str(round(cls_prob[2],3))]\n\n if predict_mcc:\n\n for j in range(len(validation_labels[i])):\n cl = validation_labels[i][j]\n\n if cl == 1 and j == 0:\n real_label = \"Early\"\n validation_labels_linear.append(j)\n\n elif cl == 1 and j == 1:\n real_label = \"Good\"\n validation_labels_linear.append(j)\n\n elif cl == 1 and j == 2:\n real_label = \"Late\"\n validation_labels_linear.append(j)\n\n line.append(real_label)\n\n lines.append(\";\".join(line)+\"\\n\")\n\n\n validation_labels_linear = np.array(validation_labels_linear)\n predicted_labels_linear = np.array(predicted_labels_linear)\n\n return lines, validation_labels_linear, predicted_labels_linear\n\n\ndef MCC_CM_calculator(validation_labels_linear, predicted_labels_linear):\n #Return MCC and confusion matrix\n\n MCC = multimcc(validation_labels_linear, predicted_labels_linear)\n MCC = round(MCC,3)\n MCC_line = \"MCC=\" + str(MCC)\n\n CM = confusion_matrix(validation_labels_linear, predicted_labels_linear)\n\n CM_lines = \";p_E;p_G;p_L\\n\"\n\n for i in range(len(CM[0])):\n if i == 0:\n l = \"r_E\"\n elif i == 1:\n l = \"r_G\"\n elif i == 2:\n l = \"r_L\"\n\n CM_lines += l + \";\"\n for j in CM[0][i]:\n CM_lines += str(j) + \";\"\n CM_lines += \"\\n\"\n\n return MCC_line, CM_lines\n\n\n\ndef main():\n file_lines = []\n dataset = sys.argv[1]\n # path to the model weights files.\n weights_path = 
'weights/vgg16_am1_theano_so2.h5'\n\n # dimensions of our images.\n\n predict_mcc, validation_data_dir = dataset_to_parameters(dataset)\n\n # load the weights of the VGG16 networks\n # (trained on ImageNet, won the ILSVRC competition in 2014)\n # note: when there is a complete match between your model definition\n # and your weight savefile, you can simply call model.load_weights(filename\n #model = vgg16(weights_path)\n model, top_model = vgg16(weights_path)\n\n assert os.path.exists(weights_path), 'Model weights not found (see \"weights_path\" variable in script).'\n #model.load_weights(weights_path)\n f = h5py.File(weights_path)\n for k in range(len(f.attrs['layer_names'])):\n g = f[f.attrs['layer_names'][k]]\n weights = [g[g.attrs['weight_names'][p]] for p in range(len(g.attrs['weight_names']))]\n if k >= len(model.layers):\n top_model.layers[k-len(model.layers)].set_weights(weights)\n else:\n model.layers[k].set_weights(weights)\n f.close()\n\n\n print('Model loaded.')\n\n # build a classifier model to put on top of the convolutional model\n\n # set the first 25 layers (up to the last conv block)\n # to non-trainable (weights will not be updated)\n for layer in model.layers[:25]:\n layer.trainable = False\n\n # compile the model with a SGD/momentum optimizer\n # and a very slow learning rate.\n model.compile(loss='binary_crossentropy',\n optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),\n metrics=['accuracy'])\n\n\n validation_images, validation_labels = create_validationImg_validationLabel_list(predict_mcc, validation_data_dir)\n validation = np.array(load_im2(validation_images))\n\n np.savetxt(\"tsne/validation_labels/am1_theano_validation_labels_{}.txt\".format(dataset), validation_labels)\n\n #predicted_labels = model.predict(validation)\n predicted_features = model.predict(validation)\n np.savetxt(\"tsne/predicted_features/am1_theano_predicted_features_{}.txt\".format(dataset), predicted_features)\n\n top_model.compile(loss='binary_crossentropy',\n optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),\n metrics=['accuracy'])\n\n predicted_labels = top_model.predict(predicted_features)\n\n prediction_summary = open(\"results/vgg16_am1_theano_prediction_summary_{}.csv\".format(dataset), \"w\")\n\n\n\n lines, validation_labels_linear, predicted_labels_linear = file_generator(predict_mcc, validation_images, validation_labels, predicted_labels)\n\n\n if predict_mcc:\n MCC_line, CM_lines = MCC_CM_calculator(validation_labels_linear, predicted_labels_linear)\n file_lines.append(MCC_line)\n file_lines.append(CM_lines)\n\n file_lines.append(\"\\t\".join(['FILENAME', 'PREDICTED_LABEL', 'E', 'G', 'L', 'REAL_LABEL'])+'\\n')\n\n\n file_lines.extend(lines)\n\n for line in file_lines:\n prediction_summary.write(line)\n prediction_summary.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"vgg16_raspberry_predict.py","file_name":"vgg16_raspberry_predict.py","file_ext":"py","file_size_in_byte":10047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"268251182","text":"import speak\nimport twitter\nimport wx\nclass TweetGui(wx.Frame):\n\n\tglobal inittext\n\tglobal id\n\tglobal edit\n\n\tdef __init__(self,inittext=\"\",i=\"\",ed=0):\n\t\tself.edit=ed\n\t\tself.id=i\n\t\twx.Frame.__init__(self, None, title=\"Tweet\", size=(350,200)) # initialize the wx frame\n\t\tself.Bind(wx.EVT_CLOSE, self.OnClose)\n\t\tself.panel = wx.Panel(self)\n\t\tself.main_box = wx.BoxSizer(wx.VERTICAL)\n\t\tself.text_label = wx.StaticText(self.panel, -1, \"Tweet 
Te&xt\")\n\t\tself.text = wx.TextCtrl(self.panel, -1, \"\",style=wx.TE_MULTILINE|wx.TE_PROCESS_ENTER|wx.TE_DONTWRAP)\n\t\tself.main_box.Add(self.text, 0, wx.ALL, 10)\n\t\tself.text.Bind(wx.EVT_TEXT_ENTER, self.Tweet)\n\t\tself.text.Bind(wx.EVT_TEXT_MAXLEN, self.Maximum)\n\t\tself.text.Bind(wx.EVT_TEXT, self.Chars)\n\t\tself.text.AppendText(inittext)\n\t\t#self.text.SetSelection(self.text.GetLastPosition()-1,self.text.GetLastPosition())\n\t\tself.text.SetMaxLength(140)\n\t\t#self.text.SetInsertionPoint(self.text.GetLastPosition())\n\t\tself.tweet = wx.Button(self.panel, wx.ID_DEFAULT, \"&Send\")\n\t\tself.tweet.SetDefault()\n\t\tself.tweet.Bind(wx.EVT_BUTTON, self.Tweet)\n\t\tself.main_box.Add(self.tweet, 0, wx.ALL, 10)\n\t\tself.close = wx.Button(self.panel, wx.ID_CLOSE, \"&Cancel\")\n\t\tself.close.Bind(wx.EVT_BUTTON, self.OnClose)\n\t\tself.main_box.Add(self.close, 0, wx.ALL, 10)\n\t\tself.panel.Layout()\n\tdef Maximum(self,event):\n\t\ttwitter.snd.play(\"boundary\")\n\tdef Chars(self, event):\n\t\tlength=round(len(self.text.GetValue()),0)\n\t\tpercent=str(round((length/140)*100,0))\n\t\tself.SetLabel(\"Tweet - \"+str(length).split(\".\")[0]+\" of 140 characters (\"+percent+\" Percent)\")\n\tdef Tweet(self, event):\n\t\tif self.edit==1:\n\t\t\ttwitter.Delete(self.id)\n\t\tstatus=twitter.Tweet(self.text.GetValue(),self.id)\n\t\tif status==True:\n\t\t\tself.Destroy()\n\tdef OnClose(self, event):\n\t\t\"\"\"App close event handler\"\"\"\n\t\tself.Destroy()\n\n\nclass QuoteGui(wx.Frame):\n\n\tglobal inittext\n\tglobal id\n\n\tdef __init__(self,i=\"\"):\n\t\tself.id=i\n\t\ta=twitter.api.get_status(self.id)\n\t\tself.inittext=a.text\n\t\twx.Frame.__init__(self, None, title=\"Tweet\", size=(350,200)) # initialize the wx frame\n\t\tself.Bind(wx.EVT_CLOSE, self.OnClose)\n\t\tself.panel = wx.Panel(self)\n\t\tself.main_box = wx.BoxSizer(wx.VERTICAL)\n\t\tself.text_label = wx.StaticText(self.panel, -1, \"Tweet Te&xt\")\n\t\tself.text = wx.TextCtrl(self.panel, -1, \"\")\n\t\tself.main_box.Add(self.text, 0, wx.ALL, 10)\n\t\tself.text2_label = wx.StaticText(self.panel, -1, \"Quoting\")\n\t\tself.text2 = wx.TextCtrl(self.panel, -1, \"\",style=wx.TE_READONLY)\n\t\tself.main_box.Add(self.text2, 0, wx.ALL, 10)\n\t\tself.text2.SetValue(self.inittext)\n\t\tself.tweet = wx.Button(self.panel, wx.ID_DEFAULT, \"&Send\")\n\t\tself.tweet.Bind(wx.EVT_BUTTON, self.Tweet)\n\t\tself.main_box.Add(self.tweet, 0, wx.ALL, 10)\n\t\tself.tweet.SetDefault()\n\t\tself.close = wx.Button(self.panel, wx.ID_CLOSE, \"&Cancel\")\n\t\tself.close.Bind(wx.EVT_BUTTON, self.OnClose)\n\t\tself.main_box.Add(self.close, 0, wx.ALL, 10)\n\t\tself.panel.Layout()\n\tdef EVT_TEXT_ENTER(self,event):\n\t\tspeak.speak(\"Boing\")\n\tdef Tweet(self, event):\n\t\ttwitter.Quote(status=self.id,text=self.text.GetValue())\n\t\tself.Destroy()\n\tdef OnClose(self, event):\n\t\t\"\"\"App close event handler\"\"\"\n\t\tself.Destroy()\n\n\nclass DMGui(wx.Frame):\n\n\tglobal user\n\tdef __init__(self,i=\"\"):\n\t\tself.user=i\n\n\t\twx.Frame.__init__(self, None, title=\"Message\", size=(350,200)) # initialize the wx frame\n\t\tself.Bind(wx.EVT_CLOSE, self.OnClose)\n\t\tself.panel = wx.Panel(self)\n\t\tself.main_box = wx.BoxSizer(wx.VERTICAL)\n\t\tself.text_label = wx.StaticText(self.panel, -1, \"Tweet Te&xt\")\n\t\tself.text = wx.TextCtrl(self.panel, -1, \"\")\n\t\tself.main_box.Add(self.text, 0, wx.ALL, 10)\n\t\tself.recip_label = wx.StaticText(self.panel, -1, \"&Recipient\")\n\t\tself.recip = wx.TextCtrl(self.panel, -1, 
\"\")\n\t\tself.main_box.Add(self.recip, 0, wx.ALL, 10)\n\t\tself.recip.SetValue(self.user)\n\t\tself.tweet = wx.Button(self.panel, wx.ID_DEFAULT, \"&Send\")\n\t\tself.tweet.Bind(wx.EVT_BUTTON, self.Tweet)\n\t\tself.main_box.Add(self.tweet, 0, wx.ALL, 10)\n\t\tself.tweet.SetDefault()\n\t\tself.close = wx.Button(self.panel, wx.ID_CLOSE, \"&Cancel\")\n\t\tself.close.Bind(wx.EVT_BUTTON, self.OnClose)\n\t\tself.main_box.Add(self.close, 0, wx.ALL, 10)\n\t\tself.panel.Layout()\n\tdef Tweet(self, event):\n\t\tstatus=twitter.Tweet(\"d @\"+self.recip.GetValue()+\" \"+self.text.GetValue(),0)\n\t\tif status==True:\n\t\t\tself.Destroy()\n\tdef OnClose(self, event):\n\t\t\"\"\"App close event handler\"\"\"\n\t\tself.Destroy()\n\nclass ViewGui(wx.Frame):\n\n\tdef __init__(self,text):\n\n\t\twx.Frame.__init__(self, None, title=\"View\", size=(350,200)) # initialize the wx frame\n\t\tself.Bind(wx.EVT_CLOSE, self.OnClose)\n\t\tself.panel = wx.Panel(self)\n\t\tself.main_box = wx.BoxSizer(wx.VERTICAL)\n\t\tself.text_label = wx.StaticText(self.panel, -1, \"Tweet Te&xt\",style=wx.TE_READONLY|wx.TE_MULTILINE)\n\t\tself.text = wx.TextCtrl(self.panel, -1, \"\")\n\t\tself.main_box.Add(self.text, 0, wx.ALL, 10)\n\t\tself.text.SetValue(text)\n\t\tself.close = wx.Button(self.panel, wx.ID_CLOSE, \"&Close\")\n\t\tself.close.Bind(wx.EVT_BUTTON, self.OnClose)\n\t\tself.main_box.Add(self.close, 0, wx.ALL, 10)\n\t\tself.panel.Layout()\n\tdef OnClose(self, event):\n\t\t\"\"\"App close event handler\"\"\"\n\t\tself.Destroy()","sub_path":"Twitrocity/gui/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"646773257","text":"from flask import render_template, request, Blueprint\nfrom cloud9.models import Post\n\nmain = Blueprint('main', __name__)\n\n\n@main.route('/')\n@main.route('/home')\ndef home():\n\t# Задает первую страницу по умолчанию:\n\tpage = request.args.get('page', 1, type=int)\n\t# Загрузка постов из БД:\n\t# По пять на страницу, в порядке desc() новые посты появляются первыми\n\tposts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)\n\treturn render_template('home.html', posts=posts)\n\n\n@main.route('/about')\ndef about():\n return render_template('about.html', title='About')","sub_path":"cloud9/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"530646928","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom openapi_server.models.base_model_ import Model\nfrom openapi_server.models.attribute import Attribute\nfrom openapi_server import util\n\nfrom openapi_server.models.attribute import Attribute # noqa: E501\n\nclass NodeBinding(Model):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, id=None, query_id=None, attributes=None): # noqa: E501\n \"\"\"NodeBinding - a model defined in OpenAPI\n\n :param id: The id of this NodeBinding. # noqa: E501\n :type id: str\n :param query_id: The query_id of this NodeBinding. # noqa: E501\n :type query_id: str\n :param attributes: The attributes of this NodeBinding. 
# noqa: E501\n :type attributes: List[Attribute]\n \"\"\"\n self.openapi_types = {\n 'id': str,\n 'query_id': str,\n 'attributes': List[Attribute]\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'query_id': 'query_id',\n 'attributes': 'attributes'\n }\n\n self._id = id\n self._query_id = query_id\n self._attributes = attributes\n\n @classmethod\n def from_dict(cls, dikt) -> 'NodeBinding':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The NodeBinding of this NodeBinding. # noqa: E501\n :rtype: NodeBinding\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def id(self):\n \"\"\"Gets the id of this NodeBinding.\n\n A Compact URI, consisting of a prefix and a reference separated by a colon, such as UniProtKB:P00738. Via an external context definition, the CURIE prefix and colon may be replaced by a URI prefix, such as http://identifiers.org/uniprot/, to form a full URI. # noqa: E501\n\n :return: The id of this NodeBinding.\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this NodeBinding.\n\n A Compact URI, consisting of a prefix and a reference separated by a colon, such as UniProtKB:P00738. Via an external context definition, the CURIE prefix and colon may be replaced by a URI prefix, such as http://identifiers.org/uniprot/, to form a full URI. # noqa: E501\n\n :param id: The id of this NodeBinding.\n :type id: str\n \"\"\"\n if id is None:\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id\n\n @property\n def query_id(self):\n \"\"\"Gets the query_id of this NodeBinding.\n\n A Compact URI, consisting of a prefix and a reference separated by a colon, such as UniProtKB:P00738. Via an external context definition, the CURIE prefix and colon may be replaced by a URI prefix, such as http://identifiers.org/uniprot/, to form a full URI. # noqa: E501\n\n :return: The query_id of this NodeBinding.\n :rtype: str\n \"\"\"\n return self._query_id\n\n @query_id.setter\n def query_id(self, query_id):\n \"\"\"Sets the query_id of this NodeBinding.\n\n A Compact URI, consisting of a prefix and a reference separated by a colon, such as UniProtKB:P00738. Via an external context definition, the CURIE prefix and colon may be replaced by a URI prefix, such as http://identifiers.org/uniprot/, to form a full URI. # noqa: E501\n\n :param query_id: The query_id of this NodeBinding.\n :type query_id: str\n \"\"\"\n\n self._query_id = query_id\n\n @property\n def attributes(self):\n \"\"\"Gets the attributes of this NodeBinding.\n\n A list of attributes providing further information about the node binding. This is not intended for capturing node attributes and should only be used for properties that vary from result to result. # noqa: E501\n\n :return: The attributes of this NodeBinding.\n :rtype: List[Attribute]\n \"\"\"\n return self._attributes\n\n @attributes.setter\n def attributes(self, attributes):\n \"\"\"Sets the attributes of this NodeBinding.\n\n A list of attributes providing further information about the node binding. This is not intended for capturing node attributes and should only be used for properties that vary from result to result. 
# noqa: E501\n\n :param attributes: The attributes of this NodeBinding.\n :type attributes: List[Attribute]\n \"\"\"\n\n self._attributes = attributes\n","sub_path":"code/UI/OpenAPI/python-flask-server/openapi_server/models/node_binding.py","file_name":"node_binding.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"339873477","text":"import json\n\ndef fmore_learning_course() :\n course_code=input(\"강의코드 (예: IB171106, OB0104 ..): \")\n course_name=input(\"강의명 (예: IOT 빅데이터 실무반): \")\n teacher=input(\"강사 (예: 이현구): \")\n open_date=input(\"개강일 (예: 2017-12-28): \")\n close_date=input(\"종료일 (예: 2018-06-26): \")\n learning_course_list.append(fdic_for_learning_course(close_date,open_date,course_code,course_name,teacher))\n \ndef fdic_for_learning_course(close_date,open_date,course_code,course_name,teacher):\n dic={}\n dic[\"close_date\"]=close_date\n dic[\"open_date\"]=open_date\n dic[\"course_code\"]=course_code\n dic[\"course_name\"]=course_name\n dic[\"teacher\"]=teacher\n return dic\n\ndef fdic_for_total_course(list_learnig,num_of_learned_course):\n dic={}\n dic[\"learning_course_info\"]=list_learnig\n dic[\"num_of_course_learned\"]=num_of_learned_course\n return dic\n\ndef fdic_for_per_std(std_addr,std_id,std_age,std_name,total_course_dic):\n dic={}\n dic[\"address\"] =std_addr\n dic[\"student_ID\"] =std_id\n dic[\"student_age\"] =std_age\n dic[\"student_name\"] =std_name\n dic[\"total_course_info\"] = total_course_dic\n return dic\ndef fput_std_dic():\n total_course_dic=fdic_for_total_course(learning_course_list,num_of_course_learned)\n std_dic=fdic_for_per_std(std_addr,std_id,std_age,std_name,total_course_dic)\n course_system_list.append(std_dic)\n print(course_system_list)\n \n \n###############search\ndef fread_std(i):\n global course_system_list\n print(\"*학생 ID: \"+course_system_list[i].get('student_ID'))\n print(\"*학생 이름: \"+course_system_list[i].get('student_name'))\n print(\"*나이: \"+str(course_system_list[i].get('student_age')))\n print(\"*주소: \"+course_system_list[i].get('address'))\n print(\"*수강정보: \")\n print(\" +과거 수강 횟수: \"+str(course_system_list[i].get('total_course_info').get('num_of_course_learned')))\n if len(course_system_list[i].get('total_course_info').get('learning_course_info'))==0:\n print(\" +현재 수강 과목: 0 \")\n else :\n for j in range(len(course_system_list[i].get('total_course_info').get('learning_course_info'))):\n print(\" 강의코드: \"+str(course_system_list[i].get('total_course_info').get('learning_course_info')[j].get('course_code')))\n print(\" 강의 명: \"+course_system_list[i].get('total_course_info').get('learning_course_info')[j].get('course_name'))\n print(\" 강사: \"+course_system_list[i].get('total_course_info').get('learning_course_info')[j].get('teacher'))\n print(\" 개강일: \"+course_system_list[i].get('total_course_info').get('learning_course_info')[j].get('open_date'))\n print(\" 종료일: \"+course_system_list[i].get('total_course_info').get('learning_course_info')[j].get('close_date'))\n \ndef fread_std_sumary(index) :\n print(\"복수의 결과가 검색되었습니다.\\n ----- 요약 결과 ----- \")\n for i in index :\n print(\">> 학생 ID: \"+course_system_list[i].get('student_ID') +\", 학생 이름: \"+course_system_list[i].get('student_name'))\n \ndef fsearch(parameter) :\n global index\n search=input(str_search)\n for i in range (len(course_system_list)):\n if course_system_list[i].get(parameter).find(search) >=0:\n index.append(i)\n print(index)\n if len(index) ==1 :\n fread_std(index[0])\n if len(index) 
>=2 :\n fread_std_sumary(index)\n index=[]\n \ndef fsearch_on_course(parameter1,parameter2):\n global index\n search=input(str_search)\n for i in range(len(course_system_list)):\n for j in range(len(course_system_list[i].get('total_course_info').get(parameter1))):\n if course_system_list[i].get('total_course_info').get(parameter1)[j].get(parameter2).find(search) >=0 :\n index.append(i)\n for i in index :\n fread_std(i)\n print()\n index=[]\n \n \n####update\ndef fupdate_std(parameter):\n global index_from_id\n print(\"현재값: \"+course_system_list[index_from_id].get(parameter))\n change=input(\"바꾸실 값을 입력하세요: \")\n course_system_list[index_from_id][parameter]=change\n \ndef fupdate_course(parameter):\n global index_from_id\n if parameter ==\"num_of_course_learned\":\n print(\"현재값: \"+course_system_list[index_from_id].get('total_course_info').get(parameter))\n change=input(\"바꾸실 값을 입력하세요: \")\n course_system_list[index_from_id].get('total_course_info')[parameter]=change\n elif parameter !=\"num_of_course_learned\":\n print(\"현재값 :\",end='')\n for j in range(len(course_system_list[index_from_id].get('total_course_info').get('learning_course_info'))) :\n print(course_system_list[index_from_id].get('total_course_info').get('learning_course_info')[j].get(parameter),end='')\n print()\n if len(course_system_list[index_from_id].get('total_course_info').get('learning_course_info')) == 1 :\n change=input(\"바꾸실 값을 입력하세요: \")\n course_system_list[index_from_id].get('total_course_info').get('learnig_course_info')[0][parameter]=change\n elif len(course_system_list[index_from_id].get('total_course_info').get('learning_course_info')) >1 :\n change_num = int(input(\"몇 번째 값을 바꾸시겠습니까? (예: 2) : \"))\n change_num=-1\n change=input(\"바꾸실 값을 입력하세요: \")\n course_system_list[index_from_id].get('total_course_info').get('learnig_course_info')[change_num][parameter]=change\n\n \npath='.'\nfile='\\\\ITT_Student.json'\nwhile True :\n try :\n with open(path+file,'r',encoding='utf8') as ITT :\n ITT_data=json.load(ITT)\n course_system_list=ITT_data\n break\n except :\n print(\"파일을 찾을 수 없습니다. 아래 메뉴를 선택하세요\")\n sel_fileerr_num=int(input(\"1.현재 경로에 새 파일 생성 \\n2.경로 변경해서 다시 찾기 \"))\n if sel_fileerr_num==1:\n f=open(path+file,'w')\n f.close()\n course_system_list=[]\n break\n elif sel_fileerr_num ==2 :\n path = input(\"변경된 경로를 입력하세요 (예 : c:\\\\user\\\\USER25\\\\Desktop) :\")\n\nstr_start_menu =\"\"\"1. 학생 정보입력\\n2. 학생 정보조회\\n3. 학생 정보수정\\n4. 학생 정보삭제\\n5. 프로그램 종료\\n메뉴를 선택하세요: \"\"\"\nstr_read_menu =\"\"\"아래 메뉴를 선택하세요\\n1. 전체 학생정보 조회\\n--- 검색 조건 선택 ---\\n2. ID검색\\n3. 이름 검색\\n4. 나이 검색\\n5. 주소 검색\\n6. 과거 수강 횟수 검색\\n7. 현재 강의를 수강중인 학생\\n8. 현재 수강중인 강의명\\n9. 현재 수강 강사\\n10. 이전 메뉴\\n메뉴를 선택하세요: \"\"\"\nstr_update_menu=\"1. 학생 이름\\n2. 나이\\n3. 주소\\n4. 과거 수강 횟수\\n5. 현재 수강 중인 강의 정보\\n0. 이전 메뉴\\n메뉴 번호를 입력하세요: \"\nstr_update_munu_5=\"1. 강의 코드\\n2. 강의명\\n3. 강사\\n4. 개강일\\n5. 종료일\\n0. 취소\\n메뉴 번호를 입력하세요: \"\nstr_delete_menu=\"삭제 유형을 선택하세요.\\n1. 전체 삭제\\n2. 현재 수강 중인 특정 과목정보 삭제\\n3. 
이전 메뉴\\n메뉴 번호를 선택하세요: \"\nstr_search=\"검색어를 입력하세요 :\"\n\nstd_dic={}\ntotal_course_dic={}\nlearning_course_list=[]\nid_count=len(course_system_list)\nindex=[]\ncount=0\nwhile True:\n print(''*20+'<>')\n sel_start_menu_num=int(input(str_start_menu))\n if sel_start_menu_num==5: #프로그램 종료\n break\n elif sel_start_menu_num==1: #학생 정보 입력\n id_count+=1\n std_id='ITT{0:0>3}'.format(id_count)\n std_name=input(\"이름 (예: 홍길동):\")\n std_age=input(\"나이 (예: 29):\")\n std_addr=input(\"주소 (예: 대구광역시 동구 아양로 135): \")\n num_of_course_learned=int(input(\"과거 수강 횟수 (예: 1): \"))\n learning_course_yN=input(\"현재 수강 과목이 있습니까? (예: y/n): \")\n if learning_course_yN=='y' :\n fmore_learning_course()\n elif learning_course_yN=='n' :\n learning_course_list.append({})\n while True:\n anoter_leaning_course=input(\"현재 수강하는 과목이 더 있습니까? (y/n): \")\n if anoter_leaning_course=='y':\n fmore_learning_course()\n elif anoter_leaning_course=='n':\n break\n fput_std_dic()\n continue\n elif sel_start_menu_num==2: #학생 정보 조회\n sel_read_menu_num=int(input(str_read_menu))\n if sel_read_menu_num==10 :\n print()\n continue\n elif sel_read_menu_num==1 : #전체 학생정보 조회\n for i in range(len(course_system_list)):\n fread_std(i)\n elif sel_read_menu_num==2 : #id검색\n fsearch(\"student_ID\")\n elif sel_read_menu_num==3 : #이름검색\n fsearch(\"student_name\")\n elif sel_read_menu_num==4 : #나이검색\n fsearch(\"student_age\")\n elif sel_read_menu_num==5 : #주소검색\n fsearch(\"address\")\n elif sel_read_menu_num==6 : #과거 수강횟수검색\n fsearch(\"num_of_course_learned\")\n elif sel_read_menu_num==7 : #현재 강의 수강중인 학생 검색\n for i in range(len(course_system_list)):\n if len(course_system_list[i].get('total_course_info').get('learning_course_info'))!=0:\n index.append(i)\n print(index)\n print(\"현재 강의를 수강 중인 학생: \" ,end='')\n for idx in index:\n print('['+course_system_list[idx].get('student_name')+']',end='')\n index=[]\n print()\n elif sel_read_menu_num==8 : #현재 운영중인 강의명\n fsearch_on_course(\"learning_course_info\",'course_name')\n elif sel_read_menu_num==9 : #현재 수업진행중인 강사\n fsearch_on_course(\"learning_course_info\",'teacher')\n elif sel_start_menu_num==3: # 학생 정보 변경\n update_std_id_input=input('수정할 학생 ID를 입력하세요: ')\n sel_update_menu_num=int(input(str_update_menu))\n\n for i in range(len(course_system_list)):\n if course_system_list[i].get('student_ID')==update_std_id_input :\n index_from_id=i\n\n if sel_update_menu_num==0: # 이전 메뉴\n print()\n continue\n elif sel_update_menu_num==1: #이름\n fupdate_std('student_name')\n elif sel_update_menu_num==2: #나이\n fupdate_std('student_age')\n elif sel_update_menu_num==3: #주소\n fupdate_std('address')\n elif sel_update_menu_num==4: #과거 수강횟수\n fupdate_course('num_of_course_learned')\n elif sel_update_menu_num==5: #현재 수강 과목\n sel_update_menu_5_num =int(input(str_update_munu_5))\n if sel_update_menu_5_num == 0: continue #취소\n elif sel_update_menu_5_num==1:\n fupdate_course('course_code')\n elif sel_update_menu_5_num==2:\n fupdate_course('course_name')\n elif sel_update_menu_5_num==3:\n fupdate_course('teacher')\n elif sel_update_menu_5_num==4:\n fupdate_course('open_date')\n elif sel_update_menu_5_num==5:\n fupdate_course('close_date')\n\n\n elif sel_start_menu_num==4: #학생 정보 삭제\n del_std_id_input=input(\"삭제할 학생ID를 입력하세요: \")\n sel_delete_menu_num=int(input(str_delete_menu))\n\n for i in range(len(course_system_list)):\n if course_system_list[i].get('student_ID')==del_std_id_input :\n index_from_id=i\n\n if sel_delete_menu_num==1:\n del course_system_list[index_from_id]\n if sel_delete_menu_num==2:\n print(\"아이디 \"+del_std_id_input+\", 이름 
\"+course_system_list[index_from_id].get('student_name')+\" 학생의 현재 수강 중인 과목 정보 삭제\")\n for j in range(len(course_system_list[index_from_id].get('total_course_info').get('learning_course_info'))) :\n print(course_system_list[index_from_id].get('total_course_info').get('learning_course_info')[j].get('course_name'))\n if len(course_system_list[index_from_id].get('total_course_info').get('learning_course_info')) ==1:\n print('#### '+course_system_list[index_from_id].get('total_course_info').get('learning_course_info')[0].get('course_name')+' 수업이 삭제되었습니다 ####')\n del course_system_list[index_from_id].get('total_course_info').get('learning_course_info')[0]\n print()\n elif course_system_list[index_from_id].get('total_course_info').get('learning_course_info') > 1 :\n del_subject_num_input=int(input('몇 번째 과목을 삭제 하시겠습니까?'))\n del_subject_num_input-=1\n print('#### '+course_system_list[index_from_id].get('total_course_info').get('learning_course_info')[del_subject_num_input].get('course_name')+' 수업이 삭제되었습니다 ####')\n del course_system_list[index_from_id].get('total_course_info').get('learning_course_info')[del_subject_num_input]\n\n if sel_delete_menu_num==3:\n print()\n continue\n pass\nwith open(path+file,'w',encoding='utf8') as outfile :\n retJson= json.dumps(course_system_list,indent=4,sort_keys=True,ensure_ascii=False)\n outfile.write(retJson)\n","sub_path":"05_졸업작품/학생_정보_시스템/학생정보 시스템(딕셔너리 중첩)_ver2.py","file_name":"학생정보 시스템(딕셔너리 중첩)_ver2.py","file_ext":"py","file_size_in_byte":13755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"538333580","text":"from django.urls import path\nfrom wms import views, services, prints\nfrom interfacing import views as interface_view\n\nurlpatterns = []\n\n# Common\nurlpatterns += [\n path('list_of_pharmaceutical_suppliers/', services.ListOfPharmaceuticalSuppliers.as_view()),\n path('list_of_product_combo/', services.ListOfProducts.as_view()),\n path('list_of_logistic_flew_combo/', services.ListOfLogisticFlewTypes.as_view()),\n path('list_of_from_store_authorized_combo/', services.ListOfFromStoreAuthorized.as_view()),\n path('list_of_to_store_authorized_combo/', services.ListOfFromStoreAuthorized.as_view()),\n path('list_of_emplacement_combo/', services.ListOfEmplacements.as_view()),\n path('list_of_product_status_combo/', services.ListOfProductStatus.as_view()),\n path('workload_statistics/', services.WorkloadStatistics.as_view()),\n path('upload_invoice_scan/', views.upload_invoice_scan),\n\n]\n\n# Purchase\nurlpatterns += [\n path('list_of_pharmaceutical_purchases_details/', services.ListOfPurchaseDetails.as_view()),\n path('list_of_pharmaceutical_purchases_details_tempo/', services.ListOfPurchaseDetailsTempo.as_view()),\n path('list_of_pharmaceutical_purchases/', services.ListOfPurchase.as_view()),\n path('pharmaceutical_purchases_transactions/', views.pharmaceutical_purchase_transactions),\n path('pharmaceutical_purchases_history/', views.pharmaceutical_purchase_history),\n path('print_pharmaceutical_purchase_receipt/', prints.print_pharmaceutical_purchase_receipt),\n path('upload_purchase_receipt_scan/', views.upload_purchase_receipt_scan),\n]\n\n# Purchase confirmation\nurlpatterns += [\n path('pharmaceutical_purchase_confirmation/', views.pharmaceutical_purchase_confirmation),\n path('list_of_temporary_executors_for_pharmaceutical_purchase/',\n services.ListOfTemporaryExecutorsForPharmaceuticalPurchase.as_view()),\n]\n\n# Invoices management\nurlpatterns += [\n path('pharmaceutical_invoices_view/', 
views.pharmaceutical_invoices),\n path('pharmaceutical_invoices_history/', services.HistoryOfPharmaceuticalInvoices.as_view()),\n path('list_of_pharmaceutical_invoices_combo/', services.ListOfPharmaceuticalInvoices.as_view()),\n\n]\n\n# Inventory reports\nurlpatterns += [\n path('inventory_by_emplacement_view', views.inventory_by_emplacement),\n path('inventory_by_emplacement', services.InventoryByEmplacement.as_view()),\n path('export_inventory_by_emplacement', services.ExportInventoryByEmplacement.as_view()),\n]\n\n# Transfer\nurlpatterns += [\n path('list_of_transfer_details/', services.ListOfTransferDetails.as_view()),\n path('list_of_transfer_details_tempo/', services.ListOfTransferDetailsTempo.as_view()),\n path('list_of_transfer/', services.ListOfTransfer.as_view()),\n path('transfer_transactions/', views.transfer_transactions),\n path('inventory_for_transfer/', services.ListOfAvailableProductsForTransfer.as_view()),\n path('transfer_history/', views.transfer_history),\n path('print_transfer_order/', prints.print_transfer_order)\n]\n\n# Transfer confirmation\nurlpatterns += [\n path('warehouse_transfer_confirmation/', views.warehouse_transfer_confirmation),\n path('list_of_temporary_executors/', services.ListOfTemporaryExecutorsForTransfer.as_view()),\n\n]\n\n# External Transfer\nurlpatterns += [\n path('list_of_external_transfer_details/', services.ListOfExternalTransferDetails.as_view()),\n path('list_of_external_transfer_details_tempo/', services.ListOfExternalTransferDetailsTempo.as_view()),\n path('list_of_external_transfer/', services.ListOfExternalTransfer.as_view()),\n path('external_transfer_transactions/', views.external_transfer_transactions),\n path('external_transfer_history/', views.external_transfer_history),\n path('print_external_transfer_order/', prints.print_external_transfer_order)\n]\n\n# External Transfer confirmation, shipping and reception\nurlpatterns += [\n path('external_transfer_confirmation/', views.external_transfer_confirmation),\n path('external_transfer_shipping/', views.external_transfer_shipping),\n path('external_transfer_reception/', views.external_transfer_reception),\n path('list_of_temporary_executors_for_external_transfer/',\n services.ListOfTemporaryExecutorsForExternalTransfer.as_view()),\n path('export_external_transfer_data/', services.ExportExternalTransferData.as_view()),\n path('upload_external_transfer/', interface_view.upload_external_transfer),\n]\n","sub_path":"wms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"626608971","text":"import random\n\nimport matplotlib.pyplot as plt\n\n## Part d\nfrom scipy import stats\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# 1.c Simple Random Walk\ndef get_next_position(current_position, step, length, boundary_conditions):\n\n\tif boundary_conditions == \"unlimited\":\n\t\treturn current_position + step\n\n\telif boundary_conditions == \"periodic\":\n\t\t# Subtract and then add 1 to deal with python indexing at 0 while still making use of modulus function\n\t\treturn (current_position + step-1)%(length) + 1\n\t\n\telif boundary_conditions == \"reflecting\":\n\t\tif current_position == length:\n\t\t\treturn length -1\n\t\telif current_position == 1:\n\t\t\treturn 2\t\n\t\telse:\n\t\t\treturn current_position + step\n\t\t\n\telif boundary_conditions == \"closed\":\n\t\tnext_position = current_position + step\n\t\tif 
next_position > length:\n\t\t\treturn length\n\t\telif next_position < 1:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn next_position\n\n\telif boundary_conditions == \"absorbing\":\n\t\tif current_position == length:\n\t\t\treturn length\n\t\telif current_position == 1:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn current_position + step\n\n\ndef simple_random_walk(p, total_steps, length, boundary_conditions):\n\n\tpositions = [1]\n\tsteps = []\n\n\tfor i in range(total_steps):\n\t\tif random.random() < p:\n\t\t\tstep = 1\n\t\telse:\n\t\t\tstep = -1\n\n\t\tsteps.append(step)\n\t\tcurrent_position = positions[i]\n\t\tnext_position = get_next_position(current_position, step, length, boundary_conditions)\n\t\tpositions.append(next_position)\n\n\treturn positions\n\n\ndef plot_distribution_500_realisations(total_steps):\n\n\tp = 0.65\n\trealisations = 500\n\tobservations = np.zeros(realisations)\n\tfor i in range(realisations):\n\t\tfinal_position = simple_random_walk(p, total_steps, 10, \"closed\")[-1]\n\t\tobservations[i] = final_position\n\n\n\tscaled_observations = (observations-1) * 10/9\n\n\t\n\tstates = [1,2,3,4,5,6,7,8,9,10]\n\tstat_dist = [(p/(1-p))**x for x in states]\n\ttotal_stat_dist = sum(stat_dist)\n\tstat_dist_norm = [x/total_stat_dist for x in stat_dist]\n\tplt.hist(scaled_observations, density=True, bins=10, label=\"Empirical density distribution\")\n\tplt.plot(states, stat_dist_norm, 'ro', label=\"Theoretical stationary distribution\")\n\t\n\tplt.title('Closed SRW - {} timesteps, L=10, p=0.65'.format(total_steps,p), fontsize = 16)\n\n\tplt.legend()\n\tplt.xlabel('x', fontsize = 20)\n\tplt.ylabel('frequency', fontsize = 20)\n\n\tplt.savefig('{}_steps.png'.format(total_steps))\n\t\n\tplt.show()\n\n\n\ndef plot_distribution_1_realisation_500_steps(title):\n\n\n\tp = 0.65\n\n\tpositions = simple_random_walk(p, 500, 10, \"closed\")\n\tnp_positions = np.array(positions)\n\tscaled_positions = (np_positions-1) * 10/9\n\n\n\tplt.hist(scaled_positions, density=True, label=\"Empirical density distribution\")\n\n\tstates = [1,2,3,4,5,6,7,8,9,10]\n\tstat_dist = [(p/(1-p))**x for x in states]\n\ttotal_stat_dist = sum(stat_dist)\n\tstat_dist_norm = [x/total_stat_dist for x in stat_dist]\n\tplt.plot(states, stat_dist_norm, 'ro',label=\"Theoretical stationary distribution\")\n\t\n\tplt.legend(loc=2, prop={'size': 6})\n\n\tplt.title('Closed SRW. 1 realisation. 500 timesteps, L=10, p=0.65')\n\n\tplt.xlabel('x', fontsize = 10)\n\tplt.ylabel('frequency', fontsize = 12)\n\tplt.savefig('500_steps.png')\n\n\tplt.show()\n\nplot_distribution_1_realisation_500_steps(1)\n\ndef plot_4_realisations():\n\n\tplt.rcParams['figure.figsize'] = [10, 6]\n\n\tplt.figure(1)\n\tplt.suptitle('Closed SRW. 4 seperate realisations. 
500 timesteps, L=10, p=0.6', fontsize = 12)\n\n\tplt.subplot(221)\t\n\tplt.axis((0,10,0,0.5))\n\tplot_distribution_1_realisation_500_steps(\"a\")\n\tplt.subplot(222)\n\n\tplt.axis((0,10,0,0.5))\n\n\tplot_distribution_1_realisation_500_steps(\"b\")\n\tplt.subplot(223)\n\n\tplt.axis((0,10,0,0.5))\n\n\tplot_distribution_1_realisation_500_steps(\"c\")\n\tplt.subplot(224)\n\n\tplt.axis((0,10,0,0.5))\n\n\tplot_distribution_1_realisation_500_steps(\"d\")\n\n\tplt.savefig(\"500_steps_4.png\")\n\tplt.show()\n\n\n\ndef one_realisation(mu, sigma, tmax):\n\n\t#np.random.seed(12)\n\n\n\t#Generate sequences of random variables\n\tx_sequence = np.random.normal(mu, sigma, tmax)\n\t#x_sequence = np.random.randn(tmax)*sigma + mu # How \n\ty_sequence = np.cumsum(x_sequence)\n\tz_sequence = np.exp(y_sequence)\n\n\treturn y_sequence, z_sequence\n\t\n\t\ndef plot_y_and_z(mu, sigma, tmax):\n\t\n\t\n\t\n\ty_sequence, z_sequence = one_realisation(mu, sigma, tmax)\n\t\n\tplt.plot(range(tmax), z_sequence, label = r'$Z_n$')\n\tplt.plot(range(tmax), y_sequence, label = r'$Y_n$')\n\tplt.legend(loc = 'upper left', fontsize = 20)\n\tplt.xlabel('n', fontsize=20)\n\tplt.show()\n\n\ndef empirical_results(mu, sigma, tmax, realisations=500):\n\t\n\tresults = np.zeros((realisations,tmax))\n\t\n\tfor realisation in range(realisations):\n\t\tresults[realisation,:] = one_realisation(mu,sigma,tmax)[1]\n\n\treturn results\n\n\ndef plot_empirical_results():\n\n\tmu = 0\n\tsigma = 0.2\n\ttmax = 100\n\t\n\n\tplt.rcParams['figure.figsize'] = [10, 6]\n\n\tresults = empirical_results(mu,sigma,tmax)\n\t\t\n\tempirical_averages = results.mean(axis=0)\n\tempirical_sds = results.std(axis=0)/np.sqrt(500)\n\t\n\tplt.figure(0)\n\tplt.errorbar(range(tmax), empirical_averages, yerr=empirical_sds, label = r'Emprical Average and standard deviation')\n\t\n\tplt.legend(loc=\"upper left\")\n\tplt.xlabel('n', fontsize=20)\n\tplt.ylabel(r'$Z_n$', fontsize=20)\n\n\tplt.title(r'Emprical Average of $Z_n$. 500 realisations, $\\mu_x=0$, $\\sigma_x=0.2$')\n\n\tplt.savefig('Emprical Average.png')\n\n\n\n\t\n\tplt.show()\n\t\n\n\ndef box_plots():\n\t\n\n\tmu = 0\n\tsigma = 0.2\n\ttmax = 100\n\t\n\n\tplt.rcParams['figure.figsize'] = [10, 6]\n\n\n\tresults = empirical_results(mu,sigma,tmax)\n\t\n\ttimestep_10_results = results[:,[9]]\n\t\n\tf, (ax1, ax2) = plt.subplots(1, 2)\n\t\n\tax1.boxplot(timestep_10_results)\n\tax1.set_yscale(\"log\")\n\tax1.set_title(\"n=10\")\n\t\t\n\tax1.set_ylabel(r'$log(Z_n)$')\n\n\tax2.boxplot(results[:,[99]])\n\t#ax2.boxplot(results[:,[99]], whis=100) # Plot whiskers right up to all outlying points\n\tax2.set_title(\"n=100\")\t\n\n\tax2.set_ylabel(r'$log(Z_n)$')\n\tax2.set_yscale(\"log\")\n\n\n\tplt.suptitle(r\"Box plots of $Z_n$ at n=10 and n=100. 
$\\mu_x=0$, $\\sigma_x=0.2$\")\n\tplt.savefig('box_plots.png')\n\n\n\tplt.show()\n\n\ndef empirical_pdf_at_timestep_original(timestep):\n\n\n\tmu = 0\n\tsigma = 0.2\n\ttmax = 100\n\t\n\t\n\tresults = empirical_results(mu,sigma,tmax)\n\ttimestep_results = results[:,[timestep-1]]\n\t#plt.hist(timestep_results, density=True)\n\t\n\t# KDE plot 1\n\tkde = stats.gaussian_kde(timestep_results.reshape(500))\n\txx = np.linspace(0,10,1000)\n\t#plt.plot(xx, kde(xx))\n\t\n\n\t# Theoretical plot 1\n\trv = stats.lognorm([(sigma)*(timestep**0.5)], scale=np.exp(timestep*mu))\n\tplt.plot(xx, rv.pdf(xx))\n\n\n\t# KDE plot 2\n\tsns.kdeplot(timestep_results.reshape(500), gridsize=10000 )\n\t\n\tx1,x2,y1,y2 = plt.axis()\n\tplt.axis((0,10,y1,y2))\n\t\n\n\tplt.show()\n\t\ndef empirical_pdf_at_timestep(timestep):\n\n\n\tmu = 0\n\tsigma = 0.2\n\ttmax = 100\n\n\tplt.rcParams['figure.figsize'] = [10, 6]\n\t\n\tresults = empirical_results(mu,sigma,tmax, 5000)\n\ttimestep_results = results[:,[timestep-1]]\n\t#plt.hist(timestep_results, density=True)\n\t\n\t# KDE plot 1\n\tkde = stats.gaussian_kde(timestep_results.reshape(5000))\n\txx = np.linspace(0,10,1000)\n\t#plt.plot(xx, kde(xx))\n\t\n\n\t# Theoretical plot 1\n\trv = stats.lognorm([(sigma)*(timestep**0.5)], scale=np.exp(timestep*mu))\n\tplt.plot(xx, rv.pdf(xx), label=\"Theoretical Distribution\")\n\n\t# KDE plot 2\n\tsns.kdeplot(timestep_results.reshape(5000), gridsize=10000, label=\"Empirical KDE\")\n\t\n\tif timestep > 10:\n\t\tx1,x2,y1,y2 = plt.axis()\n\t\tplt.axis((-1,10,0,0.1))\n\n\tplt.xlabel(r'$Z_n$')\n\tplt.ylabel(\"Probablity Density\")\n\tplt.title(r\"Emprical KDE and Theoretical PDF of $Z_n$ at n={}. $\\mu_x=0, \\sigma_x=0.2$\".format(timestep))\n\tplt.legend()\n\n\t#plt.savefig('emprical_pdf_kde_{}.png'.format(timestep))\n\n\tplt.show()\n\n\ndef empirical_pdf_at_timestep_b(timestep):\n\n\tmu = 0\n\tsigma = 0.2\n\ttmax = 100\n\n\n\t\n\tresults = empirical_results(mu,sigma,tmax)\n\ttimestep_results = results[:,[timestep-1]]\n\t#plt.hist(timestep_results, density=True)\n\t\n\t# KDE plot 1\n\tkde = stats.gaussian_kde(timestep_results.reshape(500))\n\txx = np.linspace(0,10,1000)\n\t#plt.plot(xx, kde(xx))\n\t\n\t# KDE plot 2\n\tsns.kdeplot(timestep_results.reshape(500), gridsize=10000 )\n\t\n\t# Theoretical plot 1\n\trv = stats.lognorm([(sigma)*(timestep**0.5)], scale=np.exp(timestep*mu))\n\tplt.plot(xx, rv.pdf(xx))\n\t\n\tx1,x2,y1,y2 = plt.axis()\n\tplt.axis((0,10,0,0.4))\n\n\tplt.show()\n\n\n\ndef log_normal_pdf(z_input, mu, sigma, timestep):\n\n\tpdf_sequence = []\n\t\n\tfor z in z_input:\n\t\t\n\t\tif z == 0:\n\t\t\tpdf_sequence.append(0)\n\t\telse:\n\t\t\texp_numerator = -1*((math.log(z) - timestep*mu)**2)\n\t\t\texp_denominator = 2 * timestep * sigma**2\n\t\t\tanswer = (1/z) * 1/(sigma) * 1/((2*math.pi*timestep)**0.5)* np.exp(exp_numerator/exp_denominator)\n\t\t\tpdf_sequence.append(answer)\n\treturn pdf_sequence\n\t\ndef ergodic_average():\n\t\n\tmu = 0\n\tsigma = 0.2\n\ttmax = 100\n\n\tresult = one_realisation(mu,sigma,tmax)[1]\n\t\n\tergodic_totals = np.cumsum(result)\n\tn_sequence = np.array(range(tmax))+1\n\tergodic_averages = np.divide(ergodic_totals, n_sequence)\n\t\n\tplt.plot(range(tmax), ergodic_averages)\n\n\tplt.ylabel(r'$\\bar{\\mu}_N$')\n\tplt.xlabel(\"N\")\n\tplt.title(r\"Ergodic Average of $Z_n$ for 1 realisation. 
$\\mu_x=0, \\sigma_x=0.2$\")\n\t\n\tplt.savefig('ergodic_average.png')\t\n\n\t\n\tplt.show()\n\n\t\t\ndef empirical_tail(timestep):\n\t\n\tmu = -0.02\n\tsigma = 0.2\n\ttmax = 100\n\n\tresults = empirical_results(mu,sigma,tmax)\n\t\n\ttimestep_results = results[:,[timestep-1]].reshape(500,)\n\t\n\tdata_size=len(timestep_results)\n\n\t# Set bins edges\n\tdata_set=sorted(set(timestep_results))\n\tbins=np.append(data_set, data_set[-1]+1)\n\n\t# Use the histogram function to bin the data\n\tcounts, bin_edges = np.histogram(timestep_results, bins=bins, density=False)\n\n\tcounts=counts.astype(float)/data_size\n\n\t# Find the cdf\n\tcdf = np.cumsum(counts)\n\n\t# Plot the cdf\n\tplt.plot(bin_edges[0:-1], np.ones(len(cdf))-cdf,linestyle='--', marker=\"o\", color='b', markersize=1, label=\"Empirical Tail\")\n\tplt.ylim((0,1))\n\tplt.ylabel(\"1-CDF\")\n\tplt.xlabel(r\"$Z_n$\")\n\tplt.yscale(\"linear\")\n\tplt.xscale(\"log\") # Q _ what axis scales are most informative?\n\tplt.grid(True)\n\n\n\trv = stats.lognorm([(sigma)*(timestep**0.5)], scale=np.exp(timestep*mu))\n\tplt.plot(bin_edges[0:-1], rv.sf(bin_edges[0:-1]), color=\"red\", label=\"Theoretical Survival Function\")\n\tplt.legend()\n\n\tplt.title(r\"Empirical Tail of $Z_n$ at n={}. 500 realisations, $\\mu_x=-0.02, \\sigma_x=0.2$\".format(timestep))\n\tplt.savefig('empirical_tail_{}.png'.format(timestep))\t\n\n\tplt.show()\n","sub_path":"Assignment 1/assignment_1.py","file_name":"assignment_1.py","file_ext":"py","file_size_in_byte":10193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"654195917","text":"#Program prints numbers from list smaller than 5\n\nlist = [1, 5, 2, 1, 4, 7, 8, 99, 3, 55, 22, 33, 421, 68, 12, 1, 3, 9, 3]\nnewList =[]\nuserList = []\nlistLength = list.__len__()\ncount = 0\nfor number in list:\n if(number<5):\n newList.append(number)\n\nprint('List length: ', listLength)\nprint('Numbers less than 5:', newList)\n\nuserNumber = input('Smaller than which value you want to write numbers?')\nuserNumber = int(userNumber)\n\nfor number in list:\n if(number len(X_train) - FLAGS.batch_size:\n np.random.shuffle(indices)\n start_idx = 0\n feed_dict = {x: X_train[excerpt], y_: y_train[excerpt]}\n# feed.dict.update(network.all_drop) \n yield feed_dict\n\n\ndef get_validate_data(x, y_, FLAGS):\n X_train, y_train, X_val, y_val, X_test, y_test = \\\n tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1), path=\"./data/\")\n \n X_train = np.asarray(X_train, dtype=np.float32) \n y_train = np.asarray(y_train, dtype=np.int64)\n X_val = np.asarray(X_val, dtype=np.float32)\n y_val = np.asarray(y_val, dtype=np.int64)\n X_test = np.asarray(X_test, dtype=np.float32)\n y_test = np.asarray(y_test, dtype=np.int64)\n for this_X_val, this_y_val in tl.iterate.minibatches(\n X_val, y_val,\n batch_size=FLAGS.batch_size,\n shuffle=True):\n feed_dict = {x: this_X_val, y_: this_y_val}\n yield feed_dict\n\ndef inference(FLAGS):\n x = tf.placeholder(tf.float32,\n shape=[FLAGS.batch_size, 28, 28, 1],\n name=\"x\")\n y_ = tf.placeholder(tf.int64,\n shape=[FLAGS.batch_size,],\n name=\"y_\")\n \n network = tl.layers.InputLayer(x, name=\"input_layer\")\n network = tl.layers.Conv2dLayer(network,\n act=tf.nn.relu,\n shape=[5, 5, 1, 32],\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n name=\"cnn_layer_0\")\n network = tl.layers.PoolLayer(network,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n pool=tf.nn.max_pool,\n name=\"pool_layer_0\")\n network = tl.layers.Conv2dLayer(network,\n act=tf.nn.relu,\n 
shape=[5, 5, 32, 64],\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n name=\"cnn_layer_1\")\n network = tl.layers.PoolLayer(network,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n pool=tf.nn.max_pool,\n name=\"pool_layer_1\")\n network = tl.layers.FlattenLayer(network, name=\"flatten_layer\")\n# network = tl.layers.DropoutLayer(network, keep=0.5, name=\"drop_0\")\n network = tl.layers.DenseLayer(network, n_units=256,\n act=tf.nn.relu, name=\"dense_layer_1\")\n# network = tl.layers.DropoutLayer(network, keep=0.5, name=\"drop_1\")\n network = tl.layers.DenseLayer(network, n_units=10,\n act=tf.identity, name=\"output_layer\")\n return [x, y_, network]\n\ndef calc_loss(true, pred):\n return tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=pred, labels=true))\n \ndef trainer(cost, global_step):\n optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=FLAGS.learning_rate)\n train_op = optimizer.minimize(loss=cost, global_step=global_step)\n return train_op\n \n\ndef main(argv=None): \n cluster_spec = tf_utils.get_cluster_spec()\n job_name = tf_utils.get_job_name()\n task_index = tf_utils.get_task_index()\n \n cluster = tf.train.ClusterSpec(cluster_spec)\n server = tf.train.Server(cluster,\n job_name=job_name,\n task_index=task_index)\n \n if job_name == \"ps\":\n print(\"Current process id: {}\".format(os.getpid()))\n server.join()\n elif job_name == \"worker\":\n print(\"Current process id: {}\".format(os.getpid()))\n with tf.device(tf.train.replica_device_setter(\n worker_device=\"/job:{}/task:{}\".format(job_name, task_index),\n cluster=cluster)):\n \n # Build model...\n x, y_, network = inference(FLAGS)\n \n # Calculate loss...\n loss = calc_loss(y_, network.outputs)\n correct_prediction = tf.equal(tf.arg_max(network.outputs, 1), y_)\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n tf.summary.scalar(\"train_loss\", loss)\n val_loss = tf.placeholder(tf.float32, shape=(), name=\"val_loss\")\n tf.summary.scalar(\"val_loss\", val_loss)\n val_accuracy = tf.placeholder(tf.float32, shape=(), name=\"val_accuracy\")\n tf.summary.scalar(\"val_accuracy\", val_accuracy)\n \n # Get train operation...\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n train_op = trainer(loss, global_step)\n \n init_op = tf.global_variables_initializer()\n summary_op = tf.summary.merge_all()\n saver = tf.train.Saver(sharded=True)\n \n sv = tf.train.Supervisor(is_chief=(task_index == 0),\n init_op=init_op,\n logdir=FLAGS.checkpoint_dir, \n summary_op=None,\n saver=saver,\n global_step=global_step,\n save_model_secs=60,\n summary_writer=None)\n \n with sv.managed_session(server.target) as sess:\n if task_index == 0:\n print(\"Save tensorboard files into: {}.\".format(\n FLAGS.tensorboard_dir))\n writer = tf.summary.FileWriter(FLAGS.tensorboard_dir,\n sess.graph)\n\n step = 0\n feed_dict_generator = get_feed_dict(\n x, y_, network, FLAGS)\n if task_index == 0:\n next_summary_time = time.time() + FLAGS.summary_period\n while not sv.should_stop() and step < FLAGS.max_step:\n this_feed_dict = feed_dict_generator.next()\n _, step = sess.run([train_op, global_step],\n feed_dict=this_feed_dict)\n \n if task_index == 0 \\\n and next_summary_time < time.time():\n this_val_accuracy = []\n this_val_loss = []\n for val_feed_dict in get_validate_data(\n x, y_, FLAGS):\n this_acc, this_loss = sess.run(\n [accuracy, loss], feed_dict=val_feed_dict)\n this_val_accuracy.append(this_acc)\n this_val_loss.append(this_loss)\n this_val_accuracy = 
np.mean(this_val_accuracy)\n this_val_loss = np.mean(this_val_loss)\n print(\"{} {}\".format(this_val_accuracy, this_val_loss))\n summary_feed_dict = {\n val_loss: this_val_loss.astype(np.float32),\n val_accuracy: this_val_accuracy.astype(np.float32)}\n summary_feed_dict.update(this_feed_dict)\n summary_value, step = sess.run(\n [summary_op, global_step],\n feed_dict=summary_feed_dict)\n writer.add_summary(summary_value, step)\n writer.flush()\n next_summary_time += FLAGS.summary_period\n if task_index == 0:\n writer.close()\n \nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"examples/example_distribute_tf.py","file_name":"example_distribute_tf.py","file_ext":"py","file_size_in_byte":10452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"340921522","text":"import numpy as np\nfrom copy import deepcopy\nfrom astropy.table import Table\n\n# Structure of incoming data will always consist of time stamps, flux values,\n# and exposure times.\n\n# There will be gaps in the data. These gaps can be replaced with either the\n# median flux of the time series, or left at 0.\n\n# TERMINOLOGY:\n#\n# bins - the evenly-spaced \"new\" bins that the flux data will be distributed\n# into\n#\n# binwidth - the width of the bins, in units of time\n#\n# time - the time stamps from the original time series\n#\n# flux - the flux data from the original time series\n#\n# timeseries - the original time series (time and flux)\n#\n# exptime - the exposure time for each frame of the time series\n#\n# timestamp_position - the position of the time stamp relative to the\n# exposure\n#\n# 0 - the time stamp is at the beginning of each exposure\n# 0.5 - the time stamp is in the middle of each exposure\n# 1 - the time stamp is at the end of each exposure\n#\n#\n#\n\ndef zipper(a, b):\n \"\"\"\n Combines two arrays, fitting the values of b in between the values of a.\n \"\"\"\n empty = np.empty((a.size + b.size), dtype=a.dtype)\n empty[0::2], empty[1::2] = a, b\n return empty\n\n\ndef rebin(timeseries, binwidth=None, exptime=None, timestamp_position=0.5,\n median_replace=True):\n \"\"\"\n Rebin a time series into evenly-spaced bins, the number of which is a power\n of two for fast fourier transform compatibility.\n\n Parameters\n ----------\n timeseries : numpy array or astropy table\n The time series to be rebinned, consisting of two columns: timestamps\n and fluxes, in that order.\n binwidth : scalar, optional\n The width, in the same units of time as the timeseries, of each bin\n into which the fluxes from the timeseries will be distributed. If not\n provided, it will be inferred from the original time series.\n exptime : scalar, optional\n The exposure time, in the same units of time as the timeseries, of each\n image taken. If not provided, it will be inferred from the original\n time series.\n timestamp_position : scalar, optional\n The position of the time stamp relative to each exposure. Values are\n 0 - the time stamp is at the beginning of each exposure; 0.5 - the time\n stamp is in the middle of each exposure; 1 - the time stamp is at the\n end of each exposure.\n median_replace : boolean, optional\n If true, gaps in the timeseries will be replaced with the median flux\n of all the data points.\n\n Returns\n -------\n a : array\n A two-column numpy array containing timestamps and fluxes in evenly-\n spaced bins. 
Timestamps indicate the beginning of each bin.\n \"\"\"\n\n # FORMAT THE TIME SERIES\n\n if type(timeseries) != np.ndarray and type(timeseries) != Table:\n print('Error: timeseries data type is not a recognizable format.')\n return\n elif type(timeseries) == Table:\n time = np.array(timeseries.columns[0])\n flux = np.array(timeseries.columns[1])\n timeseries = np.vstack((time, flux)).T\n\n if np.shape(timeseries)[1] != 2:\n print('Error: input array is of the wrong dimension. Please input as '\\\n 'a two-column array with time and flux, in that order.')\n return\n\n # Unpack the timeseries\n time = timeseries[:,0]\n flux = timeseries[:,1]\n\n # If keyword arguments are unspecified, infer them here\n if exptime is None:\n dt = time[1:] - time[:-1]\n exptime = np.median(dt)\n\n if binwidth is None:\n dt = time[1:] - time[:-1]\n binwidth = np.median(dt)\n\n # Shift timestamps to be at beginning of exposures\n starttimes = time - timestamp_position*exptime\n endtimes = starttimes + exptime\n\n # Barycentered data may have overlapping time bins. Cut off the ends\n overlaps = endtimes[:-1] - starttimes[1:]\n endtimes[np.where(overlaps > 0)] = starttimes[np.where(overlaps > 0)[0]+1]\n\n # Find gaps in the timeseries\n gaps = starttimes[1:] - endtimes[:-1]\n gaps[np.abs(gaps)<1e-6]=0.0#\n\n # # Median replace, if set to true.\n median_flux = np.median(flux)\n # if median_replace is True:\n # gaps = gaps*median_flux/exptime\n # else:\n # gaps = gaps*0.\n\n # Inject gap replacements into original data\n gapflag1=np.zeros(len(flux))+1\n gapflag2=np.zeros(len(gaps))+1\n gapflag2[gaps>0]=0\n gapflag=zipper(gapflag1,gapflag2)\n print(\"gaps [:25]\", gaps[:25])\n flux = zipper(flux, gaps)\n print(\"flux[:50]\",flux[:50])\n gapidx=np.where(gapflag==0)\n for i in range(len(gapidx)):\n # flux[gapidx[i]]=((flux[gapidx[i]-1] + flux[gapidx[i]+1])*(flux[gapidx[i]]))/exptime\n # flux[gapidx[i]] = (flux[gapidx[i]]*median_flux)/exptime #global median replacement\n print(\"gapidx[i]-1\",(flux[gapidx[i]-1]),\"[gapidx[i]+1]\",(flux[gapidx[i]+1]))#5/31\n flux[gapidx[i]]=(flux[gapidx[i]]*(flux[gapidx[i]-1]+flux[gapidx[i]+1]))/(2.*exptime)#5/31\n starttimes = zipper(starttimes, endtimes[:-1])\n endtimes = np.append(starttimes[1:], endtimes[-1])\n\n # Remove injected replacements where no gap was found\n indices_to_remove = np.where(starttimes == endtimes)\n flux = np.delete(flux, indices_to_remove)\n starttimes = np.delete(starttimes, indices_to_remove)\n endtimes = np.delete(endtimes, indices_to_remove)\n\n # Create the bins the flux will be redistributed into\n duration = np.max(time) + exptime - np.min(time)\n nbins = 2**np.ceil(np.log10(duration/binwidth)/np.log10(2)).astype(int)\n bins = np.zeros(nbins)\n\n # Bin start and stop times\n startbins = (starttimes[0] + binwidth*np.linspace(0, nbins-1, nbins))\n endbins = startbins + binwidth\n\n # Add final gap between last data point and end of the bins\n # flux = np.append(flux, (endbins[-1]-endtimes[-1])*median_flux/exptime)\n flux = np.append(flux, flux[-1])\n # print(\"flux[:25] after rebin\",flux[:25]) #\n starttimes = np.append(starttimes, endtimes[-1])\n endtimes = np.append(endtimes, endbins[-1])\n\n # Split up timeseries flux into bins\n for i in range(len(flux)):\n\n j = np.where(startbins <= starttimes[i])[0][-1] # 1st bin left of start\n k = np.where(endbins >= endtimes[i])[0][0] # 1st bin right of end\n\n width = endtimes[i]-starttimes[i]\n\n if j == k:\n bins[j] += flux[i]\n\n if j < k:\n\n # Left bin\n frac_L = (startbins[j+1] - starttimes[i]) / width\n 
bins[j] += frac_L*flux[i]\n\n # Right bin\n frac_R = (endtimes[i] - endbins[k-1]) / width\n bins[k] += frac_R*flux[i]\n\n if j+1 < k:\n\n # Middle bins\n n = k - (j+1)\n frac = (endbins[k-1] - startbins[j+1]) / width / n\n bins[j+1:k] += frac*flux[i]\n\n print(\"bins[:25]\",bins[:25])#\n return np.vstack((startbins, bins)).T\n","sub_path":"Desktop/UF Research/rebin2.py","file_name":"rebin2.py","file_ext":"py","file_size_in_byte":7001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"539721879","text":"#!/usr/bin/env python3\nimport random\nfrom sys import stderr\n\nalive = set()\nof_which = dict()\nalives = dict()\n\ni = 0\nfor ship in input().split():\n\tbegin, end = ship[:2], ship[2:]\n\tif begin > end: begin, end = end, begin\n\talives[i] = 0\n\tif begin[0] == end[0]: # vertical\n\t\tx = ord(begin[0])\n\t\tfor y in range(int(begin[1]), int(end[1]) + 1):\n\t\t\talive.add((x, y))\n\t\t\tof_which[(x, y)] = i\n\t\t\talives[i] += 1\n\telse: # horizontal ship\n\t\ty = int(begin[1])\n\t\tfor x in range(ord(begin[0]), ord(end[0]) + 1):\n\t\t\talive.add((x, y))\n\t\t\tof_which[(x, y)] = i\n\t\t\talives[i] += 1\n\ti += 1\n\nif int(input()) == 0:\n\tf = random.sample(alive, 1)[0]\n\tprint('0X ' + chr(f[0]) + str(f[1]) + 'A0')\n\nwhile True:\n\tinp = input()\n\tif len(inp) == 1:\n\t\tto = random.sample(alive, 1)[0]\n\t\tprint('0X ' + chr(to[0]) + str(to[1]) + f)\n\t\tcontinue\n\n\tf, t = inp[:2], inp[2:]\n\n\tstderr.write('alive = ' + str(alive) + '\\n')\n\tstderr.write('t = ' + str(t) + '\\n')\t\n\n\tprint('0X', end=' ')\n\tpair = ord(t[0]), int(t[1])\n\t\n\tkilled = True\t\n\tif pair in alive:\n\t\talive.remove(pair)\n\t\talives[of_which[pair]] -= 1\n\t\tif alives[of_which[pair]]:\n\t\t\tprint('H')\n\t\telse:\n\t\t\tprint('K')\t\n\telse:\n\t\tto = random.sample(alive, 1)[0]\n\t\tprint(chr(to[0]) + str(to[1]) + f)\n","sub_path":"server/static-files/sources/seabattle/combalg/2016/pikulin.py","file_name":"pikulin.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"251570948","text":"# -*- coding: utf-8 -*-\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom django.contrib import auth\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.shortcuts import render_to_response\nfrom django.utils import simplejson\nfrom django.conf import settings\nfrom django.views.decorators.cache import never_cache\nfrom django.template import RequestContext\n\nfrom core.reqcont import admin_proc\n\n@never_cache\n@ensure_csrf_cookie\ndef login(request):\n next = request.GET.get('next', '')\n if not request.user.is_authenticated() :\n return render_to_response(\"login.html\",\n { 'login': 1,\n 'next': next,\n 'subtitle' : 'Авторизация пользователя',\n }, context_instance=RequestContext(request, processors=[admin_proc]))\n else:\n msg = request.user.first_name + u\" \" + request.user.last_name + u\" выйти из системы?\"\n return render_to_response(\"login.html\", { 'logoutmesg': msg,\n 'subtitle' : 'Авторизация пользователя',\n }, context_instance=RequestContext(request, processors=[admin_proc]))\n\n@never_cache\ndef authapi(request):\n response = None\n if not request.is_ajax() or not request.POST:\n response = {'result': 'error', 'text': 'AJAX only'}\n else:\n req = simplejson.loads(request.raw_post_data)\n act = req.get('act', '')\n if act == \"login\":\n username = req.get('login', '')\n password = req.get('pass', '')\n user = 
auth.authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n response = { 'result': 'ok' }\n else:\n response = { 'result': 'error', 'text': 'User not active' }\n else:\n response = { 'result': 'error' }\n elif act == \"logout\":\n auth.logout(request)\n response = { 'result': 'ok' }\n return HttpResponse(simplejson.dumps(response), mimetype='application/json')\n","sub_path":"web/scripts/core/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"431209041","text":"import pandas as pd\nimport numpy as np\n\nfrom random import randint\nfrom sklearn.preprocessing import StandardScaler\n\n\nclass TmpPreprocessor(object):\n\tdef __init__(self, number_of_samples, input_trace):\n\t\tsuper(TmpPreprocessor, self).__init__()\n\t\tself.number_of_samples = number_of_samples\n\t\tself.input_trace = input_trace\n\n\tdef fit_transform(self, X, y, noise=False):\n\n\t\t# Apply Noise\n\t\tif noise==True:\n\t\t\tX = self.apply_noise(X)\n\n\t\t# Standardize Features and transform into numpy array\n\t\tX = self.standardize_features(X)\n\t\t\n\t\t# Rescale Labels and transform into numpy array\n\t\ty = self.rescale_labels(y)\n\n\t\t# Generate Sequences \n\t\tX, y = self.generate_data(X, y)\n\n\t\t# Train, test, validation split\n\t\tX_train, y_train, X_test, y_test, X_val, y_val = self.train_test_validation_split(X, y)\n\n\t\treturn X_train, y_train, X_test, y_test, X_val, y_val\n\n\tdef standardize_features(self, features):\n\t\tsc = StandardScaler().fit(features)\n\t\tfeatures = sc.transform(features)\n\t\t\n\t\treturn features\n\n\tdef rescale_labels(self, labels):\n\n\t\tlabels['x'] = labels['x'].apply(lambda x: x + 600)\n\t\t# labels['y'] = labels['y'].apply(lambda x: x - img_height)\n\t\tlabels['z'] = labels['z'].apply(lambda x: x + 450)\n\t\t\n\t\treturn labels.values\n\n\tdef generate_data(self, features, labels):\n\t\tdata_x = []\n\t\tdata_y = []\n\t\tfor i in range(0, len(features)-25):\n\t\t\tinitial_idx = randint(0, len(features)-self.input_trace-1)\n\t\t\tx = features[initial_idx:initial_idx+self.input_trace,:]\n\t\t\ty = labels[initial_idx+int(self.input_trace/2):initial_idx+int(self.input_trace/2)+1,:]\n\t\t\tdata_x.append(x)\n\t\t\tdata_y.append(y)\n\t\tdata_x = np.asarray(data_x)\n\t\tdata_y = np.asarray(data_y)\n\t\t\n\t\treturn data_x, data_y\n\n\tdef train_test_validation_split(self, features, labels, val_samples=100, test_samples=200):\n\n\t\tX_test = features[0:test_samples]\n\t\ty_test = labels[0:test_samples]\n\n\t\tX_val = features[test_samples:test_samples + val_samples]\n\t\ty_val = labels[test_samples:test_samples + val_samples]\n\n\t\tX_train = features[test_samples + val_samples:]\n\t\ty_train = labels[test_samples + val_samples:]\n\t\t\n\t\treturn X_train, y_train, X_test, y_test, X_val, y_val\n\n\tdef apply_noise(self, features, mean=0.0, std_dev_size=2.5, std_dev_x_y=3.0):\n\n\t\t# To the x\n\t\tnoise = np.random.normal(mean, std_dev_x_y, len(features))\n\t\tfeatures[\"relative_x\"] = features[\"relative_x\"] + noise\n\n\t\t# To the y\n\t\tnoise = np.random.normal(mean, std_dev_x_y, len(features))\n\t\tfeatures[\"relative_y\"] = features[\"relative_y\"] + noise\n\n\t\t# To the size\n\t\tnoise = np.random.normal(mean, std_dev_size, len(features))\n\t\t# Clip the noise\n\t\tnoise[noise > std_dev_size] = std_dev_size\n\t\tnoise[noise < -std_dev_size] = -std_dev_size\n\t\tfeatures[\"size\"] = 
features[\"size\"] + noise\n\n\t\treturn features\n\t\t","sub_path":"ball_3d_coordinates/traj_flying/preprocessing/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"367675490","text":"import numpy as np\n\nimport tensorflow as tf\nimport threading\nimport ConvNet\nimport time\nimport os\n\nfrom six.moves import xrange\n\nBATCH_SIZE = 16\n\nIMAGE_W = 192\nIMAGE_H = 256\n\nZ_DIM = 128\nIMAGE_CHANNEL = 3\nLR = 0.00001 # Learning rate\n\n\nclass celebaBetter:\n def __init__(self, filePath):\n self.file_index = 0\n self.content_index = 0\n self.filePath = filePath\n self.bytestream = open(self.filePath + str(self.file_index)+\".bin\",\"br\")\n self.nextImage = None\n self.t = None\n def extract_data(self):\n def _load_t():\n self.content_index = self.content_index + BATCH_SIZE\n if self.content_index>=4096:#202599\n self.file_index = self.file_index + 1\n if self.file_index >= 31:\n self.file_index = 0\n self.bytestream.close()\n self.bytestream = open(self.filePath + str(self.file_index)+\".bin\",\"br\")\n self.content_index = 0\n \n buf = self.bytestream.read(BATCH_SIZE * IMAGE_H * IMAGE_W * IMAGE_CHANNEL)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data) / 256.0 - 0.5\n data = data.reshape(BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL)\n self.nextImage = data\n\n while self.nextImage is None:\n if self.t and self.t.isAlive():\n self.t.join()\n print(\"glich\")\n else:\n self.t = threading.Thread(target=_load_t,args=())\n self.t.start()\n\n ret = self.nextImage\n self.nextImage = None\n \n self.t = threading.Thread(target=_load_t,args=())\n self.t.start()\n \n return ret\n\nCBHR = celebaBetter(\"F:\\\\MNIST\\\\celebaBetter\\\\\")\n\nprint(\"startload\")\nglist = []\nGF = 96 # Dimension of G filters in first conv layer. default [64]\nloadFromFile = ConvNet.openEmptyFileR('gan11g.txt')\ngfc0 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM,outDepth = GF*8*3*4,loadFromFile = loadFromFile))\ngdc0 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*8,outDepth = GF*8,filterSize = 3,loadFromFile = loadFromFile))#4in\ngdc1 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*8,outDepth = GF*4,filterSize = 3,loadFromFile = loadFromFile))#8in\ngdc2 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*4,outDepth = GF*2,filterSize = 5,loadFromFile = loadFromFile))#16in\ngdc3 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*2,outDepth = GF*2,filterSize = 5,loadFromFile = loadFromFile))#32in\ngdc4 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*2,outDepth = GF*1,filterSize = 5,loadFromFile = loadFromFile))#64in\ngdc5 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*1,outDepth = IMAGE_CHANNEL,filterSize = 5,loadFromFile = loadFromFile))#128in\nif loadFromFile:loadFromFile.close()\n\ndlist = []\nDF = 96 # Dimension of D filters in first conv layer. 
default [64]\nloadFromFile = ConvNet.openEmptyFileR('gan11d.txt')\ndcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = IMAGE_CHANNEL,outDepth = DF*1,filterSize = 5,loadFromFile = loadFromFile))#128out\ndcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#64out\ndcv2 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#32out\ndcv3 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*4,filterSize = 5,loadFromFile = loadFromFile))#16out\ndcv4 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*4,outDepth = DF*8,filterSize = 3,loadFromFile = loadFromFile))#8out\ndcv5 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*8,outDepth = DF*8,filterSize = 3,loadFromFile = loadFromFile))#4out\ndfc0 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = DF*8*3*4,outDepth = 128,loadFromFile = loadFromFile))\ndfc1 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = 128,outDepth = 1,loadFromFile = loadFromFile))\nif loadFromFile:loadFromFile.close()\nprint(\"endload\")\ndef generator(z):\n _ret = gfc0.getLayer(z, isRelu=True, fixed = False)\n _ret = ConvNet.FC2Conv_Reshape(_ret, 4, 3, GF*8)\n _ret = gdc0.getLayer(_ret, height = 8, width = 6, convStride = 2, isRelu=True, fixed = False)\n _ret = gdc1.getLayer(_ret, height = 16, width = 12, convStride = 2, isRelu=True, fixed = False)\n _ret = gdc2.getLayer(_ret, height = 32, width = 24, convStride = 2, isRelu=True, fixed = False)\n _ret = gdc3.getLayer(_ret, height = 64, width = 48, convStride = 2, isRelu=True, fixed = False)\n _ret = gdc4.getLayer(_ret, height = 128, width = 96, convStride = 2, isRelu=True, fixed = False)\n _ret = gdc5.getLayer(_ret, height = IMAGE_H, width = IMAGE_W, convStride = 2, isRelu=False, fixed = False)\n return _ret\n \ndef discriminator(inputT):\n _ret = dcv0.getLayer(inputT, convStride = 2, poolSize = 1,isRelu=True, fixed = False)\n _ret = dcv1.getLayer(_ret, convStride = 2, poolSize = 1,isRelu=True, fixed = False)\n _ret = dcv2.getLayer(_ret, convStride = 2, poolSize = 1,isRelu=True, fixed = False)\n _ret = dcv3.getLayer(_ret, convStride = 2, poolSize = 1,isRelu=True, fixed = False)\n _ret = dcv4.getLayer(_ret, convStride = 2, poolSize = 1,isRelu=True, fixed = False)\n _ret = dcv5.getLayer(_ret, convStride = 2, poolSize = 1,isRelu=True, fixed = False)\n _ret = ConvNet.Conv2FC_Reshape(_ret)\n _ret = dfc0.getLayer(_ret, isRelu=True, fixed = False)\n _ret = dfc1.getLayer(_ret, isRelu=False, fixed = False)\n return _ret\n\n\ndef train():\n ###################\n# loadedimage = extract_data()\n# ConvNet.saveImages(loadedimage, [8, 8], \"test0.png\")\n# loadedimage = extract_data()\n# ConvNet.saveImages(loadedimage, [8, 8], \"test1.png\")\n# exit()\n\n \n \n# for idx in xrange(0, 1000000000):\n# loadedimage = extract_data()\n# global file_index\n# global content_index\n# print(str(file_index)+\",\"+str(content_index)) \n# exit()\n\n ###################\n \n images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])\n z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM])\n \n G = generator(z)\n D_logits = tf.reduce_mean(discriminator(images))\n D_logits_F = tf.reduce_mean(discriminator(G))\n \n gen_cost = -D_logits_F\n disc_cost = D_logits_F - D_logits\n\n alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)\n differences = G - images\n differences = tf.reshape(differences,[BATCH_SIZE,-1])\n imagereshape = tf.reshape(images,[BATCH_SIZE,-1])\n interpolates = imagereshape + 
(alpha*differences)\n interpolates = tf.reshape(interpolates,images.shape)\n gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]\n slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))\n gradient_penalty = tf.reduce_mean((slopes-1.0)**2)\n \n LAMBDA = 10 # Gradient penalty lambda hyperparameter\n disc_cost += LAMBDA*gradient_penalty\n\n g_vars = ConvNet.getParam(glist)\n d_vars = ConvNet.getParam(dlist)\n\n d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars) \n g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)\n\n sess = tf.Session()\n\n sample_z1 = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))\n sample_z2 = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))\n #sample_z3 = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))\n #sample_z4 = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))\n #for i in xrange(0,BATCH_SIZE):\n # sample_z[i] = np.random.uniform(-(i/BATCH_SIZE), (i/BATCH_SIZE), size = (Z_DIM))\n \n init = tf.global_variables_initializer() \n sess.run(init)\n\n start_time = time.time()\n idx = 0\n while True:\n idx = idx + 1\n elapsed_time = time.time() - start_time\n start_time = time.time()\n\n dt = 0\n df = 0\n for _ in xrange(2):\n batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))\n loadedimage = CBHR.extract_data()\n _,d1,d2 = sess.run([d_optim,D_logits,D_logits_F], feed_dict = {z:batch_z, images:loadedimage})\n dt = dt + d1\n df = df + d2\n\n print(str(idx)+\",\"+str(CBHR.file_index)+\",\"+str(CBHR.content_index)+\",\"+str(elapsed_time)+\",\"+str(dt)+\",\"+str(df))\n\n batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))\n sess.run(g_optim, feed_dict = {z: batch_z})\n\n if idx % 10 == 0:\n\n sample = np.zeros([BATCH_SIZE*2, IMAGE_H, IMAGE_W, IMAGE_CHANNEL], dtype=np.float32)\n sample[BATCH_SIZE*0:BATCH_SIZE*1] = sess.run(G, feed_dict = {z: sample_z1})\n sample[BATCH_SIZE*1:BATCH_SIZE*2] = sess.run(G, feed_dict = {z: sample_z2})\n #sample[BATCH_SIZE*2:BATCH_SIZE*3] = sess.run(G, feed_dict = {z: sample_z3})\n #sample[BATCH_SIZE*3:BATCH_SIZE*4] = sess.run(G, feed_dict = {z: sample_z4})\n\n def imgSave(idx,sample):\n ConvNet.saveImages(sample, [4,8], 'out11\\\\sample_%d.png' % (idx))\n \n \n t = threading.Thread(target=imgSave,args=(idx,sample))\n t.start()\n \n exist = False\n \n if idx%10 == 0:\n exist = os.path.exists(\"stop.txt\")\n \n if idx % 2000 == 0 or exist:\n \n def save(idx, gSaver, dSaver):\n print(\"start save\")\n saveToFile = ConvNet.openEmptyFileW(\"gan11g\"+str(idx)+\".txt\")\n for item in gSaver:\n item(saveToFile)\n saveToFile.flush();saveToFile.close()\n \n saveToFile = ConvNet.openEmptyFileW(\"gan11d\"+str(idx)+\".txt\")\n for item in dSaver:\n item(saveToFile)\n saveToFile.flush();saveToFile.close()\n print(\"end save\")\n \n gSaver = []\n dSaver = []\n for item in glist:\n gSaver.append(item.getSaver(sess))\n for item in dlist:\n dSaver.append(item.getSaver(sess))\n\n t = threading.Thread(target=save,args=(idx,gSaver, dSaver))\n t.start()\n \n \n if exist:\n break\n\n sess.close()\n \n\ntrain()\n","sub_path":"mytensorflow/mytf/ganBetter/gan11.py","file_name":"gan11.py","file_ext":"py","file_size_in_byte":10367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"10916237","text":"#!/usr/bin/env python\nimport os\nimport sys\nsys.path.append(\"/home/yunfeiguo/projects/kaggle_cs567/src\")\nimport 
multiprocessing\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nfrom pylab import *\nimport matplotlib.pyplot as plt\nimport dask.dataframe as dd\nfrom yg_utils import *\nfrom sklearn import linear_model\nimport logging\nimport argparse\nfrom sklearn.cross_validation import KFold\nlogging.basicConfig(format='%(asctime)s %(message)s')\nlogging.warning('started')\n\n#reading in training data\nfn_train='/home/yunfeiguo/projects/kaggle_cs567/data/train.csv'\nfn_test='/home/yunfeiguo/projects/kaggle_cs567/data/test.csv'\n#fn_train='/home/yunfeiguo/projects/kaggle_cs567/data/train_100000.csv'\n#fn_test='/home/yunfeiguo/projects/kaggle_cs567/data/smalltest.v2.csv'\n#fn_out='/home/yunfeiguo/projects/kaggle_cs567/results/2nd_submission.csv'\ntrainData=pd.read_csv(fn_train,sep=',',index_col='Id')\ntestData=pd.read_csv(fn_test,sep=',',index_col='Id')\n#trainData=dd.read_csv(fn_train,sep=',')\n#trainData.set_index('Id')\n#testData=dd.read_csv(fn_test,sep=',')\n#testData.set_index('Id')\nn=len(trainData.columns)\nlogging.warning('reading done')\n\n#drop records whose Ref columns are all NaNs\ntrainCleaned = trainData.groupby(trainData.index).filter(lambda x: sum(np.isfinite(x['Ref'])) > 1)\n#fill remaining NaNs with column mean\n#alternatively, try group mean\ntrainCleaned.fillna(trainCleaned.mean().to_dict(),inplace=True)\ntestData.fillna(testData.mean().to_dict(),inplace=True)\n#make sure there is no NaN any more\nsum(np.isfinite(trainCleaned)==False) #return all zeros\nsum(np.isfinite(testData)==False)\nlogging.warning('preprocessing done')\n\n\ndef marshall_palmer(ref, minutes_past):\n #print \"Estimating rainfall from {0} observations\".format(len(minutes_past))\n # how long is each observation valid?\n valid_time = np.zeros_like(minutes_past)\n valid_time[0] = minutes_past.iloc[0]\n for n in xrange(1, len(minutes_past)):\n valid_time[n] = minutes_past.iloc[n] - minutes_past.iloc[n-1]\n valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)\n valid_time = valid_time / 60.0\n\n # sum up rainrate * validtime\n sum = 0\n for dbz, hours in zip(ref, valid_time):\n # See: https://en.wikipedia.org/wiki/DBZ_(meteorology)\n if np.isfinite(dbz):\n mmperhr = pow(pow(10, dbz/10)/200, 0.625)\n sum = sum + mmperhr * hours\n return sum\n\n# each unique Id is an hour of data at some gauge\ndef testfunc(hour):\n #rowid = hour['Id'].iloc[0]\n # sort hour by minutes_past\n hour = hour.sort('minutes_past', ascending=True)\n est = marshall_palmer(hour['Ref'], hour['minutes_past'])\n return est\n\nestimates = trainCleaned.groupby(trainCleaned.index).apply(testfunc)\nx = trainCleaned.copy()\nx['est'] = estimates\nplt.yscale('log')\nplt.xscale('log')\nplt.scatter(x['Expected'],x['est'])\nplt.xlabel('Expcted')\nplt.ylabel('benchmarkest')\nplt.savefig('alltrain_benchmarkest_expected_scatter.png')\n","sub_path":"check_error_dist/check_error_dist.py","file_name":"check_error_dist.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"156538471","text":"import random\n\nclass HashFamily:\n def __init__(self):\n self.memomask = {}\n def hash_fn(self, n):\n mask = self.memomask.get(n)\n if mask is None:\n random.seed(n)\n mask = self.memomask[n] = random.getrandbits(64)\n return lambda x: hash(x) ^ mask\n\nclass BloomFilter:\n def __init__(self, num_hashes, num_slots):\n hf = HashFamily()\n self.hash_fn_list = [hf.hash_fn(n) for n in range(num_hashes)]\n self.num_slots = num_slots\n self.vector = 
[0]*num_slots\n\n # Implement this method.\n # It should apply all of the hash functions to the string s\n # and then for each result value of the hash function (modulo num_slots),\n # set the value at the matching index in the vector to 1.\n def AddString(self, s):\n for fn in self.hash_fn_list:\n self.vector[fn(s) % self.num_slots] = 1\n pass\n\n # Implement this method.\n # It should return the string \"Not Member\" or \"Maybe Member\", as appropriate.\n def IsMember(self, q):\n for fn in self.hash_fn_list:\n if self.vector[fn(q) % self.num_slots] != 1:\n return \"Not Member\"\n return \"Maybe Member\"\n\ndef main():\n num_hashes = 4\n num_slots = 128\n bf = BloomFilter(num_hashes, num_slots)\n initial_set = [\"potato\", \"tomato\", \"hippopotamus\", \"rhinoceros\"]\n for word in initial_set:\n bf.AddString(word)\n\n assert(bf.IsMember(\"turnip\") == \"Not Member\")\n assert(bf.IsMember(\"potato\") == \"Maybe Member\")\n\nif __name__ == '__main__':\n main()","sub_path":"bloom_filter.py","file_name":"bloom_filter.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"470492503","text":"import json, requests\nfrom pathlib import Path\nfrom urllib.request import urlretrieve\nfrom multiprocessing import Pool\n\ndef download_channel(channel, offset=0, firsturl=False):\n Path(str(Path()) + \"/\" + channel).mkdir(exist_ok=True)\n\n while 1:\n url = f'https://focusmusic.fm/api/tracks.php?offset={offset}&channel={channel}'\n file_url = json.loads(requests.get(url, timeout=10).content).get(\"url\").replace(\"\\\\\",\"\")\n if file_url == firsturl: break\n filepath, firsturl = str(Path()) + \"/\" + channel + \"/\" + Path(file_url).name, firsturl or file_url\n if not Path(filepath).exists():\n print(f\"getting offset {offset} filename {Path(filepath).name}\"); \n urlretrieve(file_url, filepath)\n offset +=1\n\nPool().map(download_channel, [\"electronic\", \"downtempo\", \"classical\", \"rain\"])\n","sub_path":"focusmusic-get-songs.py","file_name":"focusmusic-get-songs.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"288875226","text":"import airsim\nimport time\nimport numpy as np\nimport resources\n\n\ndef location(client):\n return client.simSetVehiclePose(\n airsim.Pose(airsim.Vector3r(x_val=71.37133026123047, y_val=173.48350524902344, z_val=-0.6527218222618103),\n airsim.Quaternionr(w_val=0.7069156765937805, x_val=2.5558463676134124e-05,\n y_val=-1.7646530977799557e-05, z_val=-0.7072978019714355)), True)\n\n\ndef drive(client, car_controls, distance, top_point_of_object):\n time.sleep(0.5)\n distance = np.array(distance)\n top_point_of_object = np.array(top_point_of_object)\n x_car_in_move = client.getCarState().kinematics_estimated.position.x_val\n time.sleep(0.01)\n\n if np.any(top_point_of_object > resources.TH_left_point_obj) and np.any(distance < resources.TH_distance):\n # apply brakes\n car_controls.brake = 0.4\n client.setCarControls(car_controls)\n print(\"Frana!\", top_point_of_object, distance)\n time.sleep(1.5) # let car drive a bit\n car_controls.brake = 0 # remove brake\n #client.simPrintLogMessage(\"Regim de franare pentru evitarea coliziunii!\", \"345\", 3)\n time.sleep(1)\n\n elif resources.right_line3 < x_car_in_move < resources.left_line3 and np.any(distance > resources.TH_distance) \\\n or (np.all(distance == 0.) 
and np.all(top_point_of_object == 0.)):\n # go forward\n car_controls.throttle = 0.4\n car_controls.steering = 0\n client.setCarControls(car_controls)\n print(\"Inainte\", top_point_of_object, distance)\n time.sleep(0.5)\n\n elif x_car_in_move < resources.right_line3 and np.any(distance > resources.TH_distance):\n # Go forward + steer left\n car_controls.throttle = 0.4\n car_controls.steering = -0.01\n client.setCarControls(car_controls)\n print(\"Viraj stanga\", top_point_of_object, distance)\n time.sleep(0.5)\n\n elif x_car_in_move > resources.left_line3 and np.any(distance > resources.TH_distance):\n # Go forward + steer right\n car_controls.throttle = 0.4\n car_controls.steering = 0.01\n client.setCarControls(car_controls)\n print(\"Viraj dreapta\", top_point_of_object, distance)\n time.sleep(0.5)\n\n else:\n if resources.TH_distance < np.all(distance) < 7.5:\n print(\"Inainte din inertie\", top_point_of_object, distance)\n","sub_path":"behavior3.py","file_name":"behavior3.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"577456256","text":"str_no_string = 0\r\nstr_empty_string = 1\r\nstr_yes = 2\r\nstr_no = 3\r\nstr_credits_0 = 4\r\nstr_credits_1 = 5\r\nstr_credits_2 = 6\r\nstr_credits_3 = 7\r\nstr_credits_4 = 8\r\nstr_credits_5 = 9\r\nstr_credits_6 = 10\r\nstr_credits_7 = 11\r\nstr_credits_8 = 12\r\nstr_credits_9 = 13\r\nstr_credits_10 = 14\r\nstr_credits_11 = 15\r\nstr_credits_12 = 16\r\nstr_profile_banner_selection_text = 17\r\nstr_use_default_banner = 18\r\nstr_game_type_1 = 19\r\nstr_game_type_2 = 20\r\nstr_game_type_3 = 21\r\nstr_game_type_4 = 22\r\nstr_game_type_5 = 23\r\nstr_game_types_end = 24\r\nstr_fac_1_default_name = 25\r\nstr_fac_2_default_name = 26\r\nstr_fac_3_default_name = 27\r\nstr_fac_4_default_name = 28\r\nstr_fac_5_default_name = 29\r\nstr_fac_6_default_name = 30\r\nstr_fac_7_default_name = 31\r\nstr_fac_8_default_name = 32\r\nstr_leave_edit_mode = 33\r\nstr_distance_reg1_sq_distance_reg2 = 34\r\nstr_choose_faction_banner = 35\r\nstr_keep_current_banner = 36\r\nstr_next = 37\r\nstr_previous = 38\r\nstr_spectate = 39\r\nstr_join_game = 40\r\nstr_choose_an_option = 41\r\nstr_choose_an_option_targeting_s1 = 42\r\nstr_change_options = 43\r\nstr_change_controls = 44\r\nstr_show_rules = 45\r\nstr_show_info = 46\r\nstr_admin_panel = 47\r\nstr_admin_tools = 48\r\nstr_admin_items = 49\r\nstr_kick_player = 50\r\nstr_ban_player_temp = 51\r\nstr_ban_player_perm = 52\r\nstr_mute_player = 53\r\nstr_mute_players = 54\r\nstr_kill_player = 55\r\nstr_fade_player_out = 56\r\nstr_freeze_player = 57\r\nstr_teleport_to_player = 58\r\nstr_teleport_behind_player = 59\r\nstr_teleport_player = 60\r\nstr_teleport_forwards = 61\r\nstr_equip_admin_armor = 62\r\nstr_become_invisible = 63\r\nstr_refill_health = 64\r\nstr_become_godlike = 65\r\nstr_spawn_admin_horse = 66\r\nstr_remove_admin_horses = 67\r\nstr_remove_stray_horses = 68\r\nstr_teleport_to_ships = 69\r\nstr_reset_sunken_ships = 70\r\nstr_join_faction = 71\r\nstr_lock_current_faction = 72\r\nstr_unlock_current_faction = 73\r\nstr_log_current_position = 74\r\nstr_log_distance_to_position = 75\r\nstr_your_current_position_is = 76\r\nstr_your_distance_to_position_is = 77\r\nstr_quit = 78\r\nstr_choose_a_player_to_s0 = 79\r\nstr_kick = 80\r\nstr_ban_temp = 81\r\nstr_ban_perm = 82\r\nstr_mute = 83\r\nstr_kill = 84\r\nstr_fade_out = 85\r\nstr_freeze = 86\r\nstr_teleport_to = 87\r\nstr_teleport_behind = 88\r\nstr_teleport = 
89\r\nstr_propose_as_lord = 90\r\nstr_send_message_to = 91\r\nstr_give_take_door_key = 92\r\nstr_give_take_money_chest_key = 93\r\nstr_give_take_item_chest_key = 94\r\nstr_give_take_marshal = 95\r\nstr_allow_disallow_announcing = 96\r\nstr_outlaw = 97\r\nstr_request_poll = 98\r\nstr_choose_a_poll_type = 99\r\nstr_s0__reg0_ = 100\r\nstr_choose_poll_scene = 101\r\nstr_choose_poll_kick = 102\r\nstr_choose_poll_ban = 103\r\nstr_choose_poll_faction_lord = 104\r\nstr_choose_a_scene = 105\r\nstr_faction_admin = 106\r\nstr_change_faction_banner = 107\r\nstr_change_faction_name = 108\r\nstr_kick_player_from_faction = 109\r\nstr_outlaw_player = 110\r\nstr_manage_door_keys = 111\r\nstr_manage_money_chest_keys = 112\r\nstr_manage_item_chest_keys = 113\r\nstr_manage_announcers = 114\r\nstr_manage_marshals = 115\r\nstr_choose_a_faction = 116\r\nstr_declare_faction_hostile = 117\r\nstr_offer_faction_peace = 118\r\nstr_you_have_door_keys = 119\r\nstr_you_have_money_keys = 120\r\nstr_you_have_item_keys = 121\r\nstr_you_have_announcers = 122\r\nstr_you_are_a_marshal = 123\r\nstr_log_admin_target_player = 124\r\nstr_log_admin_target_self = 125\r\nstr_log_admin_target_faction = 126\r\nstr_log_admin_kick = 127\r\nstr_log_admin_ban_temp = 128\r\nstr_log_admin_ban_perm = 129\r\nstr_log_admin_mute = 130\r\nstr_log_admin_mute_all = 131\r\nstr_log_admin_kill = 132\r\nstr_log_admin_fade_out = 133\r\nstr_log_admin_freeze = 134\r\nstr_log_admin_teleport_to = 135\r\nstr_log_admin_teleport_behind = 136\r\nstr_log_admin_teleport = 137\r\nstr_log_admin_teleport_forwards = 138\r\nstr_log_admin_refill_health = 139\r\nstr_log_admin_become_godlike = 140\r\nstr_log_admin_get_armor = 141\r\nstr_log_admin_get_invisible = 142\r\nstr_log_admin_get_horse = 143\r\nstr_log_admin_remove_horses = 144\r\nstr_log_admin_remove_stray_horses = 145\r\nstr_log_admin_teleport_to_ships = 146\r\nstr_log_admin_reset_ships = 147\r\nstr_log_admin_join_faction = 148\r\nstr_log_admin_lock_faction = 149\r\nstr_log_admin_own_position = 150\r\nstr_log_admin_own_distance = 151\r\nstr_log_admin_cheat_money = 152\r\nstr_log_admin_cheat_item = 153\r\nstr_log_admin_restocked_s4 = 154\r\nstr_poll_change_scene = 155\r\nstr_poll_kick_player = 156\r\nstr_poll_ban_player = 157\r\nstr_poll_faction_lord = 158\r\nstr_poll_log = 159\r\nstr_poll_existing = 160\r\nstr_poll_invalid = 161\r\nstr_poll_result_no = 162\r\nstr_poll_result_yes = 163\r\nstr_poll_result_admin_no = 164\r\nstr_poll_result_admin_yes = 165\r\nstr_poll_requester_keys = 166\r\nstr_poll_time_left = 167\r\nstr_departed_player = 168\r\nstr_set_s0 = 169\r\nstr_s0_reg1 = 170\r\nstr_invalid_respawn_period = 171\r\nstr_invalid_max_players = 172\r\nstr_invalid_scene = 173\r\nstr_command_not_implemented = 174\r\nstr_bot_count = 175\r\nstr_round_max_seconds = 176\r\nstr_respawn_period = 177\r\nstr_num_bots_voteable = 178\r\nstr_scenes_voteable = 179\r\nstr_factions_voteable = 180\r\nstr_player_respawn_as_bot = 181\r\nstr_kick_voteable = 182\r\nstr_ban_voteable = 183\r\nstr_valid_vote_ratio = 184\r\nstr_auto_team_balance_limit = 185\r\nstr_starting_gold = 186\r\nstr_combat_gold_bonus = 187\r\nstr_round_gold_bonus = 188\r\nstr_player_banners_allowed = 189\r\nstr_force_default_armor = 190\r\nstr_team_points_gained_for_flags = 191\r\nstr_points_gained_for_capturing_flags = 192\r\nstr_game_time_limit = 193\r\nstr_team_point_limit = 194\r\nstr_defender_spawn_count = 195\r\nstr_disallow_ranged_weapons = 196\r\nstr_use_class_limits = 197\r\nstr_class_limit_player_count = 198\r\nstr_squad_size = 199\r\nstr_scale_squad 
= 200\r\nstr_build_points_team1 = 201\r\nstr_build_points_team2 = 202\r\nstr_allow_multiple_firearms = 203\r\nstr_enable_bonuses = 204\r\nstr_bonus_strength = 205\r\nstr_bonus_range = 206\r\nstr_fall_off_horse = 207\r\nstr_horse_dying = 208\r\nstr_auto_kick = 209\r\nstr_max_teamkills_before_kick = 210\r\nstr_auto_horse = 211\r\nstr_auto_swap = 212\r\nstr_limit_grenadier = 213\r\nstr_limit_skirmisher = 214\r\nstr_limit_rifle = 215\r\nstr_limit_cavalry = 216\r\nstr_limit_lancer = 217\r\nstr_limit_hussar = 218\r\nstr_limit_dragoon = 219\r\nstr_limit_cuirassier = 220\r\nstr_limit_heavycav = 221\r\nstr_limit_artillery = 222\r\nstr_limit_rocket = 223\r\nstr_limit_sapper = 224\r\nstr_limit_musician = 225\r\nstr_limit_sergeant = 226\r\nstr_limit_officer = 227\r\nstr_limit_general = 228\r\nstr_max_players = 229\r\nstr_friendly_fire = 230\r\nstr_melee_friendly_fire = 231\r\nstr_friendly_fire_damage_self_ratio = 232\r\nstr_friendly_fire_damage_friend_ratio = 233\r\nstr_ghost_mode = 234\r\nstr_control_block_direction = 235\r\nstr_combat_speed = 236\r\nstr_add_to_game_servers_list = 237\r\nstr_anti_cheat = 238\r\nstr_combat_speed_0 = 239\r\nstr_combat_speed_1 = 240\r\nstr_combat_speed_2 = 241\r\nstr_combat_speed_3 = 242\r\nstr_combat_speed_4 = 243\r\nstr_automatic = 244\r\nstr_by_mouse_movement = 245\r\nstr_free = 246\r\nstr_stick_to_any_player = 247\r\nstr_stick_to_team_members = 248\r\nstr_stick_to_team_members_view = 249\r\nstr_game_rules = 250\r\nstr_s0_reg0 = 251\r\nstr_s0_s1 = 252\r\nstr_s2_s3 = 253\r\nstr_s0__s1 = 254\r\nstr_basic_name_labels = 255\r\nstr_scene_name = 256\r\nstr_server_name = 257\r\nstr_game_password = 258\r\nstr_welcome_message = 259\r\nstr_game_type = 260\r\nstr_scene = 261\r\nstr_start_scene = 262\r\nstr_edit_scene = 263\r\nstr_zero = 264\r\nstr_remaining_time_reg0_s0reg1_s1reg2 = 265\r\nstr_respawning_in_reg0_seconds = 266\r\nstr_press_select_spawn_point = 267\r\nstr_press_select_spawn_area = 268\r\nstr_press_select_confirm_spawn_area = 269\r\nstr_restart_as_peasant_commoner = 270\r\nstr_number_of_factions_reg0 = 271\r\nstr_victory_condition_none = 272\r\nstr_victory_condition_castles = 273\r\nstr_respawn_with_partial_health = 274\r\nstr_respawn_with_full_health = 275\r\nstr_herd_animal_limit_reg0 = 276\r\nstr_always_fine = 277\r\nstr_always_raining = 278\r\nstr_dynamic = 279\r\nstr_player_name = 280\r\nstr_class = 281\r\nstr_kills = 282\r\nstr_deaths = 283\r\nstr_ping = 284\r\nstr_score = 285\r\nstr_score_reg0 = 286\r\nstr_all_players = 287\r\nstr_reg0_players = 288\r\nstr_reg0_player = 289\r\nstr_reg0_players_of_reg1 = 290\r\nstr_reg0_player_of_reg1 = 291\r\nstr_reg0 = 292\r\nstr_s0 = 293\r\nstr_s1 = 294\r\nstr_enable_s0 = 295\r\nstr_disable_s0 = 296\r\nstr_un = 297\r\nstr_drop_items = 298\r\nstr_display_faction_in_name_labels = 299\r\nstr_display_chat_overlay = 300\r\nstr_display_faction_chat_on_overlay = 301\r\nstr_activate_clickable_animation_menu = 302\r\nstr_mute_global_chat = 303\r\nstr_mute_music = 304\r\nstr_walk_on = 305\r\nstr_open_close_helmet = 306\r\nstr_toggle_head = 307\r\nstr_toggle_hand = 308\r\nstr_attach_cart_pack = 309\r\nstr_detach_cart_pack = 310\r\nstr_reveal_money_pouch_to_target = 311\r\nstr_reveal_money_pouch_to_area = 312\r\nstr_commit_suicide = 313\r\nstr_action_menu_end = 314\r\nstr_s0_are_you_sure = 315\r\nstr_unmute_music = 316\r\nstr_hide_name_labels = 317\r\nstr_display_name_labels = 318\r\nstr_display_basic_name_labels = 319\r\nstr_hide_chat_overlay = 320\r\nstr_display_local_chat_on_overlay = 321\r\nstr_deactivate_clickable_animation_menu 
= 322\r\nstr_unmute_global_chat = 323\r\nstr_walk_off = 324\r\nstr_profile_display_name_labels = 325\r\nstr_profile_hide_faction_in_name_labels = 326\r\nstr_profile_display_basic_name_labels = 327\r\nstr_profile_display_chat_overlay = 328\r\nstr_profile_overlay_shows_faction_chat = 329\r\nstr_profile_mute_global_chat = 330\r\nstr_profile_mute_music = 331\r\nstr_profile_non_clickable_animation_menu = 332\r\nstr_profile_disable_automatic_shadow_recalculation = 333\r\nstr_profile_disable_rain_snow_particles = 334\r\nstr_open_close_helmet_error = 335\r\nstr_suicide_in_reg1 = 336\r\nstr_log_s1_committed_suicide = 337\r\nstr_overlay_shows_faction_chat = 338\r\nstr_disable_automatic_shadow_recalculation = 339\r\nstr_disable_rain_snow_particles = 340\r\nstr_reg0__s0 = 341\r\nstr_menu_guestures = 342\r\nstr_menu_guestures_2 = 343\r\nstr_menu_stationary = 344\r\nstr_menu_neutral = 345\r\nstr_menu_hostile = 346\r\nstr_menu_robbery = 347\r\nstr_menu_commands = 348\r\nstr_menu_music_1 = 349\r\nstr_menu_music_2 = 350\r\nstr_anim_cheer = 351\r\nstr_anim_clap = 352\r\nstr_anim_raise_sword = 353\r\nstr_anim_hands_on_hips = 354\r\nstr_anim_hand_on_chest = 355\r\nstr_anim_arms_crossed = 356\r\nstr_anim_stand_still = 357\r\nstr_anim_surrender = 358\r\nstr_anim_nod_head = 359\r\nstr_anim_peeing = 360\r\nstr_anim_wave_hand = 361\r\nstr_anim_this_way = 362\r\nstr_anim_go_away = 363\r\nstr_anim_sit = 364\r\nstr_anim_sleeping = 365\r\nstr_anim_kneel = 366\r\nstr_anim_pray = 367\r\nstr_anim_beg = 368\r\nstr_anim_track = 369\r\nstr_anim_bow = 370\r\nstr_anim_lean_on_sword = 371\r\nstr_anim_pike_stance = 372\r\nstr_anim_away_vile_beggar = 373\r\nstr_anim_my_lord = 374\r\nstr_anim_almost_harvesting_season = 375\r\nstr_anim_whats_this_then = 376\r\nstr_anim_out_for_a_stroll_are_we = 377\r\nstr_anim_we_ride_to_war = 378\r\nstr_anim_less_talking_more_raiding = 379\r\nstr_anim_you_there_stop = 380\r\nstr_anim_war_cry = 381\r\nstr_anim_tear_you_limb_from_limb = 382\r\nstr_anim_better_not_be_a_manhunter = 383\r\nstr_anim_drink_from_your_skull = 384\r\nstr_anim_gods_will_decide_your_fate = 385\r\nstr_anim_nice_head_on_shoulders = 386\r\nstr_anim_hunt_you_down = 387\r\nstr_anim_dead_men_tell_no_tales = 388\r\nstr_anim_stand_and_deliver = 389\r\nstr_anim_your_money_or_your_life = 390\r\nstr_anim_have_our_pay_or_fun = 391\r\nstr_anim_word_about_purse_belongings = 392\r\nstr_anim_easy_way_or_hard_way = 393\r\nstr_anim_everything_has_a_price = 394\r\nstr_anim_slit_your_throat = 395\r\nstr_anim_horn_charge = 396\r\nstr_anim_horn_regroup = 397\r\nstr_anim_horn_retreat = 398\r\nstr_anim_lute_1 = 399\r\nstr_anim_lute_2 = 400\r\nstr_anim_lute_3 = 401\r\nstr_anim_lute_4 = 402\r\nstr_anim_lyre_1 = 403\r\nstr_anim_lyre_2 = 404\r\nstr_anim_lyre_3 = 405\r\nstr_anim_lyre_4 = 406\r\nstr_anim_flute_1 = 407\r\nstr_anim_flute_2 = 408\r\nstr_anim_flute_3 = 409\r\nstr_anim_flute_4 = 410\r\nstr_anim_vielle_1 = 411\r\nstr_anim_vielle_2 = 412\r\nstr_anim_vielle_3 = 413\r\nstr_anim_vielle_4 = 414\r\nstr_log_animation = 415\r\nstr_anim_giving_birth = 416\r\nstr_done = 417\r\nstr_use = 418\r\nstr_buy = 419\r\nstr_buy_sell = 420\r\nstr_buy_sell_craft = 421\r\nstr_take = 422\r\nstr_take_put_craft = 423\r\nstr_dont_have_enough_money = 424\r\nstr_cant_equip_item = 425\r\nstr_collect_reg1_gold = 426\r\nstr_rest = 427\r\nstr_rest_horse = 428\r\nstr_winch_lower = 429\r\nstr_winch_raise = 430\r\nstr_winch_drop = 431\r\nstr_destructible = 432\r\nstr_cut_down = 433\r\nstr_mine = 434\r\nstr_harvest = 435\r\nstr_prune = 436\r\nstr_burn = 437\r\nstr_take_a_seat = 
438\r\nstr_destroy_s1 = 439\r\nstr_destroy_all_items_cart = 440\r\nstr_process_wood = 441\r\nstr_process_metal = 442\r\nstr_process_hammer_metal = 443\r\nstr_process_grind = 444\r\nstr_process_cook = 445\r\nstr_process_press = 446\r\nstr_process_brew = 447\r\nstr_process_tavern = 448\r\nstr_process_preserve = 449\r\nstr_process_spin = 450\r\nstr_process_weave = 451\r\nstr_process_cut = 452\r\nstr_process_leather = 453\r\nstr_stockpile = 454\r\nstr_stockpile_nearly_full = 455\r\nstr_stockpile_full = 456\r\nstr_export = 457\r\nstr_export_for_s1 = 458\r\nstr_import = 459\r\nstr_build = 460\r\nstr_access = 461\r\nstr_attach = 462\r\nstr_not_close_enough = 463\r\nstr_s0__s1_ = 464\r\nstr_drop_money_bag = 465\r\nstr_drop_all_money_bag = 466\r\nstr_deposit_money_chest = 467\r\nstr_withdraw_money_chest = 468\r\nstr_admin_cheat_money = 469\r\nstr_bank_withdraw = 470\r\nstr_bank_withdraw_5k = 471\r\nstr_bank_deposit = 472\r\nstr_bank_deposit_all = 473\r\nstr_no_bank_nearby = 474\r\nstr_too_close_to_bank_for_money_pouch = 475\r\nstr_no_money_chest_nearby = 476\r\nstr_cant_open_money_chest = 477\r\nstr_not_enough_money_in_chest = 478\r\nstr_chest_appears_lockpicked = 479\r\nstr_gold_reg2 = 480\r\nstr_buy_banner_faction = 481\r\nstr_stock_count_reg0 = 482\r\nstr_crafting_refund_reg0_reward_reg1 = 483\r\nstr_crafting_reward_reg1 = 484\r\nstr_selling_price_reg0 = 485\r\nstr_womens_clothes = 486\r\nstr_item_id = 487\r\nstr_spawn_s1 = 488\r\nstr_troop_not_available = 489\r\nstr_troop_train = 490\r\nstr_troop_assume_role = 491\r\nstr_troop_become = 492\r\nstr_troop_become_for = 493\r\nstr_troop_cost = 494\r\nstr_troop_strength_agility = 495\r\nstr_troop_weapon_proficiencies = 496\r\nstr_requires_strength_reg1 = 497\r\nstr_requires_power_draw_reg1 = 498\r\nstr_requires_power_throw_reg1 = 499\r\nstr_requires_shield_reg1 = 500\r\nstr_requires_riding_reg1 = 501\r\nstr_damage_reg1_speed_reg2 = 502\r\nstr_accuracy_reg1_reload_reg2 = 503\r\nstr_joined_the_s1 = 504\r\nstr_s0_joined_the_s1 = 505\r\nstr_must_leave_s1_first = 506\r\nstr_not_a_member_of_s1 = 507\r\nstr_inactive_faction_change = 508\r\nstr_s0_has_dismounted_a_s1 = 509\r\nstr_s0_has_mounted_a_s1 = 510\r\nstr_s1_captured_s2 = 511\r\nstr_your_faction_not_hostile_to_s1 = 512\r\nstr_your_faction_not_captured_required_points = 513\r\nstr_door_locked_by_s1 = 514\r\nstr_door_bolted = 515\r\nstr_chest_locked_by_s1 = 516\r\nstr_s0_killed_faction_member = 517\r\nstr_s0_killed_friendly_faction_member = 518\r\nstr_s0_has_been_outlawed = 519\r\nstr_you_have_been_outlawed = 520\r\nstr_your_outlaw_rating_now_reg1 = 521\r\nstr_scene_error_this_faction_is_not_active = 522\r\nstr_you_are_not_lord_of_s1 = 523\r\nstr_you_are_now_lord_of_s1 = 524\r\nstr_s10_now_lord_of_s1 = 525\r\nstr_s10_now_known_as_s1 = 526\r\nstr_banner_used_by_s1 = 527\r\nstr_s1_doesnt_need_merc = 528\r\nstr_s1_reign_supreme = 529\r\nstr_s1_now_hostile_towards_s10 = 530\r\nstr_s1_and_s10_made_peace = 531\r\nstr_lord_of_s1_withdraws_offer_of_peace = 532\r\nstr_lord_of_s1_offers_peace = 533\r\nstr_not_riding_necessary_horse = 534\r\nstr_already_attached_cart = 535\r\nstr_resource_required = 536\r\nstr_not_at_mast_or_rudder = 537\r\nstr_craft_not_skilled = 538\r\nstr_craft_wrong_resources = 539\r\nstr_no_horse = 540\r\nstr_too_wounded_to_rest = 541\r\nstr_too_hungry_to_rest = 542\r\nstr_horse_too_wounded_to_sell = 543\r\nstr_dismount_to_sell = 544\r\nstr_item_too_long_for_container = 545\r\nstr_cant_put_money_bag_in_container = 546\r\nstr_cant_put_ammo_in_container = 
547\r\nstr_cant_put_non_ammo_in_container = 548\r\nstr_too_far_away_loot = 549\r\nstr_herd_animal_limit_reached = 550\r\nstr_s1_reveals_money_pouch_containing_about_reg1 = 551\r\nstr_you_reveal_money_pouch_to_s1 = 552\r\nstr_you_reveal_money_pouch_to_near_by_players = 553\r\nstr_s1_revealed_money_pouch_containing_reg1_to_s2 = 554\r\nstr_s1_revealed_money_pouch_containing_reg1_to_near_by_players_s0 = 555\r\nstr_s1_revealed_money_pouch_containing_reg1_to_near_by_players_none = 556\r\nstr_your_target_too_far_away = 557\r\nstr_no_target_selected = 558\r\nstr_chat_format = 559\r\nstr_send_message_to_players_nearby = 560\r\nstr_send_message_to_the_s11 = 561\r\nstr_change_name_of_your_faction = 562\r\nstr_send_admin_message = 563\r\nstr_send_admin_message_to_s1 = 564\r\nstr_admin_chat_format = 565\r\nstr_admin_chat_player_format = 566\r\nstr_admin_chat_to_player_format = 567\r\nstr_admin_announcement_format = 568\r\nstr_local_chat_log_format = 569\r\nstr_faction_chat_log_format = 570\r\nstr_s1_has_left_the_game_with_id_reg0 = 571\r\nstr_error_unable_to_find_link_scene_prop = 572\r\nstr_error_unlinked_scene_prop = 573\r\nstr_error_edit_mode_not_enabled = 574\r\nstr_no_more_unlinked_scene_props = 575\r\nstr_error_scene_prop_0_pw = 576\r\nstr_error_load_out_id_reg0_not_defined = 577\r\nstr_game_type_1_info = 578\r\nstr_game_type_2_info = 579\r\nstr_game_type_3_info = 580\r\nstr_game_type_4_info = 581\r\nstr_game_type_5_info = 582\r\nstr_pw_welcome = 583\r\nstr_pw_editor_welcome = 584\r\nstr_pw_editor_info = 585\r\nstr_pw_editor_values_info = 586\r\nstr_pw_editor_castle_names = 587\r\nstr_castle_names_numbers_format = 588\r\nstr_book_of_clothing = 589\r\nstr_book_of_weapons = 590\r\nstr_book_of_armor = 591\r\nstr_book_of_healing = 592\r\nstr_name_server_error_code_reg0 = 593\r\nstr_name_server_input_error_parameter_s0 = 594\r\nstr_name_server_invalid_response = 595\r\nstr_name_server_log_s10 = 596\r\nstr_kicked_using_other_players_name = 597\r\nstr_kicked_using_other_clan_tag = 598\r\nstr_kicked_using_invalid_name = 599\r\nstr_kicked_not_registered = 600\r\nstr_http_s1_password_s2_id_reg1_uid_reg2_name_s3 = 601\r\nstr_http_s0_admin = 602\r\nstr_name_server = 603\r\nstr_name_server_password = 604\r\nstr_scene_name_1 = 605\r\nstr_scene_name_2 = 606\r\nstr_scene_name_3 = 607\r\nstr_scene_name_4 = 608\r\nstr_scene_name_5 = 609\r\nstr_scene_name_6 = 610\r\nstr_scene_name_7 = 611\r\nstr_scene_name_8 = 612\r\nstr_scene_name_9 = 613\r\nstr_scene_name_10 = 614\r\nstr_scene_name_11 = 615\r\nstr_scene_name_12 = 616\r\nstr_scene_name_13 = 617\r\nstr_scene_name_14 = 618\r\nstr_scene_name_15 = 619\r\nstr_scene_name_16 = 620\r\nstr_scene_name_17 = 621\r\nstr_scene_name_18 = 622\r\nstr_scene_name_19 = 623\r\nstr_scene_name_20 = 624\r\nstr_scene_name_21 = 625\r\nstr_scene_name_22 = 626\r\nstr_scene_name_23 = 627\r\nstr_scene_name_24 = 628\r\nstr_scene_name_25 = 629\r\nstr_scene_name_26 = 630\r\nstr_scene_name_27 = 631\r\nstr_scene_name_28 = 632\r\nstr_scene_name_29 = 633\r\nstr_scene_name_30 = 634\r\nstr_scene_names_end = 635\r\nstr_castle_name_0 = 636\r\nstr_castle_name_1 = 637\r\nstr_castle_name_2 = 638\r\nstr_castle_name_3 = 639\r\nstr_castle_name_4 = 640\r\nstr_castle_name_5 = 641\r\nstr_castle_name_6 = 642\r\nstr_castle_name_7 = 643\r\nstr_castle_name_8 = 644\r\nstr_castle_name_9 = 645\r\nstr_castle_name_10 = 646\r\nstr_castle_name_11 = 647\r\nstr_castle_name_12 = 648\r\nstr_castle_name_13 = 649\r\nstr_castle_name_14 = 650\r\nstr_castle_name_15 = 651\r\nstr_castle_name_16 = 652\r\nstr_castle_name_17 = 
653\r\nstr_castle_name_18 = 654\r\nstr_castle_name_19 = 655\r\nstr_castle_name_20 = 656\r\nstr_castle_name_21 = 657\r\nstr_castle_name_22 = 658\r\nstr_castle_name_23 = 659\r\nstr_castle_name_24 = 660\r\nstr_castle_name_25 = 661\r\nstr_castle_name_26 = 662\r\nstr_castle_name_27 = 663\r\nstr_castle_name_28 = 664\r\nstr_castle_name_29 = 665\r\nstr_castle_name_30 = 666\r\nstr_castle_name_31 = 667\r\nstr_castle_name_32 = 668\r\nstr_castle_name_33 = 669\r\nstr_castle_name_34 = 670\r\nstr_castle_name_35 = 671\r\nstr_castle_name_36 = 672\r\nstr_castle_name_37 = 673\r\nstr_castle_name_38 = 674\r\nstr_castle_name_39 = 675\r\nstr_castle_name_40 = 676\r\nstr_castle_name_41 = 677\r\nstr_castle_name_42 = 678\r\nstr_castle_name_43 = 679\r\nstr_castle_name_44 = 680\r\nstr_castle_name_45 = 681\r\nstr_castle_name_46 = 682\r\nstr_castle_name_47 = 683\r\nstr_castle_name_48 = 684\r\nstr_castle_name_49 = 685\r\nstr_castle_name_50 = 686\r\nstr_castle_name_51 = 687\r\nstr_castle_name_52 = 688\r\nstr_castle_name_53 = 689\r\nstr_castle_name_54 = 690\r\nstr_castle_name_55 = 691\r\nstr_castle_name_56 = 692\r\nstr_castle_name_57 = 693\r\nstr_castle_name_58 = 694\r\nstr_castle_name_59 = 695\r\nstr_castle_name_60 = 696\r\nstr_castle_name_61 = 697\r\nstr_castle_name_62 = 698\r\nstr_castle_name_63 = 699\r\nstr_castle_name_64 = 700\r\nstr_castle_name_65 = 701\r\nstr_castle_name_66 = 702\r\nstr_castle_name_69 = 703\r\nstr_castle_name_70 = 704\r\nstr_castle_name_71 = 705\r\nstr_castle_name_72 = 706\r\nstr_castle_name_73 = 707\r\nstr_castle_name_74 = 708\r\nstr_castle_name_75 = 709\r\nstr_castle_name_76 = 710\r\nstr_castle_name_77 = 711\r\nstr_castle_name_78 = 712\r\nstr_castle_name_79 = 713\r\nstr_castle_name_80 = 714\r\nstr_castle_name_81 = 715\r\nstr_castle_name_82 = 716\r\nstr_castle_name_83 = 717\r\nstr_castle_name_84 = 718\r\nstr_castle_name_85 = 719\r\nstr_castle_name_86 = 720\r\nstr_castle_name_87 = 721\r\nstr_castle_name_88 = 722\r\nstr_castle_name_89 = 723\r\nstr_castle_name_90 = 724\r\nstr_castle_name_91 = 725\r\nstr_castle_name_92 = 726\r\nstr_castle_name_93 = 727\r\nstr_castle_name_94 = 728\r\nstr_castle_name_95 = 729\r\nstr_castle_name_96 = 730\r\nstr_castle_name_97 = 731\r\nstr_castle_name_98 = 732\r\nstr_castle_name_99 = 733\r\nstr_castle_name_100 = 734\r\nstr_castle_name_101 = 735\r\nstr_castle_name_102 = 736\r\nstr_castle_name_103 = 737\r\nstr_castle_name_104 = 738\r\nstr_castle_name_105 = 739\r\nstr_castle_name_106 = 740\r\nstr_castle_name_107 = 741\r\nstr_castle_name_108 = 742\r\nstr_castle_name_109 = 743\r\nstr_castle_name_110 = 744\r\nstr_castle_name_111 = 745\r\nstr_castle_name_112 = 746\r\nstr_castle_name_113 = 747\r\nstr_castle_name_114 = 748\r\nstr_castle_name_115 = 749\r\nstr_castle_name_116 = 750\r\nstr_castle_name_117 = 751\r\nstr_castle_name_118 = 752\r\nstr_castle_name_119 = 753\r\nstr_castle_name_120 = 754\r\nstr_castle_name_121 = 755\r\nstr_castle_name_122 = 756\r\nstr_castle_name_123 = 757\r\nstr_castle_name_124 = 758\r\nstr_castle_name_125 = 759\r\nstr_castle_name_126 = 760\r\nstr_castle_name_127 = 761\r\nstr_castle_names_end = 762\r\nstr_attach_to_himself_log = 763\r\nstr_attach_to_horse_log = 764\r\nstr_detach_from_himself_log = 765\r\nstr_detach_from_horse_log = 766\r\nstr_log_use_money_bag = 767\r\nstr_log_spawn_money_bag = 768\r\nstr_log_money_chest_withdraw = 769\r\nstr_log_money_chest_deposit = 770\r\nstr_log_hit_door = 771\r\nstr_log_repair_door = 772\r\nstr_log_hit_chest = 773\r\nstr_log_repair_chest = 774\r\nstr_log_hit_ship = 775\r\nstr_log_drop_item = 776\r\nstr_log_pick_up_item 
= 777\r\nstr_log_put_item_in_inventory = 778\r\nstr_log_put_item_in_finventory = 779\r\nstr_log_take_item_from_inventory = 780\r\nstr_log_take_item_from_finventory = 781\r\nstr_log_hit_player = 782\r\nstr_log_hit_phorse = 783\r\nstr_log_hit_animal = 784\r\nstr_log_bump = 785\r\nstr_log_heal_player = 786\r\nstr_log_heal_phorse = 787\r\nstr_log_heal_animal = 788\r\nstr_log_loot_corpse = 789\r\nstr_log_s11_dropped_loot = 790\r\nstr_log_use_cart = 791\r\nstr_log_capture = 792\r\nstr_log_capture_secondary = 793\r\nstr_shield_hit_log = 794\r\nstr_log_equipment = 795\r\nstr_s1_kicked_s2_from_s3 = 796\r\nstr_s1_outlawed_s2_from_s3 = 797\r\nstr_s2_kicked_you_from_the_faction = 798\r\nstr_s2_outlawed_you_from_the_faction = 799\r\n","sub_path":"Alex_RP/New_Edits/ID_strings.py","file_name":"ID_strings.py","file_ext":"py","file_size_in_byte":22781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"626826948","text":"# Copyright 2014\n# The Cloudscaling Group, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nlogging.getLogger('boto').setLevel(logging.INFO)\nlogging.getLogger('paramiko').setLevel(logging.WARNING)\nLOG = logging.getLogger(__name__)\n\n\ndef detect_new_volume(proc_partitions, proc_partitions_new):\n devices = get_devices(proc_partitions)\n devices_new = get_devices(proc_partitions_new)\n devices_new -= devices\n return devices_new.pop()\n\n\ndef get_devices(proc_partitions):\n devices = set()\n for line in proc_partitions:\n items = [item for item in line.split(' ') if len(item) > 0]\n if len(items) > 0:\n devices.add(items[3])\n\n return devices\n","sub_path":"ec2api/tests/functional/obsolete/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"128659032","text":"import json\nfrom django.http import HttpResponse\nfrom django.views.generic import View\n\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.utils.decorators import method_decorator\n\nfrom ..settings import SCRUFFY_BLOG_SETTINGS\nfrom ..models import Post, PostElement\nfrom ..forms import PostForm, PostElementForm, PostImageForm\n\n\nclass PostDetail(View):\n \"\"\" Edit a blog post. 
\"\"\"\n\n base_template = 'admin/base_site.html'\n editor_template = 'scruffy_blog/writing/post_detail.html'\n cancel_url = \"admin:scruffy_blog_post_changelist\" # By name\n\n # You may swap these out for proxy models, but do so carefully.\n post_model = Post\n element_model = PostElement\n\n def __init__(self, *args, **kwargs):\n super(PostDetail, self).__init__(*args, **kwargs)\n self.base_template = kwargs.get('base_template') or self.base_template\n\n @property\n def blog(self):\n \"\"\"\n Return a blog if you want to lock the post to it.\n Return `None` and the user will select the blog.\n By default, this does nothing, but you can override it.\n \"\"\"\n return None\n\n @property\n def scruffy_cropper_url(self):\n return None\n\n @method_decorator(login_required)\n @method_decorator(permission_required('blog.change_post'))\n def dispatch(self, *args, **kwargs):\n return super(PostDetail, self).dispatch(*args, **kwargs)\n\n def get_forms(self):\n self.form = PostForm(self.request.POST or None, \n files=self.request.FILES or None,\n instance=self.post, \n prefix=\"post-core\", \n redirect_url=self.request.path, \n cropper_url=self.scruffy_cropper_url,\n blog=self.blog\n )\n self.embed_form = PostElementForm(post=self.post, ui=True)\n return self.form, self.embed_form\n\n def get_post(self):\n \"\"\" Get the post. \"\"\"\n obj_id = self.kwargs.get('obj_id')\n if obj_id:\n self.post = get_object_or_404(Post, id=obj_id)\n else:\n self.post = Post()\n return self.post\n\n def post(self, request, **kwargs):\n self.get_post()\n form, embed_form = self.get_forms()\n if form.is_valid():\n obj = form.save()\n element_forms = self.convert_json_to_elements(request.POST.get('export'), obj, files=request.FILES)\n\n element_ids = []\n elements = []\n\n # Save (and keep track of) all the forms.\n for e_form in element_forms:\n if not e_form.is_valid():\n return HttpResponse(unicode(e_form.errors))\n e_obj = e_form.save()\n elements.append(e_obj)\n element_ids.append(e_obj.id)\n\n # Wipe out all the unkept embeds\n PostElement.objects.filter(post_id=obj.id).exclude(id__in=element_ids).delete()\n messages.add_message(request, messages.SUCCESS, 'Saved! Keep writing!')\n obj.save()\n\n if str(obj.id) in request.path:\n return redirect(request.path)\n else:\n # This is a hack, since I really don't know where the edit page lives.\n return redirect(\"{}../{}\".format(request.path, str(obj.id)))\n return self.render()\n\n\n def get(self, request, *args, **kwargs):\n self.get_post()\n self.get_forms()\n return self.render()\n\n def render(self):\n return render(self.request, self.editor_template, {\n 'base_template': self.base_template,\n 'post': self.post,\n 'form': self.form,\n 'embed_form': self.embed_form,\n \"cancel_url\": self.cancel_url,\n })\n\n def convert_json_to_elements(self, export, post_obj, files={}):\n \"\"\"\n Turns the json into elements for a post.\n Returns a tuple of text_elements and embed_elements\n \"\"\"\n elements = []\n if not export:\n return elements\n data = json.loads( export )\n\n for key, params in data.items():\n instance = PostElement()\n if params.get('obj_id'):\n try: # Text nodes will always be new. 
Oh well.\n instance = PostElement.objects.get(id=params.get('obj_id'), post=post_obj)\n except PostElement.DoesNotExist:\n pass\n \n order = int(key.split('_')[-1])\n form_data = {\n 'obj_type' : params.get('obj_type') or 'text',\n 'content' : params.get('content') or '',\n 'src' : params.get('src') or '',\n 'post' : post_obj.id,\n 'order' : order,\n 'image' : params.get('image'),\n }\n bound_form = PostElementForm(form_data, instance=instance, post=post_obj)\n elements.append(bound_form)\n\n return elements\n\n\n@login_required\ndef upload_photo(request, obj_id=None, img_id=None):\n \"\"\"The awful async iframe uploader.\"\"\"\n\n form = PostImageForm(request.POST or None, files=request.FILES or None)\n resp = None\n\n if request.POST:\n if not obj_id:\n resp = {'errors':'Please save your post before adding images.'}\n resp = json.dumps(resp)\n if obj_id and form.is_valid():\n obj = form.save(commit=False)\n obj.post_id = obj_id\n obj.save()\n resp = {'filename': obj.image.url,\n 'thumb': obj.__unicode__(),\n 'id': obj.id,\n }\n resp = json.dumps(resp)\n\n return render(request, 'scruffy_blog/writing/upload_photo.html', {\n 'form': form,\n 'resp': resp,\n 'stylesheet': SCRUFFY_BLOG_SETTINGS['upload_photo_stylesheet'],\n })\n","sub_path":"scruffy_blog/views/writing.py","file_name":"writing.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"340880834","text":"from typing import Dict\n\nfrom .queryset import QuerySet\nfrom json import load\nfrom os.path import exists\n\n\nclass HBOSConfig(object):\n\n # noinspection PyBroadException\n def __init__(self, configFile):\n config = None\n self._new_install = False\n self._querysets: Dict[str, QuerySet] = dict()\n if exists(configFile):\n try:\n with open(configFile, \"r\") as f:\n config = load(f)\n f.close()\n except:\n config = None\n self._new_install=True\n if config is None:\n self._new_install = True\n return\n\n for k in config[\"querysets\"]:\n self._querysets[k] = QuerySet(config[\"querysets\"][k])\n\n @property\n def querysets(self) -> Dict[str, QuerySet]:\n return self._querysets\n\n @querysets.setter\n def set_querysets(self, value: Dict[str, QuerySet]):\n self._querysets = value\n\n @property\n def is_new(self) -> bool:\n return self._new_install\n\n @is_new.setter\n def set_is_new(self, value: bool):\n self._new_install = value","sub_path":"hbos_server/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"555588067","text":"import json\nimport os\nimport sys\n\nimport requests\n\nAUTH_URL = \"https://accounts.spotify.com/api/token\"\nTOKEN_RELATIVE_PATH = \"spotify/access_data/token.json\"\nCREDENTIALS_RELATIVE_PATH = \"spotify/access_data/credentials.json\"\n\nclient_id = None\nclient_secret = None\ntoken = None\n\n\ndef credentialsProvided():\n if os.path.exists(CREDENTIALS_RELATIVE_PATH):\n return True\n return False\n\n\ndef loadCredentials():\n if not credentialsProvided():\n sys.exit()\n with open(CREDENTIALS_RELATIVE_PATH) as f:\n json_credentials = json.load(f)\n global client_id\n global client_secret\n client_id = json_credentials[\"client_id\"]\n client_secret = json_credentials[\"client_secret\"]\n\n\ndef tokenFileSaved():\n if os.path.exists(TOKEN_RELATIVE_PATH):\n return True\n return False\n\n\ndef loadToken():\n global token\n if token is not None:\n return\n if tokenFileSaved():\n 
with open(TOKEN_RELATIVE_PATH) as f:\n            token = json.load(f)[\"token\"]\n    else:\n        getNewToken()\n\n\ndef requestAuthorization():\n    if client_id is None and client_secret is None:\n        loadCredentials()\n    auth_response = requests.post(AUTH_URL, {\n        \"grant_type\": \"client_credentials\",\n        \"client_id\": client_id,\n        \"client_secret\": client_secret,\n    })\n    return auth_response\n\n\ndef getNewToken():\n    response = requestAuthorization()\n    response_json = response.json()\n    global token\n    token = response_json[\"access_token\"]\n    saveTokenInJsonFile()\n    return token\n\n\ndef saveTokenInJsonFile():\n    if token is not None:\n        with open(TOKEN_RELATIVE_PATH, \"w\") as f:\n            json.dump({\"token\": token}, f)\n","sub_path":"spotify/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"466844452","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n#Script is a simple, object oriented, GUI calculator for DIY projects\n\n##It has 3 functions:\n \n    #Circumference:\n    \n        #Supply length of raw planks and dimensions of room to calculate how \n        #many planks are required to surround the room. Once calculation is complete\n        #You may select \"Next room\", this will keep any offcuts left from this\n        #calculation and attempt to use them in future \"rooms\", or \"New calc\"\n        #which will discard any offcuts left from this calculation and start fresh.\n    \n    #Area:\n    \n        #Supply length and width of raw planks and dimensions of room to \n        #calculate how many planks are required to cover the area of the room.\n        #Once calculation is complete You may select \"Next room\", this will\n        #keep any offcuts left from this calculation and attempt to use them\n        #in future \"rooms\", or \"New calc\" which will discard any offcuts \n        #left from this calculation and start fresh.\n    \n    #Custom:\n    \n        #Supply length and width of raw planks and length and width of any boards\n        #you need to construct your project. This will find various combinations\n        #of cuts from each plank and select the combination with least wastage\n        #from each. 
It will then tell you the minimum number of fresh planks\n #you need to complete your project\n\n\n\nimport tkinter\nfrom tkinter import StringVar, ttk\n\n\n##Class representing lengths of fresh wood to be used\nclass Plank:\n \n def __init__(self, length, width, woodstore):\n #Save woodstore so we can refer to it easily\n self.woodstore = woodstore\n \n #Define attributes of this length\n self.length = length\n self.width = width\n\n \n def cut_board(self, length, width, offcut):\n #Attempt to cut board of given size from this plank\n try:\n #Raise an exception if passed sizes are longet than this plank\n if length > self.length or width > self.width:\n raise ValueError\n \n #Cut board off this plank\n self.length = self.length - length\n \n #add cut board to list\n self.woodstore.boards.append(Board(length, width, self.woodstore))\n \n if offcut == False:\n #Add remainder of this plank to offcuts list\n if self.length > 0:\n self.woodstore.offcuts.append(self)\n\n \n #If width of board is less than width of this length add to offcut list\n if self.width - width > 0:\n self.woodstore.offcuts.append(Plank(length, self.width - width, self.woodstore))\n\n except ValueError:\n #If we have tried to cut a longer length than available inform user\n ##Shouldnt be able to trigger this\n Error_Message(\"You tried to cut off more than was available!!\")\n \n \n\n\n##Class representing boards cut for purpose\nclass Board:\n \n def __init__(self, length, width, woodstore):\n self.woodstore = woodstore\n self.length = length\n self.width = width\n\n\n#Class to represent physical storage of cut pieces\nclass WoodStore:\n \n def __init__(self):\n #Define properties and initialize store\n self.planks = None\n self.boards = None\n self.offcuts = None\n self.empty_store()\n \n \n #Reset store to starting values\n def empty_store(self):\n #Counter to keep track of how many planks we use\n self.planks = 0\n \n \n #Lists to hold cut-to-size boards and offcuts\n self.boards = []\n self.offcuts = []\n \n \n def get_plank(self, length, width):\n #Increase count of planks used and give out plank for use\n self.planks = self.planks + 1\n return Plank(length, width, self)\n \n\n\n#Class containing 2 option \"confirmation\" popup message\nclass Confirmation_Message:\n \n def __init__(self, parent, text):\n #Define confirmation popup window\n self.popup = tkinter.Toplevel()\n self.popup.geometry(\"450x150\")\n self.popup.title(\"Continue?\")\n self.popup.resizable(0, 0)\n \n #Define frame to hold text\n self.popup_frame = tkinter.Frame(\n self.popup, padx=30, pady=15)\n \n self.popup_frame.grid(\n row=1, column=1)\n \n #Define frame to hold buttons\n self.popup_button_frame = tkinter.Frame(\n self.popup, padx=30, pady=10)\n \n self.popup_button_frame.grid(\n row=2, column=1)\n \n #Define spacer for between buttons\n self.popup_spacer = tkinter.Frame(\n self.popup_button_frame, width=180)\n \n self.popup_spacer.grid(\n row=2, column=2)\n \n #Define warning message\n self.popup_label = tkinter.Label(\n self.popup_frame, text=text)\n \n self.popup_label.grid(\n row=1, column=1, columnspan=3)\n \n #Define buttons\n self.popup_cancel = tkinter.Button(\n self.popup_button_frame, text=\"Next room\")\n \n self.popup_cancel.grid(\n row=2, column=1)\n \n self.popup_ok = tkinter.Button(\n self.popup_button_frame, text=\"New calc\")\n \n self.popup_ok.grid(\n row=2, column=3)\n \n #Destructor to completely delete popup\n def close(self):\n self.popup.destroy()\n \n\nclass Error_Message:\n\n def ok(self):\n 
self.warning.destroy()\n \n def __init__(self, text):\n \n #Define error popup\n self.warning = tkinter.Toplevel()\n self.warning.geometry(\"275x100\")\n self.warning.resizable(0, 0)\n self.warning.title(\"Error\")\n \n #Define frame to hold widgets\n self.warning_frame = tkinter.Frame(\n self.warning, padx=20, pady=10,)\n \n self.warning_frame.pack()\n \n #Only insert spacer if there is only one line of text in the warning message\n if len(text.split(\"\\n\")) < 2 :\n #Define spacer for between text and button\n self.warning_spacer = tkinter.Frame(\n self.warning_frame, height=10,)\n \n self.warning_spacer.grid(\n row=2, column=2)\n \n #Define warning message\n self.warning_label = tkinter.Label(\n self.warning_frame, text=text, wraplength=200)\n \n self.warning_label.grid(\n row=1, column=1, columnspan=3)\n \n #Define ok button\n self.warning_ok = tkinter.Button(\n self.warning_frame, text=\"Ok\", command=self.ok)\n \n self.warning_ok.grid(\n row=3, column=2)\n \n\n\nclass Area_View:\n \n def __init__(self, root, welcome):\n self.root = root\n self.welcome = welcome\n self.woodstore = self.welcome.woodstore\n \n #Make sure woodstore is empty\n self.woodstore.empty_store()\n \n \n self.frame = tkinter.Frame(self.root)\n self.frame.pack(fill=\"both\", expand=True)\n \n \n self.top_spacer = tkinter.Frame(self.frame, width=60, height=30)\n self.top_spacer.grid(row=0, column=0)\n \n self.title_label = tkinter.Label(self.frame, text=\"Calculate area\")\n self.title_label.grid(row=1, column=1, pady=10, columnspan=2)\n \n self.heading_label = tkinter.Label(self.frame,\\\n text=\"Please enter details below\")\n self.heading_label.grid(row=2, column=1, columnspan=2, pady=10)\n \n \n self.width_label = tkinter.Label(self.frame, text=\"Width of room: \")\n self.width_label.grid(row=3, column=1, pady=10)\n\n self.width_entry = tkinter.Entry(self.frame)\n self.width_entry.grid(row=3, column=2, pady=10)\n \n self.width_unit_label = tkinter.Label(self.frame, text=\"mm\")\n self.width_unit_label.grid(row=3, column=3, pady=10)\n \n \n self.length_label = tkinter.Label(self.frame, text=\"Length of room: \")\n self.length_label.grid(row=4, column=1, pady=10)\n\n self.length_entry = tkinter.Entry(self.frame)\n self.length_entry.grid(row=4, column=2, pady=10)\n \n self.length_unit_label = tkinter.Label(self.frame, text=\"mm\")\n self.length_unit_label.grid(row=4, column=3, pady=10)\n \n \n self.plank_width_label = tkinter.Label(self.frame, text=\"Width of planks: \")\n self.plank_width_label.grid(row=5, column=1, pady=10)\n\n self.plank_width_entry = tkinter.Entry(self.frame)\n self.plank_width_entry.grid(row=5, column=2, pady=10)\n \n self.plankw_unit_label = tkinter.Label(self.frame, text=\"mm\")\n self.plankw_unit_label.grid(row=5, column=3, pady=10)\n \n \n self.plank_length_label = tkinter.Label(self.frame, text=\"Length of planks: \")\n self.plank_length_label.grid(row=6, column=1, pady=10)\n\n self.plank_length_entry = tkinter.Entry(self.frame)\n self.plank_length_entry.grid(row=6, column=2, pady=10)\n \n self.plankl_unit_label = tkinter.Label(self.frame, text=\"mm\")\n self.plankl_unit_label.grid(row=6, column=3, pady=10)\n \n self.direction_label = tkinter.Label(self.frame, text=\"Which direction are you laying boards?\")\n self.direction_label.grid(row=7, column=1, columnspan=2, pady=10)\n \n self.direction_options = [\"Lengthwise\", \"Widthwise\"]\n self.direction = StringVar()\n \n self.direction_menu = ttk.Combobox(\n self.frame, textvariable=self.direction,\\\n values=self.direction_options, 
width=15)\n self.direction_menu.grid(row=8, column=2, pady=10)\n self.direction_menu.set(\"Lengthwise\")\n \n self.back_button = tkinter.Button(self.frame, text=\"Back\", command=self.welcome.back, width=12, height=3)\n self.back_button.grid(row=9, column=1, pady=20)\n \n \n self.calculate_button = tkinter.Button(self.frame, text=\"Calculate\", command=self.calculate, width=12, height=3)\n self.calculate_button.grid(row=9, column=2, columnspan=2, pady=20, sticky=\"e\")\n \n\n\n def add_room(self):\n #Close popup and leave woodstore intact to continue calculating\n self.info.close()\n \n def new_calc(self):\n #Empty woodstore for next calculation\n self.woodstore.empty_store()\n self.info.close()\n \n\n def calculate(self):\n self.error = False\n #check user has supplied all relevant information\n try:\n self.room_length = int(self.length_entry.get())\n self.room_width = int(self.width_entry.get())\n self.plank_length = int(self.plank_length_entry.get())\n self.plank_width = int(self.plank_width_entry.get())\n \n \n #If any details have been given in incorrect format alert user and halt processing\n except:\n Error_Message(\"Please check your input!\")\n self.error = True\n\n \n #If no errors gathering information we can proceed\n if self.error == False:\n \n #Send data to function to fill given area, accounting for direction of boards\n \n #If boards to be layed lengthwise in room\n if self.direction.get() == \"Lengthwise\":\n self.fill_area(self.room_length, self.room_width)\n \n \n #If boards to be layed widthwise in room\n else:\n self.fill_area(self.room_width, self.room_length)\n \n #Display result to user\n self.info = Confirmation_Message(self, \"You need {} planks!\".format(self.woodstore.planks))\n #Configure \"next room\" button\n self.info.popup_cancel.config(command=self.add_room)\n #Configure \"new calculation button\n self.info.popup_ok.config(command = self.new_calc)\n\n\n\n def fill_area(self, length, width):\n \n while width > 0:\n #Set length for next strip of boards\n this_strip = length\n \n while this_strip > 0:\n #If there is enough room to cut a full width board\n if width >= self.plank_width:\n \n \n #If enough space for a full plank take one from store and reduce length of this strip accordingly\n if this_strip > self.plank_length:\n self.woodstore.get_plank(self.plank_length, self.plank_width)\n this_strip = this_strip - self.plank_length\n \n else:\n #Check offcuts to see if we can use one of them\n for i in self.woodstore.offcuts:\n if i.length > this_strip and this_strip > 0 and self.plank_width == i.width:\n i.cut_board(this_strip, self.plank_width, True)\n\n #if we have used all of this offcut remove it from the list\n if i.length == 0:\n self.woodstore.offcuts.remove(i)\n this_strip = 0\n \n \n #If we didnt have an offcut to use cut from a new plank \n if this_strip > 0 and this_strip <= self.plank_length:\n #get fresh plank from woodstore\n this_plank = self.woodstore.get_plank(self.plank_length, self.plank_width)\n \n #Cut plank to needed size and keep offcut for use\n this_plank.cut_board(this_strip, self.plank_width, False)\n\n this_strip = 0\n \n #reduce width to cover by width of boards cut\n width = width - self.plank_width\n \n #If there is not enough width left for a full plank\n else:\n #If theres enough space for a full plank cut one to width\n if this_strip > self.plank_length:\n this_plank = self.woodstore.get_plank(self.plank_length, self.plank_width)\n this_plank.cut_board(self.plank_length, width, False)\n this_strip = this_strip - 
self.plank_length\n \n #If we have completed the last strip reduce width to 0\n if this_strip == 0:\n width = 0\n \n \n else:\n #Check offcuts to see if we can use one of them\n for i in self.woodstore.offcuts:\n if i.length >= this_strip and this_strip > 0 and i.width >= width:\n i.cut_board(this_strip, width, True)\n\n #if we have used all of this offcut remove it from the list\n if i.length == 0:\n self.woodstore.offcuts.remove(i)\n this_strip = 0\n width = 0\n \n \n #If we didnt have an offcut to use cut from a new plank \n if this_strip > 0 and this_strip <= self.plank_length:\n #get fresh plank from woodstore\n this_plank = self.woodstore.get_plank(self.plank_length, self.plank_width)\n \n #Cut plank to needed size and keep offcut for use\n this_plank.cut_board(this_strip, width, False)\n\n this_strip = 0\n width = 0\n\n \n\n\n\n\n\nclass Circumference_View:\n \n def __init__(self, root, welcome):\n self.root = root\n self.welcome = welcome\n self.woodstore = self.welcome.woodstore\n \n #Make sure woodstore is empty\n self.woodstore.empty_store()\n \n \n self.frame = tkinter.Frame(self.root)\n self.frame.pack(fill=\"both\", expand=True)\n \n self.top_spacer = tkinter.Frame(self.frame, width=50, height=30)\n self.top_spacer.grid(row=0, column=0, pady=10)\n \n self.title_label = tkinter.Label(self.frame, text=\"Calculate circumference\")\n self.title_label.grid(row=1, column=1, pady=10, columnspan=2)\n \n self.heading_label = tkinter.Label(self.frame,\\\n text=\"Please enter details below\")\n self.heading_label.grid(row=2, column=1, columnspan=2, pady=10)\n \n \n self.width_label = tkinter.Label(self.frame, text=\"Width of room: \")\n self.width_label.grid(row=3, column=1, pady=10)\n\n self.width_entry = tkinter.Entry(self.frame)\n self.width_entry.grid(row=3, column=2, pady=10)\n \n self.width_unit_label = tkinter.Label(self.frame, text=\"mm\")\n self.width_unit_label.grid(row=3, column=3, pady=10)\n \n \n self.length_label = tkinter.Label(self.frame, text=\"Length of room: \")\n self.length_label.grid(row=4, column=1, pady=10)\n\n self.length_entry = tkinter.Entry(self.frame)\n self.length_entry.grid(row=4, column=2, pady=10)\n \n self.length_unit_label = tkinter.Label(self.frame, text=\"mm\")\n self.length_unit_label.grid(row=4, column=3, pady=10)\n \n \n self.plank_length_label = tkinter.Label(self.frame, text=\"Length of planks: \")\n self.plank_length_label.grid(row=5, column=1, pady=10)\n\n self.plank_length_entry = tkinter.Entry(self.frame)\n self.plank_length_entry.grid(row=5, column=2, pady=10)\n \n self.plank_unit_label = tkinter.Label(self.frame, text=\"mm\")\n self.plank_unit_label.grid(row=5, column=3, pady=10)\n\n \n self.back_button = tkinter.Button(self.frame, text=\"Back\", command=self.welcome.back, width=12, height=3)\n self.back_button.grid(row=6, column=1, pady=20)\n \n self.calculate_button = tkinter.Button(self.frame, text=\"Calculate\", command = self.calculate, width=12, height=3)\n self.calculate_button.grid(row=6, column=2, columnspan=2, pady=20, sticky=\"e\")\n \n \n\n\n def calculate(self):\n \n self.error = False\n #check user has supplied all relevant information\n try:\n self.room_length = int(self.length_entry.get())\n self.room_width = int(self.width_entry.get())\n self.plank_length = int(self.plank_length_entry.get())\n \n \n #If any details have been given in incorrect format alert user and halt processing\n except:\n Error_Message(\"Please enter whole numbers only!\")\n self.error = True\n\n \n #If no errors gathering information we can proceed\n 
if self.error == False:\n #Cut boards for 2 longer sides\n for i in range(2):\n wall = self.room_length\n self.cut_for_wall(wall)\n \n #Cut booards for 2 shorter sides\n for i in range(2):\n wall = self.room_width\n self.cut_for_wall(wall)\n \n \n self.info = Confirmation_Message(self, \"You need {} planks!\".format(self.woodstore.planks))\n #Configure \"next room\" button\n self.info.popup_cancel.config(command=self.add_room)\n #Configure \"new calculation button\n self.info.popup_ok.config(command = self.new_calc)\n \n\n\n\n \n \n def add_room(self):\n #Close popup and leave woodstore intact to continue calculating\n self.info.close()\n \n def new_calc(self):\n #Empty woodstore for next calculation\n self.woodstore.empty_store()\n self.info.close()\n \n \n \n def cut_for_wall(self, wall):\n while wall > 0:\n #If wall length is longer than plank length take whole plank and reduce length of wall\n #to calculate accordingly\n if wall > self.plank_length:\n self.woodstore.get_plank(self.plank_length, 0)\n wall = wall - self.plank_length\n \n \n #if plank is long enough to cover remaining section of wall \n else:\n #Check offcuts to see if we can use one of them\n for i in self.woodstore.offcuts:\n if i.length > wall and wall > 0:\n i.cut_board(wall, 0, True)\n\n #if we have used all of this offcut remove it from the list)\n if i.length == 0:\n self.woodstore.offcuts.remove(i)\n wall = 0\n \n \n #If we didnt have an offcut to use cut from a new plank \n if wall > 0:\n #get fresh plank from woodstore\n this_plank = self.woodstore.get_plank(self.plank_length, 0)\n \n #Cut plank to needed size and keep offcut for use\n this_plank.cut_board(wall, 0, False)\n\n wall = 0\n \nclass Custom_View:\n \n def __init__(self, root, welcome):\n self.root = root\n self.welcome = welcome\n self.woodstore = self.welcome.woodstore\n \n #Make sure woodstore is empty\n self.woodstore.empty_store()\n \n #Counter to keep track of next available row of widgets\n self.row_counter = 2\n #dictionary to store generated widgets in using row number as key\n self.widgets = {}\n #List to hold initial row of board input widgets\n row0 = []\n\n\n self.container = tkinter.Frame(self.root)\n \n self.canvas = tkinter.Canvas(\n self.container, width=435, height=600) \n \n #Define scrollbar\n self.scrollbary = tkinter.Scrollbar(\n self.container, orient=\"vertical\",\\\n command=self.canvas.yview)\n\n #Create the top frame to hold widgets \n self.frame = tkinter.Frame(self.canvas)\n \n \n #Define scrollable region of canvas\n self.frame.bind(\n \"\", lambda e:\n self.canvas.configure(\n scrollregion=self.canvas.bbox(\n \"all\")))\n self.canvas.create_window(\n (0, 0), window=self.frame, anchor=\"nw\")\n \n #Configure the commands for interacting with scrollbars\n self.canvas.configure(\n yscrollcommand=self.scrollbary.set)\n\n \n #attach container to parent\n self.container.pack()\n \n \n #Define positioning of scrollbars\n self.scrollbary.grid(\n row=1, column=2, sticky=\"ns\")\n \n #pack canvas into container and span full area of container\n self.canvas.grid(\n row=1, column=1, sticky=\"nsew\")\n \n \n self.top_spacer = tkinter.Frame(self.frame, width=60, height=30)\n self.top_spacer.grid(row=0, column=0)\n\n self.title_label = tkinter.Label(self.frame, text=\"Custom project\")\n self.title_label.grid(row=1, column=1, pady=10, columnspan=2)\n \n self.heading_label = tkinter.Label(self.frame,\\\n text=\"Please enter sizes of raw plank of timber\")\n self.heading_label.grid(row=2, column=1, columnspan=2, pady=10)\n \n\n 
self.plank_length_label = tkinter.Label(self.frame, text=\"Length of planks: \")\n self.plank_length_label.grid(row=3, column=1, pady=10)\n\n self.plank_length_entry = tkinter.Entry(self.frame)\n self.plank_length_entry.grid(row=3, column=2, pady=10)\n \n self.plankl_unit_label = tkinter.Label(self.frame, text=\"mm\")\n self.plankl_unit_label.grid(row=3, column=3, pady=10)\n \n self.plank_width_label = tkinter.Label(self.frame, text=\"Width of planks: \")\n self.plank_width_label.grid(row=4, column=1, pady=10)\n\n self.plank_width_entry = tkinter.Entry(self.frame)\n self.plank_width_entry.grid(row=4, column=2, pady=10)\n \n self.plankw_unit_label = tkinter.Label(self.frame, text=\"mm\")\n self.plankw_unit_label.grid(row=4, column=3, pady=10)\n\n self.table_heading = tkinter.Label(self.frame, text=\"Please enter boards you need for your project\")\n self.table_heading.grid(row=5, column=1, columnspan=3, pady=10)\n \n #Frame to hold table of dynamic entries\n self.board_frame = tkinter.Frame(self.frame)\n self.board_frame.grid(row=6, column=1, columnspan=3, sticky=\"nsew\")\n \n \n self.length_heading = tkinter.Label(self.board_frame, text=\"L\")\n self.length_heading.grid(row=0, column=1, pady=10)\n \n self.length_entry = tkinter.Entry(self.board_frame, width=7)\n self.length_entry.grid(row=1, column=1, pady=10, padx=5)\n row0.append(self.length_entry)\n \n self.x_label = tkinter.Label(self.board_frame, text=\"X\")\n self.x_label.grid(row=1, column=2, pady=10, padx=5)\n row0.append(self.x_label) \n \n self.width_heading = tkinter.Label(self.board_frame, text=\"W\")\n self.width_heading.grid(row=0, column=3, pady=10)\n \n self.width_entry = tkinter.Entry(self.board_frame, width=7)\n self.width_entry.grid(row=1, column=3, pady=10, padx=5)\n row0.append(self.width_entry)\n \n self.unit_label = tkinter.Label(self.board_frame, text=\"mm\")\n self.unit_label.grid(row=1, column=4, pady=10, padx=5)\n row0.append(self.unit_label)\n \n \n \n #Add initially generated row of board input widgets to dictionary\n self.widgets[0] = row0\n \n \n #########Here will be generated rows for adding more boards\n \n \n \n self.row_button = tkinter.Button(self.frame, text=\"+\", command=self.add_row, height=3, width=5)\n self.row_button.grid(row=7, column=1, pady=10, sticky=\"e\")\n \n self.back_button = tkinter.Button(self.frame, text=\"Back\", command=self.welcome.back, width=12, height=3)\n self.back_button.grid(row=8, column=1, pady=10)\n \n self.calculate_button = tkinter.Button(self.frame, text=\"Calculate\", command=self.calculate, width=12, height=3)\n self.calculate_button.grid(row=8, column=2, columnspan=2, pady=10, sticky=\"e\")\n \n \n\n \n \n #Add another row of widgets to display\n def add_row(self):\n \n \n #Define list to hold widgets\n this_row = []\n \n #Define widgets for this row, add each to list\n length_entry = tkinter.Entry(self.board_frame, width=7)\n length_entry.grid(row=self.row_counter, column=1, pady=10)\n this_row.append(length_entry)\n \n x_label = tkinter.Label(self.board_frame, text=\"X\")\n x_label.grid(row=self.row_counter, column=2, pady=10)\n this_row.append(x_label)\n \n width_entry = tkinter.Entry(self.board_frame, width=7)\n width_entry.grid(row=self.row_counter, column=3, pady=10)\n this_row.append(width_entry)\n \n unit_label = tkinter.Label(self.board_frame, text=\"mm\")\n unit_label.grid(row=self.row_counter, column=4, pady=10)\n this_row.append(unit_label)\n \n \n remove_button = tkinter.Button(self.board_frame, text=\"-\", command=lambda 
x=self.row_counter:self.del_row(x))\n remove_button.grid(row=self.row_counter, column=5, pady=10)\n this_row.append(remove_button)\n \n #Store list of widgets in dictionary using row number as key\n self.widgets[self.row_counter] = this_row\n \n #Increase row counter for next row\n self.row_counter = self.row_counter + 1\n \n def add_room(self):\n #Close popup and leave woodstore intact to continue calculating\n self.info.close()\n \n def new_calc(self):\n #Empty woodstore for next calculation\n self.woodstore.empty_store()\n self.info.close()\n\n \n def del_row(self, i):\n #Loop over list of widgets stored under passed row number\n #And destroy all\n for j in self.widgets[i]:\n j.destroy()\n #Remove entry from dictionary to avoid errors when reading in data\n del self.widgets[i]\n \n\n def calculate(self):\n\n #Function to sort list of boards\n def sort_func(x):\n return x.length\n \n #Bool flag to toggle if any errors reading in data\n self.error = False\n \n #Declare list to hold boards we need to cut\n self.boards = []\n \n \n try:\n \n \n ###Read in data from widgets, halt processing if any errors encountered\n \n #pull info on size of raw lengths\n self.raw_length = int(self.plank_length_entry.get())\n self.raw_width = int(self.plank_width_entry.get())\n \n \n #create board objects from supplied dimensions, hold in list\n for value in self.widgets.values():\n \n \n #Check the sizes of board required are within sizes of raw plank\n if int(value[0].get()) <= self.raw_length and int(value[2].get()) <= self.raw_width:\n self.boards.append(Board(int(value[0].get()), int(value[2].get()), self.woodstore))\n \n \n #If above failed check if the supplied lehgth and width for this board wave been reversed\n elif int(value[2].get()) <= self.raw_length and int(value[0].get()) <= self.raw_width:\n self.boards.append(Board(int(value[2].get()), int(value[0].get()), self.woodstore))\n \n \n else:\n #If we couldnt get this board to fit within supplied dimensions for raw plank\n #Inform user and halt processing\n Error_Message(\"One of your boards is too big to cut from these planks!\")\n self.error = True\n \n\n \n \n except:\n Error_Message(\"Please enter whole numbers only!\")\n self.error = True\n \n ###Provided there were no errors encountered building data, sort list of boards to cut by length \n \n #If we ran into any problems gathering data halt processing\n if self.error == False:\n #sort list of boards by length, largest to smallest\n self.boards.sort(reverse=True, key=sort_func) \n \n ##While list contains something##\n while len(self.boards) > 0:\n \n\n \n \n \n ###Start with longest board that needs cut, cut this from fresh plank and remove from list of\n ###boards to be cut\n \n \n #Check offcuts to see if any will fit this board \n found = False\n for i in self.woodstore.offcuts:\n if i.length >= self.boards[0].length:\n i.cut_board(self.boards[0].length, self.boards[0].width, True)\n found = True\n this_length = i\n \n\n \n \n \n #If there wasnt an offcut that fit this board just take a new plank from stores\n if found == False:\n #take length from woodstore, cut longest board in list\n this_length = self.woodstore.get_plank(self.raw_length, self.raw_width)\n this_length.cut_board(self.boards[0].length, self.boards[0].width, False)\n \n \n\n \n \n \n #Delete board we just cut from list of required boards\n del self.boards[0]\n \n \n ###Loop over remaining boards and see if any can be cut from remainder of this plank, save any that fit\n ###into list and save list and reference to 
starting board in dictionaries (stored as 2 dictionaries with\n ###starting board reference as key to avoid issues if more than one combination gives the same wastage)\n \n \n ###Repeat this process until we have lists of every combination of boards we can cut from\n ###the remainder of this plank\n \n \n #Dictionary to hold results from passes over boards\n attempts = {}\n results = {}\n \n \n for i in range(len(self.boards)):\n #Save remaining length in this plank\n remaining_length = this_length.length\n \n #List to hold boards for this pass\n this_pass = []\n \n \n #look at next longest board and see if there's enough left for it\n if self.boards[i].length <= remaining_length:\n remaining_length = remaining_length - self.boards[i].length\n #Save index reference for board\n this_pass.append(self.boards[i])\n \n #Loop over remaining boards and see if any more will fit\n for j in range(len(self.boards) - i):\n if self.boards[j+i].length <= remaining_length:\n remaining_length = remaining_length - self.boards[j+i].length\n #Save index reference for board\n this_pass.append(self.boards[j+i])\n \n \n #Save list of results in dictionary using starting board index as key\n attempts[i] = this_pass\n results[i] = remaining_length\n\n\n\n\n \n ###Examine combinations we found and see which one leaves us the least left over material\n ###Then cut these boards and remove them from list of boards to be cut\n\n\n\n\n \n wastage = 999999\n permutation = None\n #Make sure we found something before trying to process\n if attempts != {}:\n #Check results to find least wastage\n for key, value in results.items():\n if value < wastage:\n wastage = value\n permutation = key #key is the key of the combination of boards we want to cut from plank\n \n\n \n\n \n #Loop over boards in chosen permutation and cut them from plank\n for i in range(len(attempts[permutation])):\n #Delete board from list of boards to be cut\n try:\n self.boards.remove(attempts[permutation][i])\n this_length.cut_board(attempts[permutation][i].length, attempts[permutation][i].width, False)\n\n #In case we try to delete a board that isnt in the list anymore\n #Shouldnt happen \n except:\n \n pass\n \n \n \n self.info = Error_Message( \"You need {} planks!\".format(self.woodstore.planks))\n \n \n \n\n\nclass Welcome_view:\n \n def __init__(self, root):\n \n self.root = root\n \n #Initialize woodstore so we can store our wood\n self.woodstore = WoodStore()\n \n #Create display\n self.create_view()\n \n def create_view(self):\n \n self.frame = tkinter.Frame(self.root)\n self.frame.pack(fill=\"both\", expand=True) \n \n self.top_spacer = tkinter.Frame(self.frame, width=80, height=50)\n self.top_spacer.grid(row=0, column=0)\n \n self.heading_label = tkinter.Label(self.frame, text=\"What are you trying to calculate today?\")\n self.heading_label.grid(row=1, column=1, pady=20)\n \n self.area_button = tkinter.Button(self.frame, text=\"Area\", command=self.area, width=12, height=3)\n self.area_button.grid(row=2, column=1, pady=20)\n \n self.circumference_button = tkinter.Button(self.frame, text=\"Circumference\", command=self.circumference, width=12, height=3)\n self.circumference_button.grid(row=3, column=1, pady=20)\n \n self.custom_button = tkinter.Button(self.frame, text=\"Custom\", command=self.custom, width=12, height=3)\n self.custom_button.grid(row=4, column=1, pady=20)\n \n\n def destroy_view(self):\n for widget in self.root.winfo_children():\n widget.destroy()\n \n def area(self):\n self.destroy_view()\n Area_View(self.root, self) \n \n 
def circumference(self):\n self.destroy_view()\n Circumference_View(self.root, self)\n \n def custom(self):\n self.destroy_view()\n Custom_View(self.root, self)\n \n def back(self):\n #Destroy current view and re-create welcome screen\n self.destroy_view()\n self.create_view()\n\n\n\n\n\n\n\n#Define root window\nroot = tkinter.Tk()\n\nroot.title(\"Board calculator V1.0\")\nroot.geometry(\"450x600\")\n\nview = Welcome_view(root)\n\n\nif __name__ == \"__main__\":\n root.mainloop()","sub_path":"board_calculator.py","file_name":"board_calculator.py","file_ext":"py","file_size_in_byte":38918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"329404467","text":"import logging\n\nimport discord\nfrom discord.ext import commands\nfrom motor.motor_asyncio import AsyncIOMotorClient\n\nlogger = logging.getLogger(\"DDog\")\n\n\nclass DDog(commands.AutoShardedBot):\n def __init__(self, *args, **kwargs):\n if kwargs.get(\"mongourl\"):\n logger.info(\"URL para MongoDB detectada, intentando conexion\")\n self.db = AsyncIOMotorClient(kwargs.get(\"mongourl\"))\n else:\n self.db = None\n\n if \"mongourl\" in kwargs:\n kwargs.pop(\"mongourl\")\n\n super().__init__(*args, **kwargs)\n\n self.remove_command(\"help\")\n\n self.add_command(self.stop)\n self.add_command(self._help)\n\n async def on_command(self, ctx):\n logger.info(\"Commando por {0.name} - {0.id}: {1.content}\".format(ctx.author, ctx.message))\n\n async def on_ready(self):\n logger.info(\"Listo para trabajar con el prefijo '{0}'\".format(self.command_prefix))\n\n game = discord.Game(\"{0}help\".format(self.command_prefix))\n await self.change_presence(activity=game)\n\n @commands.command(alias=[\"detener\", \"cerrar\"])\n @commands.is_owner()\n async def stop(self, ctx):\n \"\"\"Detiene al Bot (en el caso de Heroku, lo reinicia)\"\"\"\n await ctx.send(\"Adios!\")\n await self.logout()\n\n @commands.command(name=\"help\")\n async def _help(self, ctx, *, uinput=None):\n \"\"\"Muestra este mensaje de ayuda\"\"\"\n coms = []\n\n for com in self.commands:\n try:\n await com.can_run(ctx)\n except commands.CommandError:\n pass\n else:\n coms.append(com)\n\n coms = sorted(coms, key=lambda command: command.name)\n\n index = 0\n\n if uinput:\n if uinput.isnumeric():\n uinput = int(uinput)\n\n ifst = 5 * index\n ilst = 5 * index + 5\n\n if ifst >= len(coms):\n await ctx.send(\"La pagina de ayuda no es valida.\")\n return\n if ilst > len(coms):\n ilst = len(coms)\n\n coms = coms[ifst:ilst]\n\n embed = discord.Embed(title=\"Comandos de {0}\".format(ctx.me.name), colour=0x7FD935)\n for command in coms:\n embed.add_field(name=self.command_prefix + command.name,\n value=command.help.splitlines()[0], inline=False)\n\n await ctx.send(embed=embed)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"391012937","text":"import serial\r\nimport json\r\nimport os\r\nimport random\r\n##from twilio.rest import Client\r\nimport time\r\nimport requests\r\nimport pygame\r\n\r\n\r\n\r\nser = serial.Serial('COM24', 115200)\r\n\r\nprint (\"connected to: \" + ser.portstr)\r\n\r\n\r\nwhile True:\r\n line = ser.readline()\r\n print(\"read a line\")\r\n line = line.decode('utf8')\r\n ##line = line [2:13]\r\n ##line = line.replace(\" \", \"\")\r\n line=line.rstrip()\r\n ##print(line)\r\n\r\n if line.count(\"#\") != 2:\r\n print (line.count(\"##\"))\r\n continue\r\n if line.count(\"?\") != 2:\r\n 
continue\r\n    line = line[2:30]\r\n    line = line.replace(\"?\", \"\")\r\n    ##print(line)\r\n\r\n    words = line.split(',')\r\n    if \"0.00\" in words or len(words) != 5:\r\n        continue\r\n    \r\n    for word in words:\r\n        print(word)\r\n    \r\n","sub_path":"hardware/sensorreadtest.py","file_name":"sensorreadtest.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} {"seq_id":"336819250","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom .forms import ContatosForm\nfrom .models import Contatos\n\ndef contatos(request):\n    lista = Contatos.objects.order_by('nome')\n    return render(request, 'contatos.html', {'lista': lista})\n\ndef menu(request):\n    return render(request, 'menu.html')\n\n\ndef criar(request):\n    if request.method == 'POST':\n        form = ContatosForm(request.POST)\n        if form.is_valid():\n\n            agenda = form.save(commit=False)\n\n            agenda.save()\n            return redirect('recente')\n    else:\n        form = ContatosForm()\n\n    return render(request, 'criar.html', {'form': form})\n\ndef detalhe(request, pk):\n    agenda = get_object_or_404(Contatos, pk=pk)\n    return render(request, 'detalhes.html', {'agenda': agenda})\n\n\ndef editar(request, pk):\n    contato = get_object_or_404(Contatos, pk=pk)\n    if request.method == 'POST':\n        form = ContatosForm(request.POST, instance=contato)\n        if form.is_valid():\n            contato.save()\n            return redirect('detalhe', pk=contato.pk)\n    else:\n        form = ContatosForm(instance=contato)\n    return render(request, 'editar.html', {'form': form})\n\ndef deletar(request, pk):\n    contato = get_object_or_404(Contatos, pk=pk)\n    contato.delete()\n    return redirect('contatos')\n\ndef recente(request):\n    recente = Contatos.objects.latest('id')\n    return render(request, 'recente.html', {'recente': recente})","sub_path":"contatos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}