Dataset schema (47 columns, one row per source file; ⌀ marks a nullable column):

| column | dtype | range / classes |
|---|---|---|
| repo_name | string | lengths 7-111 |
| __id__ | int64 | 16.6k-19,705B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 5-151 |
| content_id | string | length 40 |
| detected_licenses | list | - |
| license_type | string | 2 classes |
| repo_url | string | lengths 26-130 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | lengths 4-42 |
| visit_date | timestamp[ns] | - |
| revision_date | timestamp[ns] | - |
| committer_date | timestamp[ns] | - |
| github_id | int64 ⌀ | 14.6k-687M |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | - |
| gha_created_at | timestamp[ns] | - |
| gha_updated_at | timestamp[ns] | - |
| gha_pushed_at | timestamp[ns] | - |
| gha_size | int64 ⌀ | 0-10.2M |
| gha_stargazers_count | int32 ⌀ | 0-178k |
| gha_forks_count | int32 ⌀ | 0-88.9k |
| gha_open_issues_count | int32 ⌀ | 0-2.72k |
| gha_language | string ⌀ | lengths 1-16 |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10-2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10-2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1-202k |
| filename | string | lengths 4-112 |
| num_lang_files | int64 | 1-202k |
| alphanum_fraction | float64 | 0.26-0.89 |
| alpha_fraction | float64 | 0.2-0.89 |
| hex_fraction | float64 | 0-0.09 |
| num_lines | int32 | 1-93.6k |
| avg_line_length | float64 | 4.57-103 |
| max_line_length | int64 | 7-931 |
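Loaded into pandas, rows with this schema can be queried directly. A minimal sketch (the Parquet filename is hypothetical; substitute wherever this dump is actually stored):

```python
import pandas as pd

# "code_files.parquet" is a placeholder for wherever these rows live.
df = pd.read_parquet("code_files.parquet")

# Keep small, human-written Python files, using the columns defined above.
subset = df[(df["language"] == "Python")
            & ~df["is_vendor"] & ~df["is_generated"]
            & (df["length_bytes"] < 10_000)]
print(subset[["repo_name", "path", "num_lines", "alphanum_fraction"]].head())
```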
---
repo_name: zongtong009/pythonDesktop | __id__: 19,086,834,708,238 | blob_id: 591a6ee7973f84bbd330563d33e508a31c47e292 | directory_id: 32fbb5f1c9ebcd8f5a0957b8d5df1d9664ef7ca5 | path: /iMoocpython/991玫瑰花.py | content_id: 641be3bc4407595b76df591fb6a4a4faad62ff2a | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/zongtong009/pythonDesktop | snapshot_id: c86ad5d1bae3db3ffde2dc3c612e5df69d456f01 | revision_id: 34dab20877f297d5e366737de1389a6fcca17b8f | branch_name: refs/heads/master | visit_date: 2021-06-18T23:46:00.489289 | revision_date: 2021-01-28T03:17:32 | committer_date: 2021-01-28T03:17:32 | github_id: 150,281,696 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
# RoseDraw.py
from turtle import *
# Define a curve-drawing helper
def DegreeCurve(n, r, d=1):
for i in range(n):
left(d)
circle(r, abs(d))
# Set the initial position
s = 0.2 # size
setup(450*5*s, 750*5*s)
pencolor("black")
fillcolor("red")
speed(100)
penup()
goto(0, 900*s)
pendown()
# Draw the flower head
begin_fill()
circle(200*s, 30)
DegreeCurve(60, 50*s)
circle(200*s, 30)
DegreeCurve(4, 100*s)
circle(200*s, 50)
DegreeCurve(50, 50*s)
circle(350*s, 65)
DegreeCurve(40, 70*s)
circle(150*s, 50)
DegreeCurve(20, 50*s, -1)
circle(400*s, 60)
DegreeCurve(18, 50*s)
fd(250*s)
right(150)
circle(-500*s, 12)
left(140)
circle(550*s, 110)
left(27)
circle(650*s, 100)
left(130)
circle(-300*s, 20)
right(123)
circle(220*s, 57)
end_fill()
# Draw the stem
left(120)
fd(280*s)
left(115)
circle(300*s, 33)
left(180)
circle(-300*s, 33)
DegreeCurve(70, 225*s, -1)
circle(350*s, 104)
left(90)
circle(200*s, 105)
circle(-500*s, 63)
penup()
goto(170*s, -30*s)
pendown()
left(160)
DegreeCurve(20, 2500*s)
DegreeCurve(220, 250*s, -1)
# Draw one green leaf
fillcolor('green')
penup()
goto(670*s, -180*s)
pendown()
right(140)
begin_fill()
circle(300*s, 120)
left(60)
circle(300*s, 120)
end_fill()
penup()
goto(180*s, -550*s)
pendown()
right(85)
circle(600*s, 40)
# Draw another green leaf
penup()
goto(-150*s, -1000*s)
pendown()
begin_fill()
rt(120)
circle(300*s, 115)
left(75)
circle(300*s, 100)
end_fill()
penup()
goto(430*s, -1070*s)
pendown()
right(30)
circle(-600*s, 35)
done()

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,496 | extension: py | num_repo_files: 77 | filename: 991玫瑰花.py | num_lang_files: 75 | alphanum_fraction: 0.657895 | alpha_fraction: 0.473684 | hex_fraction: 0 | num_lines: 97 | avg_line_length: 13.494845 | max_line_length: 27

---
repo_name: seantibor/pyencourage | __id__: 13,228,499,296,093 | blob_id: 65fff3365710fa1695097f6a3bfbed329bc3642e | directory_id: f6460c437bdc5e274e793e72cb4d75c446da888d | path: /tests/test_encouragement_length.py | content_id: 97568addc6c516db301c22b4deaf8a83f4185de4 | detected_licenses: ["BSD-3-Clause"] | license_type: permissive | repo_url: https://github.com/seantibor/pyencourage | snapshot_id: 03315df441807f9ef6382c49f72aef49bf0fdc68 | revision_id: dff354c12fd753abd2f5ece7ace10910c1a9ea6b | branch_name: refs/heads/master | visit_date: 2023-04-15T04:12:21.248640 | revision_date: 2021-04-29T13:00:27 | committer_date: 2021-04-29T13:00:27 | github_id: 360,917,871 | star_events_count: 3 | fork_events_count: 0 | gha_license_id: NOASSERTION | gha_fork: true | gha_event_created_at: 2021-04-29T12:59:29 | gha_created_at: 2021-04-23T14:51:46 | gha_updated_at: 2021-04-26T17:29:27 | gha_pushed_at: 2021-04-29T12:59:29 | gha_size: 715 | gha_stargazers_count: 2 | gha_forks_count: 0 | gha_open_issues_count: 1 | gha_language: Python | gha_archived: false | gha_disabled: false

content:
from pyencourage.encourage_en import encourage_en
def _test_encouragement_length(encouragement):
assert len(encouragement) <= 140
def _test_encouragement_group(encouragements):
for encouragement in encouragements:
_test_encouragement_length(encouragement)
def test_encouragements_lengths():
encouragements_sets = [encourage_en, ]
for encouragements in encouragements_sets:
_test_encouragement_group(encouragements['all'])
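Only `test_encouragements_lengths` above is collected by pytest; the underscore-prefixed helpers are not. An equivalent parametrized form (our sketch, not from the repo) reports each encouragement as its own case:

```python
import pytest
from pyencourage.encourage_en import encourage_en

@pytest.mark.parametrize("encouragement", encourage_en["all"])
def test_encouragement_length(encouragement):
    assert len(encouragement) <= 140  # the classic tweet-length limit
```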

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 460 | extension: py | num_repo_files: 13 | filename: test_encouragement_length.py | num_lang_files: 8 | alphanum_fraction: 0.754348 | alpha_fraction: 0.747826 | hex_fraction: 0 | num_lines: 16 | avg_line_length: 27.75 | max_line_length: 56

---
repo_name: KumarAmbuj/dynamic-programing-intermediate | __id__: 13,142,599,947,384 | blob_id: ccca4c3dc88afd84c7326b22958ac90e854e9de6 | directory_id: d2fedd2085cbdbd5e54228abf0633001989787cc | path: /93.MAXIMUM SUM USING PREFIX ARRAY.py | content_id: ac10d6fa9ed3b8d48b495af946c54b7731fb4010 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/KumarAmbuj/dynamic-programing-intermediate | snapshot_id: 228d25205d370ebc329eaf6ffbcfbc2853b18abe | revision_id: 4b40f322f57762e0cf264fb2024ae56d1fa3243b | branch_name: refs/heads/main | visit_date: 2023-02-23T20:42:26.865855 | revision_date: 2021-01-23T11:10:44 | committer_date: 2021-01-23T11:10:44 | github_id: 332,188,114 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
def findmaxsum(arr):
    # prefixsum[i] = arr[0] + ... + arr[i]
    prefixsum = []
    sum = 0
    for i in range(len(arr)):
        sum += arr[i]
        prefixsum.append(sum)
    # The best subarray ending at i is prefixsum[i] minus the smallest earlier prefix;
    # start from -infinity so all-negative arrays are handled correctly
    res = float('-inf')
    minprefix = 0
    for i in range(len(arr)):
        res = max(res, prefixsum[i] - minprefix)
        minprefix = min(minprefix, prefixsum[i])
    print(res)
arr=[-2, -3, 4, -1, -2, 1, 5, -3]
findmaxsum(arr)
arr=[4, -8, 9, -4, 1, -8, -1, 6]
findmaxsum(arr)
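The prefix-sum scan above is equivalent to Kadane's algorithm, which tracks the best subarray ending at each index without building the prefix array; a short sketch for comparison (`kadane` is our name, not the repo's):

```python
def kadane(arr):
    best_here = best = arr[0]  # best sum ending here / best sum anywhere
    for x in arr[1:]:
        best_here = max(x, best_here + x)
        best = max(best, best_here)
    return best

assert kadane([-2, -3, 4, -1, -2, 1, 5, -3]) == 7
assert kadane([4, -8, 9, -4, 1, -8, -1, 6]) == 9
```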

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 412 | extension: py | num_repo_files: 74 | filename: 93.MAXIMUM SUM USING PREFIX ARRAY.py | num_lang_files: 73 | alphanum_fraction: 0.529126 | alpha_fraction: 0.480583 | hex_fraction: 0 | num_lines: 20 | avg_line_length: 18.7 | max_line_length: 45

---
repo_name: nguansak/gate-io | __id__: 15,564,961,526,747 | blob_id: 5c8cd8ec154c7caf3b328d74fbd1c0222a53b321 | directory_id: 59878dc062c192a8bab1f6fd31afde32be70c953 | path: /lib.py | content_id: cc795f33e3b03104bc379eb1afebef762f78f9f4 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/nguansak/gate-io | snapshot_id: 86014e0b0a3343371982ff2a28ee304b4ebd02e1 | revision_id: ab625e299f2b256c9decb840190c25984fe836c5 | branch_name: refs/heads/master | visit_date: 2023-04-18T22:30:05.197247 | revision_date: 2021-05-09T13:46:40 | committer_date: 2021-05-09T13:46:40 | github_id: 278,721,192 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
import time
import os
import requests
import json
from counter import *
baseUrl = "http://192.168.1.100:5000"
# baseUrl = "http://127.0.0.1:5000"
counter = Counter(0.5)
def fileOpen(gate):
global f
f = open("raw_" + gate + ".csv", "a")
def logRaw(gate, no, sensor, epoch, action):
data = "{:.6f}".format(epoch)+","+gate+","+"{:d}".format(no)+","+sensor+","+action
print("raw:"+data)
f.write(data+"\n")
f.flush()
os.fsync(f)
def sendRawData(gate, no, sensor, epoch, action):
url = baseUrl + "/gate/" + gate + "/raw"
data = "{:.6f}".format(epoch)+","+gate+","+"{:d}".format(no)+","+sensor+","+action
response = requests.post(url, data=data)
return response.ok
def sendJsonData(gate, no, sensor, epoch, action):
url = baseUrl + "/gate/" + gate + "/json"
data = { "gate": gate, "no": no, "sensor": sensor, "epoch": epoch, "action": action }
    try:
        response = requests.post(url, json={"data": [data]})
        return response.ok
    except requests.RequestException:
        print("Cannot send data to server")
def pressed(gate, no, sensor):
print(gate, no, sensor)
def sensorPressed(*arg):
now = time.time()
logRaw(gate, no, sensor, now, "pressed")
#sendJsonData(gate, no, sensor, now, "pressed")
counter.handleRealtime(gate, no, sensor, "pressed")
return sensorPressed
def released(gate, no, sensor):
print(gate, no, sensor)
def sensorReleased(*arg):
now = time.time()
logRaw(gate, no, sensor, now, "released")
#sendJsonData(gate, no, sensor, now, "released")
counter.handleRealtime(gate, no, sensor, "released")
return sensorReleased
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def fileClose():
f.close()
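`pressed` and `released` are callback factories: each captures `(gate, no, sensor)` and returns a handler, so one logging/counting path serves many sensors. A minimal demonstration (our sketch; it still needs the repo's `counter` module and a prior `fileOpen`):

```python
fileOpen("gate1")
on_press = pressed("gate1", 1, "ir")     # returns the sensorPressed closure
on_release = released("gate1", 1, "ir")  # returns the sensorReleased closure
on_press()    # logs "...,gate1,1,ir,pressed" and updates the counter
on_release()  # logs "...,gate1,1,ir,released"
fileClose()
```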

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,831 | extension: py | num_repo_files: 130 | filename: lib.py | num_lang_files: 21 | alphanum_fraction: 0.599126 | alpha_fraction: 0.583288 | hex_fraction: 0 | num_lines: 63 | avg_line_length: 28.079365 | max_line_length: 89

---
repo_name: jinpoon/11611_project | __id__: 1,786,706,417,972 | blob_id: 550a6ca5e875dd0c6e7521e5493273eed8df7e7b | directory_id: df4dbc5f0637b07cb69ada8db523b87a266445de | path: /When_QG.py | content_id: 4df6bae11f8f7c0a3121245a11f670341b1c8938 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/jinpoon/11611_project | snapshot_id: b34d0b0fee569beb90fa51459bd639205dfc47b2 | revision_id: 9e12e14db227c1778950dffe567b17bd2b27db5b | branch_name: refs/heads/master | visit_date: 2018-12-13T16:23:24.476601 | revision_date: 2018-04-26T22:00:25 | committer_date: 2018-04-26T22:00:25 | github_id: 118,981,350 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
import re
import os
import requests
import json
import string
import nltk
import sys
from collections import defaultdict
from nltk.parse import stanford
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tree import *
from nltk.corpus import wordnet as wn
from nltk.stem import *
from nltk.stem.wordnet import WordNetLemmatizer
from testscript import *
from coreNLP_wrapper import StanfordNLP
from What_Who_QG import getDecapitalized
def getNerSet(phrase):
sNLP = StanfordNLP()
return {t[1] for t in sNLP.ner(phrase)}
def findMainVerb(dep_tree):
for i in dep_tree:
if i[1] in ['nsubj', 'csubj', 'nsubjpass']:
return(i[0][0], i[0][1])
return (None,None)
def findAuxVerb(dep_tree, verb):
for i in dep_tree:
if i[0][0] == verb and i[1] in ["auxpass", "aux"]:
return (i[2][0], i[2][0] + " " + i[0][0])
return (None, None)
def construct_when(ques, dep_tree):
mVerb, tense = findMainVerb(dep_tree)
# print(mVerb, tense)
if mVerb is None or "VB" not in tense:
return None
auxVerb, auxMStr = findAuxVerb(dep_tree, mVerb)
# print(auxVerb, auxMStr)
if auxVerb is not None and auxMStr is not None:
que = ques.replace(auxMStr, mVerb)
# print(que)
que = ("When "+ auxVerb + " " + getDecapitalized(que))
else:
tenseVerb = ""
stemVerb = mVerb
if tense in ['VBD', 'VBN']:
tenseVerb = "did"
elif tense in ['VBZ']:
tenseVerb = "does"
stemVerb = WordNetLemmatizer().lemmatize(mVerb,'v')
que = ques.replace(mVerb, stemVerb)
que = ("When " + tenseVerb +" "+ getDecapitalized(que))
# print("que", que)
que_tokens = word_tokenize(que)
if que_tokens[-1] == "." or que_tokens[-1] == ",":
que_tokens[-1] = "?"
else:
que_tokens.append("?")
que = " ".join(que_tokens)
return que
def when_parseTraversal(sent, parent, question, structures):
thisSent = sent
for node in parent:
if type(node) is ParentedTree:
if node.label() == 'ROOT':
pass
else:
if (node.label() == "PP" or node.label() == "NP-TMP") and node.left_sibling() is None:
thisPP = " ".join(node.leaves())
nerSet = getNerSet(thisPP)
if "DATE" in nerSet or "TIME" in nerSet:
# thisPP = thisPP + ", "
thisPP = thisPP.replace(" ,", ",")
# print(thisPP)
thisSentence = thisSent.replace(thisPP, "").replace(", ","")
question.append(thisSentence)
break
if node.label() == "PP":
this = " ".join(node.leaves())
this = this.replace(" ,", ",")
nerSet = getNerSet(this)
if "DATE" in nerSet or "TIME" in nerSet:
structures.append((this, len(this)))
if node.parent() is not None: ### recursive to go in depth of the tree
when_parseTraversal(sent, node, question, structures)
def When_module(sent, sent_features):
question = []
structures = []
sNLP = StanfordNLP()
# print(sent_features)
# dep_parse = sNLP.dependency_parse(sent)
# dep_parse = dep_parse.__next__()
#
# dep_parse_list = list(dep_parse.triples())
parse = sNLP.parse(sent)
# parse.pretty_print()
# for t in dep_parse_list:
# print(t)
# print(sNLP.ner(sent))
# print(sNLP.pos(sent))
when_parseTraversal(sent, parse, question, structures)
# print(question)
# print(structures)
prev_min = float('Inf')
if len(structures) > 0:
whenPhrase = ""
for t in structures:
if t[1] < prev_min:
whenPhrase = t[0]
prev_min = t[1]
# print(sent)
# print(whenPhrase)
thisQ = sent.replace(whenPhrase, "")
dep_tree = sNLP.dependency_parse(thisQ)
dep_tree = dep_tree.__next__()
dep_tree_list = list(dep_tree.triples())
# for t in dep_tree_list:
# print(t)
return construct_when(thisQ, dep_tree_list)
for q in question:
dep_tree = sNLP.dependency_parse(q)
dep_tree = dep_tree.__next__()
dep_tree_list = list(dep_tree.triples())
# for t in dep_tree_list:
# print(t)
return construct_when(q,dep_tree_list)
# print()
pass

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,643 | extension: py | num_repo_files: 15 | filename: When_QG.py | num_lang_files: 14 | alphanum_fraction: 0.552875 | alpha_fraction: 0.548137 | hex_fraction: 0 | num_lines: 154 | avg_line_length: 29.149351 | max_line_length: 102

---
repo_name: a-ruban/songs_metadata_processor | __id__: 4,561,255,282,369 | blob_id: 127282b2cf1e6548a2b0bf1cea25939d650fb899 | directory_id: 6d2f49c36ef57dc120223991c80c1f289e8d3451 | path: /music_metadata/migrations/0001_initial.py | content_id: e4fb1765d783d739630e87f217ce2e03889e2789 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/a-ruban/songs_metadata_processor | snapshot_id: 37638f8f3e612a83c7e2254c830be4a490c5f2c7 | revision_id: b4ec4b2461032996401d4e75d10a61d225fc1b0f | branch_name: refs/heads/master | visit_date: 2022-12-27T23:06:13.840151 | revision_date: 2020-10-08T17:23:18 | committer_date: 2020-10-08T17:23:18 | github_id: 302,413,666 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
# Generated by Django 3.1.2 on 2020-10-03 12:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contributor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=300, null=True)),
('iswc', models.CharField(blank=True, max_length=13, null=True)),
('contributors', models.ManyToManyField(related_name='contributed_songs', to='music_metadata.Contributor')),
],
),
]
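For reference, this initial migration corresponds to roughly the following models.py (our reconstruction from the CreateModel operations, not a file from the repo):

```python
from django.db import models

class Contributor(models.Model):
    name = models.CharField(blank=True, max_length=200, null=True)

class Song(models.Model):
    title = models.CharField(blank=True, max_length=300, null=True)
    iswc = models.CharField(blank=True, max_length=13, null=True)
    contributors = models.ManyToManyField(Contributor, related_name='contributed_songs')
```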

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,024 | extension: py | num_repo_files: 12 | filename: 0001_initial.py | num_lang_files: 10 | alphanum_fraction: 0.573242 | alpha_fraction: 0.550781 | hex_fraction: 0 | num_lines: 30 | avg_line_length: 33.133333 | max_line_length: 124

---
repo_name: noaimabari/Twitter-Sentiment-Analysis | __id__: 11,072,425,706,875 | blob_id: 78d7c0c48b44cf9c34a3205d0b9379174c6e4756 | directory_id: 79fbd2fc3871396dc02bf217ad30a2eadecbeea3 | path: /code.py | content_id: e1a3eddb1db55d462a1adfa505ba61ff393b9633 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/noaimabari/Twitter-Sentiment-Analysis | snapshot_id: 7d0bb22c066490e0c87998fed54b52d99434a2d6 | revision_id: 2307022ccb922fdc8153387481c6d698a0d5811c | branch_name: refs/heads/master | visit_date: 2021-03-29T08:34:07.273881 | revision_date: 2020-04-10T12:59:27 | committer_date: 2020-04-10T12:59:27 | github_id: 247,937,487 | star_events_count: 2 | fork_events_count: 0 | gha_*: null

content:
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
import string
## getting training data
df = pd.read_csv("0000000000002747_training_twitter_x_y_train.csv")
y = list(df["airline_sentiment"])
text = list(df["text"])
## training documents
documents = []
for i in range(len(y)):
documents.append((text[i].split(" "),y[i]))
documents[0:5] ## in the form of tuple with 1st element as a list of words and second as the sentiment, ie positive, negative or neutral
## processing the words:
## making a list of stopwords and punctuations to be removed from the tweets
stop = stopwords.words("english")
punctuations = list(string.punctuation)
stop = stop + punctuations ## to remove punctuations
## using nltk library
from nltk.corpus import wordnet
## function to get pos_tag of a word
def get_simple_pos(tag):
if tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
from nltk import pos_tag
from nltk.stem import WordNetLemmatizer
lt = WordNetLemmatizer()
## function to clean the words in the text
## removing stop words and punctuations
## performing lemmatization to get better insight on the meaning of the words
def clean_review(words):
output_words = []
for w in words:
if w.lower() not in stop:
try:
pos = pos_tag([w])
clean_word = lt.lemmatize(w,pos = get_simple_pos(pos[0][1]))
output_words.append(clean_word.lower())
except:
continue
return output_words
documents = [(clean_review(i),category) for i,category in documents]
y_train = [category for words,category in documents]
x_train = [" ".join(words) for words, categories in documents]
## using tfidf Vectorizer to remove words which do not hold much importance during classification
from sklearn.feature_extraction.text import TfidfVectorizer
token_vec = TfidfVectorizer(max_features = 3000, ngram_range = (1,3))
x_train_t = token_vec.fit_transform(x_train) ## we can use the sparse matrix directly as training and testing data in our sklearn classifiers
## forming x_test
df = pd.read_csv("0000000000002747_test_twitter_x_test.csv")
text = list(df["text"])
x_test = []
for i in range(len(text)):
x_test.append(text[i])
x_test
x_test_t = token_vec.transform(x_test) ## we can use the sparse matrix directly as training and testing data in our sklearn classifiers
## both training and testing data now ready
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train_t, y_train)
## classification using MultiNomial Naive Bayes
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
clf.fit(x_train, y_train)
print(clf.score(x_val, y_val)) ## checking the score on the validation data
## saving the predictions in a csv file
y_pred = clf.predict(x_test_t)
predictions = np.array(y_pred)
pd.DataFrame(predictions).to_csv("finall.csv") ## saving file
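The vectorize-then-fit steps above can also be bundled into one sklearn `Pipeline`, which keeps the vectorizer and classifier in sync and accepts the raw tweet strings directly. A sketch with the same parameters (`x_train` here means the list of joined strings built before `fit_transform`):

```python
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

pipeline = Pipeline([
    ("tfidf", TfidfVectorizer(max_features=3000, ngram_range=(1, 3))),
    ("nb", MultinomialNB()),
])
pipeline.fit(x_train, y_train)   # raw strings in; no separate transform step
y_pred = pipeline.predict(x_test)
```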

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,282 | extension: py | num_repo_files: 1 | filename: code.py | num_lang_files: 1 | alphanum_fraction: 0.710542 | alpha_fraction: 0.697441 | hex_fraction: 0 | num_lines: 106 | avg_line_length: 29.95283 | max_line_length: 142

---
repo_name: lishiyucn/MaliciousUrls | __id__: 11,587,821,789,129 | blob_id: 38fe8ab7b70bfe878206ed49396f0391b332b40b | directory_id: 86c5d5f955d62143976c448e6da8ca9e91e5c52a | path: /model.py | content_id: 7817b9fd75cfd647e39df40d79210e505feb2fd9 | detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_url: https://github.com/lishiyucn/MaliciousUrls | snapshot_id: 7854ab2ab3725ab9e1dd901ac9195aab1610ceb8 | revision_id: c95a1956ddc4ca88cba769bddd508e287cab3202 | branch_name: refs/heads/master | visit_date: 2023-03-18T11:15:33.014415 | revision_date: 2020-08-20T07:58:01 | committer_date: 2020-08-20T07:58:01 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
# coding:utf-8
import os
import time
import urllib
import pickle
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.cluster import KMeans
from sklearn.linear_model import LogisticRegression
from scipy.sparse import csr_matrix, lil_matrix, coo_matrix
# Locations of the sample files
good_dir = 'data/good'
bad_dir = 'data/bad'
# Number of kmeans clusters (target dimensionality)
k = 80
# n-gram size
n = 2
# Whether to use kmeans
use_k = True
# Custom print helper that prefixes a timestamp, for easier debugging
def printT(word):
a = time.strftime('%Y-%m-%d %H:%M:%S: ', time.localtime(time.time()))
print(a + str(word))
# Read data from one file
def getdata(filepath):
with open(filepath, 'r') as f:
data = [i.strip('\n') for i in f.readlines()[:]]
return data
# Walk a directory and read data from every file in it
def load_files(dir):
data = []
g = os.walk(dir)
for path, dirs, files in g:
for filname in files:
fulpath = os.path.join(path, filname)
printT("load file: " + fulpath)
t = getdata(fulpath)
data.extend(t)
return data
# Base class for the trainable models
class Baseframe(object):
def __init__(self):
pass
    # Training
    def Train(self):
        # Read the data
printT("Loading Good Data:")
good_query_list = load_files(good_dir)
printT("Loading Bad Data:")
bad_query_list = load_files(bad_dir)
        # Combine the data
data = [good_query_list, bad_query_list]
printT("Done, Good Numbers:" + str(len(data[0])) + " Bad Numbers:" + str(len(data[1])))
        # Label the data
good_y = [0 for i in range(len(data[0]))]
bad_y = [1 for i in range(len(data[1]))]
y = good_y + bad_y
        # Vectorize the data as a preprocessing step
        # Define the vectorizer instance
        self.vectorizer = TfidfVectorizer(tokenizer=self.get_ngrams)
        # Convert the irregular list of text strings into a regular ([i, j], weight) matrix
        # X[number of urls, number of distinct terms, in theory fewer than 256^n];
        # i is the url index, j is the term (word-piece) index
        X = self.vectorizer.fit_transform(data[0] + data[1])
        printT("Data Dimensions: " + str(X.shape))
        # Reduce dimensionality with kmeans
        if use_k:
            X = self.transform(self.kmeans(X))
            printT("Kmeans Succeed")
        printT("Divide Training Data")
        # Use train_test_split to split the X, y lists (test_size is the test fraction, random_state the seed)
        # X_train rows pair one-to-one with y_train labels  -->> used to train the model
        # X_test rows pair one-to-one with y_test labels    -->> used to test the model's accuracy
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
        printT('Divide Succeed')
printT('Begin Training:')
printT(self.classifier)
self.classifier.fit(X_train, y_train)
        # Compute the model's accuracy on the held-out test set
        printT(self.getname() + ' Model Accuracy: {}'.format(self.classifier.score(X_test, y_test)))
        # Save the trained model
        with open('model/' + self.getname() + '.pickle', 'wb') as output:
            pickle.dump(self, output)
    # Preprocessing: slice the query string into character n-grams
def get_ngrams(self, query):
tempQuery = str(query)
ngrams = []
for i in range(0, len(tempQuery)-n):
ngrams.append(tempQuery[i:i+n])
return ngrams
def kmeans(self, weight):
printT('Matrix before kmeans: ' + str(weight.shape))
weight = weight.tolil().transpose()
        # For the same data and the same k, the clustering result is identical; cache it to avoid recomputation
try:
with open('model/k' + str(k) + '.label', 'r') as input:
printT('loading kmeans success')
a = input.read().split(' ')
self.label = [int(i) for i in a[:-1]]
except FileNotFoundError:
printT('Start Kmeans: ')
clf = KMeans(n_clusters=k, precompute_distances=False)
s = clf.fit(weight)
printT(s)
            # Save the clustering result
self.label = clf.labels_
with open('model/k' + str(k) + '.label', 'w') as output:
for i in self.label:
output.write(str(i) + ' ')
printT('kmeans succeed,total: ' + str(k) + ' classes')
return weight
    # Map features onto their cluster labels: takes the transposed matrix, returns it transposed back
def transform(self, weight):
a = set()
        # The coo format can store repeated entries at the same position
row = []
col = []
data = []
        # i is a row index in the old matrix; label[i] is its row index in the new one
for i in range(len(self.label)):
if self.label[i] in a:
continue
a.add(self.label[i])
for j in range(i, len(self.label)):
if self.label[j] == self.label[i]:
temp = weight[j].rows[0]
col += temp
temp = [self.label[i] for t in range(len(temp))]
row += temp
data += weight[j].data[0]
newWeight = coo_matrix((data, (row, col)), shape=(k,weight.shape[1]))
return newWeight.transpose()
    # Run predictions on a new list of requests
def predict(self, new_queries):
try:
with open('model/' + self.getname() + '.pickle', 'rb') as input:
self = pickle.load(input)
printT('loading ' + self.getname() + ' model success')
except FileNotFoundError:
printT('start to train the ' + self.getname() + ' model')
self.Train()
printT('start predict:')
        # URL-decode
new_queries = [urllib.parse.unquote(url) for url in new_queries]
X_predict = self.vectorizer.transform(new_queries)
if use_k:
printT('Transform Data')
X_predict = self.transform(X_predict.tolil().transpose())
printT('Transform Succeed, Start Predicting:')
res = self.classifier.predict(X_predict)
printT('Predict Succeed, Total:' + str(len(res)))
result = {}
result[0] = []
result[1] = []
        # Zip the two lists into (query, result) pairs
for q, r in zip(new_queries, res):
result[r].append(q)
printT('good query: ' + str(len(result[0])))
printT('bad query: ' + str(len(result[1])))
return result
class SVM(Baseframe):
def getname(self):
if use_k:
return 'SVM__n'+str(n)+'_k'+str(k)
return 'SVM_n'+str(n)
def __init__(self):
        # Define the SVM classifier model
self.classifier = svm.SVC()
class LG(Baseframe):
def getname(self):
if use_k:
return 'LG__n'+str(n)+'_k'+str(k)
return 'LG_n'+str(n)
def __init__(self):
        # Define the logistic regression classifier model
self.classifier = LogisticRegression()
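A minimal usage sketch for these classes (assuming the `data/good`, `data/bad` and `model/` directories exist as the module expects; the example URLs are illustrative):

```python
if __name__ == '__main__':
    model = LG()  # or SVM()
    result = model.predict([
        "http://example.com/index.php?id=1",
        "http://example.com/index.php?id=1%27%20OR%20%271%27=%271",
    ])
    # result[0]: queries classified as benign; result[1]: classified as malicious
    for query in result[1]:
        printT("flagged: " + query)
```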

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,065 | extension: py | num_repo_files: 5 | filename: model.py | num_lang_files: 3 | alphanum_fraction: 0.549358 | alpha_fraction: 0.544922 | hex_fraction: 0 | num_lines: 224 | avg_line_length: 27.174107 | max_line_length: 98

---
repo_name: rexnie/bbtrack | __id__: 16,037,407,931,096 | blob_id: e444e1c12e1cf1d3fc285557ccdcbedbe84da5e0 | directory_id: 395f0ca9eeffd2e4a26bcc6078dcc966e6499728 | path: /nsdevman/nsdevman/nstask/attrs_groupdevelop.py | content_id: 82a8cb94a1b7f2e0bfa7397ad09538b20c7f6067 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/rexnie/bbtrack | snapshot_id: b8f8ce4c73819df1123033d0531c344beda2d452 | revision_id: 0b09dfc18f19c66b7eb7517f43bf12dd160a2f20 | branch_name: refs/heads/master | visit_date: 2016-07-27T14:44:04.119297 | revision_date: 2014-04-13T07:18:15 | committer_date: 2014-04-13T07:18:15 | github_id: 33,939,190 | star_events_count: 1 | fork_events_count: 0 | gha_*: null

content:
#!/usr/bin/python
from nsdevman.nsroot import _
from nsdevman.nstask.attrs_basic import *
class NSTaskAttrDevelopProject(NSTaskRelatedProject):
dbGroup = DBTaskAttrDevelopGroup
dbGroupColumn = 'project'
label = _('RelatedProject')
def __init__(self, objTask, descTaskAttr):
NSTaskAttrDevelopProject.__base__.__init__(self, objTask, descTaskAttr)
class NSTaskAttrDevelopSource(NSTaskAttrStatus):
dbGroup = DBTaskAttrDevelopGroup
dbGroupColumn = 'source'
label = _('DevelopSource')
def __init__(self, objTask, descTaskAttr):
NSTaskAttrDevelopSource.__base__.__init__(self, objTask, descTaskAttr)
class NSTaskAttrDevelopMaker(NSTaskAttrOwner):
dbGroup = DBTaskAttrDevelopGroup
dbGroupColumn = 'maker'
label = _('DevelopMaker')
def __init__(self, objTask, descTaskAttr):
NSTaskAttrDevelopMaker.__base__.__init__(self, objTask, descTaskAttr)
class NSTaskAttrDevelopCategory(NSTaskAttrStatus):
dbGroup = DBTaskAttrDevelopGroup
dbGroupColumn = 'category'
label = _('DevelopCategory')
def __init__(self, objTask, descTaskAttr):
NSTaskAttrDevelopCategory.__base__.__init__(self, objTask, descTaskAttr)
class NSTaskAttrDevelopContents(NSTaskAttrTitle):
dbGroup = DBTaskAttrDevelopGroup
dbGroupColumn = 'contents'
label = _('DevelopContents')
def __init__(self, objTask, descTaskAttr):
NSTaskAttrDevelopContents.__base__.__init__(self, objTask, descTaskAttr)
class NSTaskAttrDevelopPriority(NSTaskAttrStatus):
dbGroup = DBTaskAttrDevelopGroup
dbGroupColumn = 'priority'
label = _('DevelopPriority')
def __init__(self, objTask, descTaskAttr):
NSTaskAttrDevelopPriority.__base__.__init__(self, objTask, descTaskAttr)
class NSTaskAttrDevelopFeedBack(NSTaskAttrTitle):
dbGroup = DBTaskAttrDevelopGroup
dbGroupColumn = 'feedback'
label = _('DevelopFeedBack')
def __init__(self, objTask, descTaskAttr):
NSTaskAttrDevelopFeedBack.__base__.__init__(self, objTask, descTaskAttr)
class NSTaskAttrDevelopType(NSTaskAttrStatus):
dbGroup = DBTaskAttrDevelopGroup
dbGroupColumn = 'type'
label = _('DevelopType')
def __init__(self, objTask, descTaskAttr):
NSTaskAttrDevelopType.__base__.__init__(self, objTask, descTaskAttr)
class NSTaskAttrDevelopEffect(NSTaskAttrTitle):
dbGroup = DBTaskAttrDevelopGroup
dbGroupColumn = 'effect'
label = _('DevelopEffect')
def __init__(self, objTask, descTaskAttr):
NSTaskAttrDevelopEffect.__base__.__init__(self, objTask, descTaskAttr)

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,648 | extension: py | num_repo_files: 448 | filename: attrs_groupdevelop.py | num_lang_files: 138 | alphanum_fraction: 0.694864 | alpha_fraction: 0.694864 | hex_fraction: 0 | num_lines: 74 | avg_line_length: 34.72973 | max_line_length: 80

---
repo_name: alexarse/InstaBooth | __id__: 6,511,170,453,226 | blob_id: 44169d96b8bbfdcc419b6b214da08feaaaa94cd9 | directory_id: 3967f0821f5be413b06e6bfcf5a30c406b5f4f2b | path: /myapp.py | content_id: a403ab840e3861d42354ff8c468321b7111ce9fd | detected_licenses: ["MIT", "BSD-3-Clause"] | license_type: permissive | repo_url: https://github.com/alexarse/InstaBooth | snapshot_id: b06d5fe43ba5095e0183c8a10955e3917c5af392 | revision_id: 23577b5b864b734ea885ed6c278e063672f7bb7b | branch_name: refs/heads/master | visit_date: 2016-08-07T19:21:36.663801 | revision_date: 2014-08-11T16:24:41 | committer_date: 2014-08-11T16:24:41 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
from instagram.client import InstagramAPI
import urllib
# Get your key/secret from http://instagram.com/developer/
# 'client_id': 'e14ed00c7cf54078882e9e57de522c0a',
# 'client_secret': '291afdbb1dff488eafc24631aabfd726',
# 'redirect_uri': 'http://alexarse.wordpress.com/'
INSTAGRAM_CLIENT_ID = 'e14ed00c7cf54078882e9e57de522c0a'
INSTAGRAM_CLIENT_SECRET = '291afdbb1dff488eafc24631aabfd726'
# plop those babies in!
api = InstagramAPI(client_id=INSTAGRAM_CLIENT_ID,
client_secret=INSTAGRAM_CLIENT_SECRET)
# popular_media = api.media_popular(count=20)
# popular_media = api.media_popular('hmosheaga')
# tag = api.tag(tag_name='test')
# popular_media = api.tag_search(tag);
# popular_media = tag_recent_media(count=20, tag_name='hmosheaga')
tag_search, next_tag = api.tag_search(q="meinabearcostumewithmygrildfriend")
print tag_search
# tag_recent_media, next = api.tag_recent_media(tag_name=tag_search[0].name)
# i = 0
# for tag_media in tag_recent_media:
# img_name = 'tt/' + str(i)
# # get_standard_resolution_url
# urllib.urlretrieve(tag_media.get_low_resolution_url(), img_name + ".jpg")
# i = i + 1
# print tag_media.get_standard_resolution_url()
# photos.append('<img src="%s"/>' % tag_media.get_standard_resolution_url())
# print popular_media
# print(popular_media)
#extract urls of popular images to a list
# photolist = []
# for media in popular_media:
# photolist.append(media.images['standard_resolution'].url)
# print 'Top photos from Instagram'
# html = ''
# #show the original image thumbnail
# for p in photolist:
# html = html + '<img src=' + p + ' width="150" />'
# for k in photolist:
# print k + "n";

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,680 | extension: py | num_repo_files: 3 | filename: myapp.py | num_lang_files: 2 | alphanum_fraction: 0.701786 | alpha_fraction: 0.654167 | hex_fraction: 0 | num_lines: 58 | avg_line_length: 27.948276 | max_line_length: 77

---
repo_name: fffk3045167/desktop2 | __id__: 163,208,761,306 | blob_id: 5ec31038b9274378a6692c63335dff095171d26a | directory_id: 3360762f3c870acd245ab2a37954ef75b05247a8 | path: /base/PIL/im_tools.py | content_id: 11cf40beb55db81be201e9b3a5c82bcc71d3e0df | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/fffk3045167/desktop2 | snapshot_id: 66d335e42100a5942e3beb43bf68b7006b14dcca | revision_id: 5c7b5e67659cae56b5a0093c0c9819fc622a67dc | branch_name: refs/heads/master | visit_date: 2023-01-19T03:22:15.422108 | revision_date: 2020-11-17T13:01:27 | committer_date: 2020-11-17T13:01:27 | github_id: 269,377,880 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
import os

from PIL import Image
from pylab import *

def get_im_list(file_path):
    """Return a list of filenames for all JPG images in a directory."""
    return [os.path.join(file_path, f) for f in os.listdir(file_path) if f.endswith(".jpg")]

def im_resize(im, sz):
    """Resize an image array using a PIL object."""
    pil_im = Image.fromarray(uint8(im))
    return array(pil_im.resize(sz))

def hist_eq(im, nbr_bins=256):
    """Histogram-equalize a grayscale image."""
    # Compute the image histogram
    im_hist, bins = histogram(im.flatten(), nbr_bins, normed=True)
    cdf = im_hist.cumsum()  # cdf = cumulative distribution function
    cdf = 255 * cdf / cdf[-1]  # normalize
    # Use linear interpolation of the cdf to compute new pixel values
    im2 = interp(im.flatten(), bins[:-1], cdf)
    return im2.reshape(im.shape), cdf

def compute_average(im_list):
    """Compute the average image of a list of images."""
    # Open the first image and store it in a float array
    average_im = array(Image.open(im_list[0]), 'f')
    for im_name in im_list[1:]:
        try:
            average_im += array(Image.open(im_name))
        except Exception:
            print(im_name + '...skipped')
    average_im /= len(im_list)
    # Return the average image as uint8
    return array(average_im, 'uint8')
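A short usage sketch for these helpers (the `images/` directory and the grayscale conversion are illustrative):

```python
im_list = get_im_list("images")                    # every .jpg under images/
avg = compute_average(im_list)                     # uint8 average image

gray = array(Image.open(im_list[0]).convert("L"))  # grayscale array
im_eq, cdf = hist_eq(gray)                         # contrast spread over 0..255
```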

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,314 | extension: py | num_repo_files: 79 | filename: im_tools.py | num_lang_files: 61 | alphanum_fraction: 0.610075 | alpha_fraction: 0.596082 | hex_fraction: 0 | num_lines: 47 | avg_line_length: 21.829787 | max_line_length: 87

---
repo_name: Photometrics/PyVCAM | __id__: 3,874,060,517,942 | blob_id: f55ddc89c66994f9626d61e2e4607784455aa6bd | directory_id: 5f75ec4ce4caef22e49f2b6c6511534b7a8f3adb | path: /tests/single_image_polling.py | content_id: 649eab76992ebdc214b192cc4d9d1923f0f4808f | detected_licenses: ["MIT"] | license_type: permissive | repo_url: https://github.com/Photometrics/PyVCAM | snapshot_id: abf1c0f2c5c504fb63ee5298e99f69db4fca2e9e | revision_id: 1df94cb0437c129dda1f0fe35c18f64620b18a97 | branch_name: refs/heads/master | visit_date: 2023-09-04T09:31:40.526986 | revision_date: 2023-08-17T12:05:30 | committer_date: 2023-08-17T12:05:30 | github_id: 141,481,092 | star_events_count: 30 | fork_events_count: 17 | gha_license_id: MIT | gha_fork: false | gha_event_created_at: 2023-03-29T18:37:01 | gha_created_at: 2018-07-18T19:31:07 | gha_updated_at: 2023-03-09T03:39:19 | gha_pushed_at: 2023-03-29T18:36:51 | gha_size: 355 | gha_stargazers_count: 22 | gha_forks_count: 13 | gha_open_issues_count: 3 | gha_language: Python | gha_archived: false | gha_disabled: false

content:
from pyvcam import pvc
from pyvcam.camera import Camera
def main():
# Initialize PVCAM and find the first available camera.
pvc.init_pvcam()
cam = [cam for cam in Camera.detect_camera()][0]
cam.open()
cam.speed_table_index = 0
for i in range(5):
frame = cam.get_frame(exp_time=20)
print("First five pixels of frame: {}, {}, {}, {}, {}".format(*frame[:5]))
cam.close()
pvc.uninit_pvcam()
if __name__ == "__main__":
main()
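Since `Camera.detect_camera()` is consumed as an iterator above, the first camera can also be taken without materializing a list:

```python
cam = next(Camera.detect_camera())  # equivalent to the list-comprehension form
```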

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 494 | extension: py | num_repo_files: 23 | filename: single_image_polling.py | num_lang_files: 19 | alphanum_fraction: 0.57085 | alpha_fraction: 0.558704 | hex_fraction: 0 | num_lines: 18 | avg_line_length: 25.444444 | max_line_length: 82

---
repo_name: stephanpoetschner/django-skeleton-project | __id__: 13,125,420,082,145 | blob_id: 95bf6cfaf4cd9a993205cbe892b9fdbcdf24edab | directory_id: be1a7fc07665e738031431b6632a83869061144d | path: /myproject/settings.py | content_id: 43e9d3a8c7db7988537b54fc4243ebb1d0e811ba | detected_licenses: ["Unlicense"] | license_type: permissive | repo_url: https://github.com/stephanpoetschner/django-skeleton-project | snapshot_id: 88e94abda9085d7c7ef3ff94c7294e4e2af4bcad | revision_id: 04146c8f629052768d8095cd27a5151789b5bc7e | branch_name: refs/heads/master | visit_date: 2021-01-23T00:14:56.297475 | revision_date: 2013-10-03T08:48:29 | committer_date: 2013-10-03T08:48:29 | github_id: 865,808 | star_events_count: 1 | fork_events_count: 2 | gha_*: null

content:
# -*- coding: utf-8 -*-
import posixpath
from path import path
import sys
from django.core.urlresolvers import reverse_lazy
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SERVE_MEDIA = False
ENABLE_ERROR_VIEWS = False
USE_X_FORWARDED_HOST = True
PROJECT_ROOT = path(__file__).abspath().realpath().dirname()
sys.path.insert(0, PROJECT_ROOT / "apps")
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'woj)6x+lm$yz(bxh746fz9z064=iecd(v)=qc7fxij$=0e2mdw'
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': PROJECT_ROOT / 'dev.db',
}
}
SITE_ID = 1
DEFAULT_HTTP_PROTOCOL = "http"
PAGINATION_DEFAULT_PAGINATION = 25
PAGINATION_DEFAULT_WINDOW = 2
PAGINATION_DEFAULT_ORPHANS = 0
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Vienna'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'de-AT'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
ugettext = lambda s: s # rather hackish but suggested by...
## ... http://docs.djangoproject.com/en/1.1/topics/i18n/deployment/#how-django-discovers-language-preference
## to prevent circular dependencies
LANGUAGES = (
#('en', ugettext('English')),
('de', ugettext('German')),
)
MEDIA_ROOT = PROJECT_ROOT / 'assets' / 'uploaded' / '' # ensure trailing slash
MEDIA_URL = '/assets/uploaded/'
STATIC_ROOT = PROJECT_ROOT / "assets" / "static" / '' # ensure trailing slash
STATIC_URL = '/assets/static/'
STATICFILES_DIRS = (
PROJECT_ROOT / "static",
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'compressor.finders.CompressorFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'pagination.middleware.PaginationMiddleware',
)
ROOT_URLCONF = 'urls'
LOGIN_URL = reverse_lazy('login')
LOGIN_REDIRECT_URL = reverse_lazy('home')
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = PROJECT_ROOT / '..' / 'debug-mails'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'auth.backends.EmailModelBackend',
)
AUTH_PROFILE_MODULE = 'profiles.UserProfile'
TEMPLATE_DIRS = (
PROJECT_ROOT / "templates",
)
CONTEXT_SETTINGS = (
"DEBUG",
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"core.context_processors.global_settings",
"core.context_processors.site_url",
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.markup',
'django.contrib.messages',
#external
'compressor',
'django_extensions',
'pagination',
'easy_thumbnails',
'uni_form',
'south',
#internal
'core',
'auth',
'profiles',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING_PATH = PROJECT_ROOT / '..' / 'logs' / 'application.log'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s | File "%(pathname)s", line %(lineno)d, in %(funcName)s | process:%(process)d, thread:%(thread)d'
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'file_debug': {
'level': 'WARN',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'verbose',
'filename': LOGGING_PATH,
},
},
'loggers': {
'': {
'handlers': [ 'file_debug' ],
'level': 'DEBUG',
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from localsettings import *
except ImportError:
pass
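A matching `localsettings.py` override, as the comment above describes, might look like this (illustrative values only, not from the repo):

```python
# localsettings.py -- per-environment overrides, imported last by settings.py
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SERVE_MEDIA = True

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'myproject',
        'USER': 'myproject',
        'PASSWORD': 'change-me',
        'HOST': 'localhost',
    }
}
```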

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,676 | extension: py | num_repo_files: 32 | filename: settings.py | num_lang_files: 14 | alphanum_fraction: 0.677202 | alpha_fraction: 0.671959 | hex_fraction: 0 | num_lines: 233 | avg_line_length: 27.652361 | max_line_length: 169

---
repo_name: MrThanasiz/NWS_AS | __id__: 15,513,421,900,516 | blob_id: ef26a6126d79b63ba079c52d8ca2d10f38998dc4 | directory_id: 5147d7ab790c07c60b34eeb35f72e7f7c4a5acfd | path: /ClientFolder/ResponseProcessorClient.py | content_id: a6db648da17324f0a958d5191315e1c92cf7e6c4 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/MrThanasiz/NWS_AS | snapshot_id: 04f0a4eaea1bc625d004d373b709b839689a1645 | revision_id: 061489ca414a82d28630b78bc39d3a35c9e23450 | branch_name: refs/heads/master | visit_date: 2020-08-15T15:16:19.877681 | revision_date: 2019-12-09T18:27:16 | committer_date: 2019-12-09T18:27:16 | github_id: 215,362,313 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
import time
class responceProcessor:
def __init__(self):
self.state = "keyExchange"
def messageRouter(self, message, module):
if self.state == "keyExchange":
module.securityClient.messageRouter(message, module)
else:
contents = module.securityClient.decryptData(message)
print(contents.decode())

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 367 | extension: py | num_repo_files: 10 | filename: ResponseProcessorClient.py | num_lang_files: 9 | alphanum_fraction: 0.634877 | alpha_fraction: 0.634877 | hex_fraction: 0 | num_lines: 13 | avg_line_length: 27.230769 | max_line_length: 65

---
repo_name: uselessrepo/21jan | __id__: 2,817,498,576,254 | blob_id: ceb1942f6a613eb938e009ad18ad0dacb85936f5 | directory_id: 736b20096bc03211cb58d90bab0768015c4dd0d5 | path: /p28.py | content_id: b9a400c67bc84f26cc5b6a4a6cd28ea36d870a6a | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/uselessrepo/21jan | snapshot_id: 1268d96bacf457a77966e84399751825e9cbbded | revision_id: f5aa7bd772101c9163d8d75bfdd853d5f0d581c1 | branch_name: refs/heads/master | visit_date: 2020-12-15T21:04:57.571590 | revision_date: 2020-01-21T04:12:53 | committer_date: 2020-01-21T04:12:53 | github_id: 235,253,804 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
base = int(input("Enter base : "))
height = int(input("Enter height : "))
area = 1 / 2 * (base * height)
print(f'area is {area}')

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 131 | extension: py | num_repo_files: 17 | filename: p28.py | num_lang_files: 17 | alphanum_fraction: 0.59542 | alpha_fraction: 0.580153 | hex_fraction: 0 | num_lines: 4 | avg_line_length: 31.25 | max_line_length: 38

---
repo_name: MTrajK/coding-problems | __id__: 5,119,601,032,098 | blob_id: 02282bf083059458e810d0b19ac673b278887743 | directory_id: 0dc1e63f90bc01c5d9c175e978207e1aee69823d | path: /Other/basic_calculator.py | content_id: f9baf9261d07a454bd356dcaf701d5e5cad09da5 | detected_licenses: ["MIT"] | license_type: permissive | repo_url: https://github.com/MTrajK/coding-problems | snapshot_id: ad9ef6a4c6dfd063c7cc5c980afe0f9d000c3995 | revision_id: b1f074f97196c1159fec17419e980e419bc42845 | branch_name: refs/heads/master | visit_date: 2023-08-16T03:14:08.267016 | revision_date: 2023-04-21T21:30:13 | committer_date: 2023-04-21T21:30:13 | github_id: 186,043,715 | star_events_count: 3,343 | fork_events_count: 682 | gha_license_id: MIT | gha_fork: false | gha_event_created_at: 2023-08-04T12:49:09 | gha_created_at: 2019-05-10T19:29:20 | gha_updated_at: 2023-08-04T08:06:12 | gha_pushed_at: 2023-08-04T12:49:08 | gha_size: 203 | gha_stargazers_count: 3,098 | gha_forks_count: 607 | gha_open_issues_count: 1 | gha_language: Python | gha_archived: false | gha_disabled: false

content:
'''
Basic Calculator
Implement a basic calculator to evaluate a simple expression string.
The expression string may contain open '(' and closing parentheses ')',
the plus '+' or minus sign '-', non-negative integers and empty spaces ' '.
Input: '(1+(4+5+2)-3)+(6+8)'
Output: 23
Input: ' 2-1 + 2 '
Output: 3
=========================================
Start from the first character and respect the math rules. When brackets come, go inside the brackets
and compute the inner result, after that continue with adding or subtracting.
Time Complexity: O(N)
Space Complexity: O(K) , much less than N (the deepest level of brackets)
'''
############
# Solution #
############
def basic_calculator(s):
return calculate(s, 0)[0]
def calculate(s, i):
sign = 1 # 1 means '+' and -1 means '-'
res = 0
num = 0
while i < len(s) and s[i] != ')':
if s[i] >= '0' and s[i] <= '9':
# find the whole number
num = num * 10 + int(s[i])
elif s[i] == '(':
# calculate inside the brackets
brackets = calculate(s, i + 1)
res += brackets[0] * sign
i = brackets[1] # continue from the new i
elif s[i] != ' ':
# add the previous number using the old sign
res += num * sign
num = 0
if s[i] == '-':
sign = -1
elif s[i] == '+':
sign = 1
i += 1
res += num * sign
return (res, i)
###########
# Testing #
###########
# Test 1
# Correct result => 23
print(basic_calculator('(1+(4+5+2)-3)+(6+8)'))
# Test 2
# Correct result => 3
print(basic_calculator(' 2-1 + 2 '))
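The same grammar can also be evaluated without recursion by pushing the outer `(result, sign)` on `'('` and popping on `')'`; a stack-based sketch for comparison (ours, not from the repo):

```python
def basic_calculator_stack(s):
    res, num, sign = 0, 0, 1
    stack = []
    for c in s:
        if c.isdigit():
            num = num * 10 + int(c)
        elif c in '+-':
            res += num * sign
            num, sign = 0, (1 if c == '+' else -1)
        elif c == '(':
            stack.append((res, sign))  # save the outer state
            res, sign = 0, 1           # start a fresh sub-expression
        elif c == ')':
            res += num * sign
            num = 0
            outer_res, outer_sign = stack.pop()
            res = outer_res + outer_sign * res
    return res + num * sign

assert basic_calculator_stack('(1+(4+5+2)-3)+(6+8)') == 23
assert basic_calculator_stack(' 2-1 + 2 ') == 3
```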

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,686 | extension: py | num_repo_files: 121 | filename: basic_calculator.py | num_lang_files: 119 | alphanum_fraction: 0.513049 | alpha_fraction: 0.485765 | hex_fraction: 0 | num_lines: 69 | avg_line_length: 23.449275 | max_line_length: 101

---
repo_name: ctdurazo/BDex | __id__: 3,350,074,510,881 | blob_id: f82d77eb654153998c957788dae9ae9a7a7ece59 | directory_id: d574c2f75e7f0aa98ffb1d07f54de4fcb2795ec9 | path: /DAO/oauthDAO.py | content_id: 7831104f5ff197a32e3b755740bdfa129d213a78 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/ctdurazo/BDex | snapshot_id: d2c04c8b1d5195d63b93a0be7bc9c712d9ed8b80 | revision_id: 2ae842f6ca97fee26f428a94e86b8cb6a22f3f29 | branch_name: refs/heads/master | visit_date: 2020-02-29T17:33:58.967474 | revision_date: 2017-11-28T19:00:47 | committer_date: 2017-11-28T19:00:47 | github_id: 41,808,405 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
import sqlite3 as lite
# this will need to be changed to use google auth
def addOauth(_database, _userid, _googleaccesstoken, _hipchataccesstoke, _jiraaccesstoken, _twitterusername):
con = None
try:
# connect to sqlite3
con = lite.connect(_database)
with con:
cur = con.cursor()
cur.execute("SELECT * FROM oauth2store WHERE userid=?", (_userid,))
data = cur.fetchall()
if len(data) > 0:
return data[0][0]
else:
cur.execute("INSERT INTO oauth2store (userid, googleaccesstoken, hipchataccesstoken, jiraaccesstoken, "
"twitterusername) VALUES(?, ?, ?, ?, ?)",
(_userid, _googleaccesstoken, _hipchataccesstoke, _jiraaccesstoken, _twitterusername))
con.commit()
with con:
cur = con.cursor()
cur.execute("SELECT * FROM oauth2store WHERE userid=?", (_userid,))
data = cur.fetchall()
if len(data) > 0:
return data[0][0]
else:
return -1
except lite.Error:
return -1
finally:
if con:
con.close()
# get information
def getOauth(_database, _userID):
con = None
try:
# connect to sqlite3
con = lite.connect(_database)
with con:
con.rollback()
cur = con.cursor()
cur.execute("SELECT * FROM oauth2store WHERE userID = ?", (_userID,))
con.commit()
return cur.fetchall()
    except lite.Error as e:
return str(e)
finally:
if con:
con.close()
def getHipChat(_database, _userID):
con = None
try:
# connect to sqlite3
con = lite.connect(_database)
with con:
con.rollback()
cur = con.cursor()
cur.execute("SELECT hipchataccesstoken FROM oauth2store WHERE userID = ?", (_userID,))
con.commit()
return cur.fetchall()
    except lite.Error as e:
return str(e)
finally:
if con:
con.close()
def updateHipChat(_database, _userID, _hipchaturl):
con = None
try:
# connect to sqlite3
con = lite.connect(_database)
with con:
cur = con.cursor()
cur.execute("SELECT * FROM oauth2store WHERE userID=?", (_userID,))
data = cur.fetchall()
if len(data) > 0:
cur.execute("UPDATE oauth2store SET hipchataccesstoken = ? WHERE userID = ?", (_hipchaturl, _userID))
else:
cur.execute("INSERT INTO oauth2store (userID, hipchataccesstoken) VALUES(?, ?)", (_userID, _hipchaturl))
con.commit()
return 'Update Successful'
    except lite.Error as e:
return str(e)
finally:
if con:
con.close()
def getTwitter(_database, _userID):
con = None
try:
# connect to sqlite3
con = lite.connect(_database)
with con:
cur = con.cursor()
cur.execute("SELECT twitterusername FROM oauth2store WHERE userID = ?", (_userID,))
con.commit()
return cur.fetchall()
    except lite.Error as e:
return str(e)
finally:
if con:
con.close()
# update team information
def updateTwitter(_database, _twitterusername, _userID):
con = None
try:
# connect to sqlite3
con = lite.connect(_database)
with con:
cur = con.cursor()
cur.execute("SELECT * FROM oauth2store WHERE userID=?", (_userID,))
data = cur.fetchall()
if len(data) > 0:
cur.execute("UPDATE oauth2store SET twitterusername = ? WHERE userID = ?", (_twitterusername, _userID,))
else:
cur.execute("INSERT INTO oauth2store (userID, twitterusername) VALUES(?, ?)", (_userID, _twitterusername))
con.commit()
return 'Update Successful'
    except lite.Error as e:
return str(e)
finally:
if con:
con.close()
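All of these functions assume an `oauth2store` table; reconstructed from the column names used above, its schema is roughly (our sketch, not a file from the repo):

```python
import sqlite3 as lite

SCHEMA = """
CREATE TABLE IF NOT EXISTS oauth2store (
    userid TEXT PRIMARY KEY,
    googleaccesstoken TEXT,
    hipchataccesstoken TEXT,
    jiraaccesstoken TEXT,
    twitterusername TEXT
)
"""

def init_db(_database):
    con = lite.connect(_database)
    with con:
        con.execute(SCHEMA)
    con.close()
```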

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,091 | extension: py | num_repo_files: 57 | filename: oauthDAO.py | num_lang_files: 27 | alphanum_fraction: 0.540455 | alpha_fraction: 0.533366 | hex_fraction: 0 | num_lines: 146 | avg_line_length: 27.020548 | max_line_length: 118

---
repo_name: leodrew/leoaclab | __id__: 5,755,256,214,913 | blob_id: 97e89ea450a974f531a84d5c8a45d14ec35c4d29 | directory_id: a783897a3fe80583efae3b663ee3937239f340d9 | path: /acapp/views.py | content_id: e653a0dd690871547c928d7895b9c5dca268d9e7 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/leodrew/leoaclab | snapshot_id: fb4e25722e1878e6173a1b842e7f60071505effd | revision_id: 0094a99205ca0659c0c5faa30dab5abd29c97393 | branch_name: refs/heads/main | visit_date: 2023-03-12T10:32:31.692659 | revision_date: 2021-02-25T14:29:44 | committer_date: 2021-02-25T14:29:44 | github_id: 342,270,906 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
from django.shortcuts import render
from acapp.form import UserForm, Memberform, PostblogForm
# Create your views here.
from django.contrib.auth import authenticate,login,logout
from django.http import HttpResponseRedirect,HttpResponse
from django.urls import reverse
from django.contrib.auth.decorators import login_required
def index(request):
return render(request,'acapp/index.html')
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
member_form = Memberform(data=request.POST)
if user_form.is_valid() and member_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
member = member_form.save(commit=False)
member.user = user
member.save()
registered = True
else:
print(user_form.errors)
else:
user_form = UserForm()
member_form = Memberform()
return render(request, 'acapp/register.html',
{'user_form': user_form,
'member_form': member_form,
'registered': registered})
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect(reverse('index'))
            else:
                return HttpResponse('You have not registered')
        else:
            print('Someone tried to hijack this account')
            print("username {} and password {}".format(username, password))
            return HttpResponse('Wrong username or password')
else:
return render(request,'acapp/login.html',{})
def postblog(request):
if request.method == 'POST':
postblog_form = PostblogForm(data=request.POST)
if postblog_form.is_valid():
postblog = postblog_form.save()
postblog.save()
return HttpResponseRedirect(reverse('index'))
else:
postblog_form = PostblogForm()
return render(request,'acapp/postblog.html',{'postblogform':postblog_form})

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,444 | extension: py | num_repo_files: 11 | filename: views.py | num_lang_files: 7 | alphanum_fraction: 0.607083 | alpha_fraction: 0.607083 | hex_fraction: 0 | num_lines: 77 | avg_line_length: 30.181818 | max_line_length: 79

---
repo_name: nordcap/learn-python | __id__: 15,874,199,126,523 | blob_id: 213723f80db962d1a62fb2905604e61e167fa77f | directory_id: ab8677c1b763e348886d8a99fb9ac0ad08c7f86d | path: /Stepik-Поколение Python: курс для продвинутых/4 Вложенные списки/4.6 Матрицы 3/task_4.py | content_id: 9b681313a85d2b05f4b18e99d0e6526ee3e7531b | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/nordcap/learn-python | snapshot_id: 96a0314a084e56f3a8d5802080c10eec07bcc8ea | revision_id: 5f01971d50964ce392ad6f0ee97fa83bdf922022 | branch_name: refs/heads/master | visit_date: 2022-05-18T23:19:09.023509 | revision_date: 2022-05-07T08:10:29 | committer_date: 2022-05-07T08:10:29 | github_id: 83,015,313 | star_events_count: 1 | fork_events_count: 0 | gha_*: null

content:
'''
Filling 2
The program receives two natural numbers n and m as input. Write a program that
creates an n x m matrix and fills it according to the sample.
Input format
A single line containing two natural numbers n and m: the number of rows and columns of the matrix.
Output format
The program must print the matrix exactly as in the sample.
Note: to print the matrix elements as in the samples, reserve exactly 3 characters per
element using the string method ljust(). A solution without ljust() is also accepted 😇
'''
arr = [int(i) for i in input().split()]
n = arr[0]
m = arr[1]
matrix = [[0 for j in range(m)] for i in range(n)]
elem = 1
for j in range(m):
for i in range(n):
matrix[i][j] = elem
elem += 1
trans_matrix = [[matrix[i][j] for i in range(n - 1, -1, -1)] for j in range(m)]  # built but never used below
for i in range(n):
for j in range(m):
print(str(matrix[i][j]).ljust(3), end='')
print()
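A worked example (ours, not from the source): with input `3 4` the matrix is filled column by column, so the program prints the rows below.

```python
# Worked example: n=3, m=4, filled column by column.
n, m = 3, 4
matrix = [[0] * m for _ in range(n)]
elem = 1
for j in range(m):
    for i in range(n):
        matrix[i][j] = elem
        elem += 1
for row in matrix:
    print(''.join(str(v).ljust(3) for v in row))
# prints:
# 1  4  7  10
# 2  5  8  11
# 3  6  9  12
```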

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,511 | extension: py | num_repo_files: 803 | filename: task_4.py | num_lang_files: 749 | alphanum_fraction: 0.695821 | alpha_fraction: 0.684159 | hex_fraction: 0 | num_lines: 30 | avg_line_length: 33.3 | max_line_length: 211

---
repo_name: yurimalheiros/textflow | __id__: 12,841,952,224,414 | blob_id: 8af1c80bf9300e20aaef0d4c7f2fe7895a392092 | directory_id: 13830825b25ec01ec2874094a10f36b4b1336ac7 | path: /tf/triggers/tabstospaces.py | content_id: cd46a022669f74ac5429a1847eb980647f3384c2 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/yurimalheiros/textflow | snapshot_id: db62047b43c44d43c6efc67ad94f8118f984b076 | revision_id: c21ddf8aba58dc83d58a8db960d58d91ee2e5c74 | branch_name: refs/heads/master | visit_date: 2016-09-10T14:47:18.159229 | revision_date: 2011-11-02T14:26:31 | committer_date: 2011-11-02T14:26:31 | github_id: 1,927,215 | star_events_count: 1 | fork_events_count: 1 | gha_*: null

content:
# -*- coding: utf-8 -*-
#######################################################################
# Copyright © 2007-2009 Yuri Malheiros.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# the Free Software Foundation; version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#######################################################################
"""
This module implements the trigger of "ctrl+2".
Thanks to Tab Convert Gedit Plugin by Frederic Back.
"""
import tf.app
shortcut = "ctrl+2"
sticky = False
class TabsToSpaces(object):
def activate(self):
"""
Operations before trigger activation.
"""
self.document_manager = tf.app.document_manager
document = self.document_manager.get_active_document()
buffer = document.view.buffer
tab_size = document.view.get_tab_width()
text = document.get_text()
text = text.expandtabs(tab_size)
buffer.begin_user_action()
start_iter = buffer.get_start_iter()
end_iter = buffer.get_end_iter()
buffer.delete(start_iter, end_iter)
buffer.insert(start_iter, text)
buffer.end_user_action()
return True
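`str.expandtabs` does the real work in `activate`; a quick illustration of its column-aware behavior:

```python
line = "a\tbb\tc"
print(line.expandtabs(4))  # "a   bb  c" -- each tab pads to the next multiple of 4
```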

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,524 | extension: py | num_repo_files: 107 | filename: tabstospaces.py | num_lang_files: 92 | alphanum_fraction: 0.589626 | alpha_fraction: 0.581747 | hex_fraction: 0 | num_lines: 51 | avg_line_length: 28.862745 | max_line_length: 71

---
repo_name: Servoy/svyMultiLevelMenu | __id__: 19,619,410,625,023 | blob_id: 56f1323d2a91dc1a2d79200a5481101b91c26566 | directory_id: 5e730b57e1682bfef9cb5868e63bd480026c38fd | path: /multilevelmenu/multilevelmenu/multilevelmenu.spec | content_id: e512d5c723a472376afb5196cafe9142199376a5 | detected_licenses: ["MIT"] | license_type: permissive | repo_url: https://github.com/Servoy/svyMultiLevelMenu | snapshot_id: 2f7abc87de65263df1566ae359d103e484a40054 | revision_id: 23ffbdc2419f34750fcd232f57dbcfe95f50942f | branch_name: refs/heads/master | visit_date: 2020-09-10T14:36:29.856779 | revision_date: 2019-11-22T16:52:05 | committer_date: 2019-11-22T16:52:05 | github_id: 221,722,019 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
{
"name": "multilevelmenu-multilevelmenu",
"displayName": "multilevelmenu",
"version": 1,
"definition": "multilevelmenu/multilevelmenu/multilevelmenu.js",
"serverscript" : "multilevelmenu/multilevelmenu/multilevelmenu_server.js",
"libraries": [],
"model":
{
"menu" : {"type": "MenuItem[]" },
"brandText" : {"type": "tagstring", "default": ""},
"brandIconStyleClass" : {"type": "styleclass", "default": ""},
"brandImage" : {"type": "media", "default": ""},
"brandImageStyleClass" : {"type": "styleclass", "default": ""},
"styleClass" : {"type": "styleclass" },
"barStyle" : {"type": "styleclass" }
},
"types": {
"MenuItem": {
"itemId" : {"type": "string"},
"menuItems" : {"type": "MenuItem[]" },
"text" : {"type": "tagstring"},
"enabled" : {"type": "enabled", "default": true},
"isDivider" : {"type": "boolean","default": false},
"styleClass" : {"type": "styleclass" },
"iconStyleClass" : {"type": "styleclass" },
"imageSrc" : {"type": "media"},
"imageStyleClass" : {"type": "string", "default": "navbar-default"}
}
},
"handlers":
{
"onMenuItemSelected" : {
"parameters" : [
{ "name" : "menuItemId", "type" : "object" },
{ "name" : "event", "type" : "JSEvent" }
],
"returns" : "boolean"
}
},
"api":
{
"setRootMenuItems":
{
"parameters":
[
{ "name": "menuItems", "type": "MenuItem[]" }
]
},
"insertMenuItem" :{
"parameters":[
{
"name": "pv_menuItem",
"type": "MenuItem"
},
{
"name": "pv_parentID",
"type": "string"
}
],
"returns": "boolean"
},
"getMenuItem" :{
"parameters":[
{
"name": "menuItemId",
"type": "object"
}
],
"returns": "MenuItem"
},
"addMenuItem" :{
"parameters":[
{
"name": "menuItem",
"type": "MenuItem"
},
{
"name": "menuItemId",
"type": "object",
"optional" : true
},
{
"name": "index",
"type" : "int",
"optional" : true
}
],
"returns" : "boolean"
},
"removeMenuItem" :{
"parameters":[
{
"name": "menuItemId",
"type": "object"
}
],
"returns" : "boolean"
}
}
}

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,427 | extension: spec | num_repo_files: 12 | filename: multilevelmenu.spec | num_lang_files: 4 | alphanum_fraction: 0.459827 | alpha_fraction: 0.459415 | hex_fraction: 0 | num_lines: 103 | avg_line_length: 22.572816 | max_line_length: 75

---
repo_name: SamarEA/BasicsPython | __id__: 1,082,331,800,667 | blob_id: c68cc3dc3b65eb4c80b3c80570434712e4958023 | directory_id: 2a9ea49d97a9aab291a45c5fc4e398debc2aef99 | path: /KaratsupaMultipli.py | content_id: de9bbd6ae59cb41a7f8f0d6713f4f74bf63c0d10 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/SamarEA/BasicsPython | snapshot_id: c027cb85dd79ab8d90a462853fbbb0493eac72a9 | revision_id: 6d2d93b0f32a3c63fecc0bf9c329d5b144bf5a2f | branch_name: refs/heads/master | visit_date: 2017-12-10T04:31:37.395535 | revision_date: 2017-07-16T19:15:59 | committer_date: 2017-07-16T19:15:59 | github_id: 78,938,795 | star_events_count: 0 | fork_events_count: 1 | gha_*: null

content:
"""
Created on Sun Jul 16 08:40:35 2017
@author: Samar Elaraby
"""
# Implementation of Karatsuba Multiplication algorithm
# It is restricted to the multiplication of two n-digit numbers
def RecMulti(num1,num2):
x = str(num1)
y = str(num2)
if len(x) <= 2 or len(y) <= 2: #base case
return num1 * num2
else:
n = int(len(x))
m = int(len(y))
        # Dividing the two numbers in a proper way, so that the right
        # halves have the same number of digits
if (n//2)%2 != 0 or n%2 == 0:
a = int(x[: n//2])
b = int(x[n//2 :])
else:
a = int(x[: n//2 + 1])
b = int(x[n//2+ 1 :])
if (m//2)%2 != 0 or m%2 == 0:
c = int(y[: m//2])
d = int(y[m//2 :])
else:
c = int(y[: m//2 + 1])
d = int(y[m//2+ 1 :])
# The Recursive Algorithm
step1 = RecMulti(a, c)
step2 = RecMulti(b, d)
step3 = RecMulti(a+b, c+d)
#print(a, b, c, d, step1, step2, step3)
step4 = step3 - step2 - step1
# Determining the position of the left halves
if len(str(a)) + len(str(b)) == n or len(str(c)) + len(str(d)) == m:
order = len(str(b))*2 if len(str(b)) > len(str(d)) else len(str(d))*2
else:
order = (n - len(str(a)))*2
# The result
return 10**(order) * step1 + 10**(order//2) * step4 + step2
M = 108
N = 109
RecMulti(M, N)
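A quick sanity check against Python's built-in multiplication. Since the header restricts the routine to two n-digit (equal-length) numbers, we test equal digit counts only (our harness, not part of the original file):

```python
import random

# Restricted to equal digit counts, as the header above states.
for digits in range(1, 8):
    for _ in range(200):
        a = random.randint(10**(digits - 1), 10**digits - 1)
        b = random.randint(10**(digits - 1), 10**digits - 1)
        assert RecMulti(a, b) == a * b, (a, b)
print("all equal-length checks passed")
```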

src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,526 | extension: py | num_repo_files: 3 | filename: KaratsupaMultipli.py | num_lang_files: 2 | alphanum_fraction: 0.468545 | alpha_fraction: 0.423329 | hex_fraction: 0 | num_lines: 51 | avg_line_length: 28.921569 | max_line_length: 81

---
repo_name: romeieee/roam | __id__: 7,825,430,421,994 | blob_id: 213b416bf8eb485960cf232f3dea5d0ffd4bc582 | directory_id: 49b1652b2f3c6adcc1a4b2f8b19c4ee2b293cdf2 | path: /roam.py | content_id: 4208d86f6868de3c541170f9c25a64dff72582c1 | detected_licenses: [] | license_type: no_license | repo_url: https://github.com/romeieee/roam | snapshot_id: 2852f21f29b60e8adcf1b3c7af03f950ff22806e | revision_id: 53e75a17b5abfa6f09485d63739736f0ca5aef08 | branch_name: refs/heads/master | visit_date: 2016-08-03T16:25:06.741569 | revision_date: 2015-07-14T00:03:20 | committer_date: 2015-07-14T00:03:20 | github_id: 39,043,565 | star_events_count: 0 | fork_events_count: 0 | gha_*: null

content:
#!/usr/bin/env python
# This software was developed by Romaine Gordon. Its sole intention is
# to control an RC truck from a laptop via RF links connecting two
# microcontrollers, one attached to the laptop and the other to the truck.
# July 1, 2015
from Tkinter import *
from ScrolledText import ScrolledText
import ttk
import serial, time
def donothing():
filewin = Toplevel(root)
button = Button(filewin, text="Do nothing button")
button.pack()
# This function, when called, lets the user move the truck with on-screen keys
def call_control():
bf=Tk()
buttframe=Frame(bf)
bf.title("Controll")
Label(bf, text="On screen controll").grid(row=0, column=0, columnspan=3)
Label(bf, text="").grid(row=1)
button1 = Button(bf, width=10, text="Forward")
button2 = Button(bf, width=10, text="Backward")
button3 = Button(bf, width=10, text="Turn left")
button4 = Button(bf, width=10, text="Turn right")
button5 = Button(bf, width=10, text="Arrow keys", command=quit)
button1.grid(row=2, column=1,)
button2.grid(row=4, column=1,)
button3.grid(row=3, column=0,)
button4.grid(row=3, column=2,)
button5.grid(row=3, column=1,)
#frame ends here
# Build the comm port frame so the user can select the comm port the microcontroller is connected to
def call_comm():
cf=Tk()
comm_frame=Frame(cf)
cf.title("Comm ports")
cf.geometry("200x200")
# code to find available comm ports
l_comm=Label(cf, text="Available serial comm ports:")
l_comm.grid(row=0, columnspan=2, sticky=N+S+E+W, padx=10)
b1_comm=Button(cf, text="Find")
b2_comm=Button(cf, text="Cancel")
b3_comm=Button(cf, text="Apply")
b1_comm.grid(row=1, column=0, padx=10, sticky=W)
b2_comm.grid(row=3, column=1, padx=10, sticky=E)
b3_comm.grid(row=3, column=0, padx=10, sticky=W)
# This function, when called, allows the user to set the baud rate for data transmission
def call_settings(*args):
cs=Tk()
cs_frame=Frame(cs)
cs.title("Settings")
cs.geometry("300x300")
#add drop down list
def get_val(*args):
baud_rate.set(var.get())
return var.get()
Label(cs, text=" ").grid(row=0)
Label(cs, text="Baud rate automatically set to 9600").grid(row=1, column=0, columnspan=3, sticky=W, padx=8)
Label(cs, text="select desired baud rate").grid(row=2, column=0, columnspan=3, sticky=W, padx=8)
var = StringVar(cs)
var.set("9600")
option = OptionMenu(cs, var, "9600", "14400", "19200", "28800", command=get_val)
Label(cs, text=" ").grid(row=3)
option.grid(row=4, column=2, padx=8, sticky=W)
get_val()
def send_serial_data():
data=serial_out_data.get()
print data
def get_b_voltage(*args):
    # get battery voltage
    b_voltage=StringVar()
    b_voltage.set("0.0") # set default battery voltage
    return str(b_voltage.get()) + 'V'
def get_sc_voltage(*args):
    sc_voltage=StringVar()
    sc_voltage.set("0.0") # set default solar cell voltage
    return str(sc_voltage.get()) + 'V'
def get_charging_voltage():
    charging_voltage=StringVar()
    charging_voltage.set("0.0") # set default charging voltage
    return str(charging_voltage.get()) + 'V'
def get_lat(*args):
lat=StringVar()
lat.set("no signal")
return lat.get()
def get_long(*args):
lon=StringVar()
lon.set("no signal")
return lon.get()
def get_compass(*args):
compass=StringVar()
compass.set("no signal")
return compass.get()
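# NOTE: the voltage, GPS and compass getters above currently return hard-coded
# defaults; real readings would come in over the serial link.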
def send_serial():
    txt = open("textarea.txt", "a")
    ser = serial.Serial()
#ser.port = "/dev/ttyACM1"
ser.port = "/dev/ttyACM0"
    ser.baudrate = int(baud_rate.get())  # pyserial expects an integer baud rate
ser.bytesize = serial.EIGHTBITS #number of bits per bytes
ser.parity = serial.PARITY_NONE #set parity check: no parity
ser.stopbits = serial.STOPBITS_ONE #number of stop bits
#ser.timeout = None #block read
ser.timeout = 0 #non-block read
#ser.timeout = 2 #timeout block read
ser.xonxoff = False #disable software flow control
ser.rtscts = False #disable hardware (RTS/CTS) flow control
ser.dsrdtr = False #disable hardware (DSR/DTR) flow control
ser.writeTimeout = 2 #timeout for write
try:
ser.open()
except Exception, e:
txt.write("\n>> error open serial port: " + str(e))
if ser.isOpen():
try:
ser.flushInput() #flush input buffer, discarding all its contents
ser.flushOutput()#flush output buffer, aborting current output
#and discard all that is in buffer
#write data
temp=serial_out_data.get()
ser.write(temp)
ser.close()
except Exception, e1:
txt.write("\n>> error communicating...: " + str(e1))
else:
txt.write("\n>> cannot open serial port ")
# print_data=StringVar()
# print_data.set=send_serial()
#widget=Label(root, textvariable=serial_out_data, width=30, height=2).grid(row=2, column=0, columnspan=6)
txt.write("\n"+">> " + serial_out_data.get())
txt.close()
read_txt()
def send_pwm(*args):
    print pwm_var.get()
def send_mvoltage(*args):
    print mvoltage_var.get()
def read_txt(*args):
    log = open("textarea.txt", "r")
    txt_output = str(log.read(100000))
    log.close()
    txt_a.delete('1.0', END)  # clear the widget before re-inserting the log
    txt_a.insert(END, txt_output)
root = Tk()
root.title("Roam")
root.geometry("455x450")
#add a menu bar
menubar=Menu(root)
#set up and display file
file_menu=Menu(menubar, tearoff=0)
file_menu.add_command(label="Save", command=donothing)
file_menu.add_command(label="Exit", command=donothing)
menubar.add_cascade(label="File", menu=file_menu)
#setup and display tools
tools_menu=Menu(menubar, tearoff=0)
tools_menu.add_command(label="Comm port", command=call_comm)
tools_menu.add_command(label="Settings", command=call_settings)
menubar.add_cascade(label="Tools", menu=tools_menu)
#set up and display controls frame
control_menu=Menu(menubar, tearoff=0)
control_menu.add_command(label="On screen", command=call_control)
control_menu.add_command(label="Keyboard")
menubar.add_cascade(label="Control", menu=control_menu)
#adding text area to the main window
Label(root, text="Send serial command to arduino device").grid(row=0, column=0, columnspan=3, padx=8, sticky=W )
Label(root, text="Baud rate").grid(row=1, column=0, padx=8, sticky=W)
baud_rate=StringVar()
baud_rate.set("9600")
Label(root, textvariable=baud_rate).grid(row=1, column=1, padx=1, sticky=W )
serial_out_data=StringVar()
send_data = ttk.Entry(root, width=23, textvariable=serial_out_data)
send_data.grid(row=1, column=2, columnspan=5, padx=8, sticky=W)
send_button=Button(text="Send", command=send_serial)
send_button.grid(row=1, column=5, padx=8)
Label(root, text="").grid(row=2)
txt_a = ScrolledText(root, width=60, height=13)
txt_a.grid(row=3, column=0, columnspan=6)
#txt=Message(root, textvariable=serial_out_data, width=100).grid(row=3, column=0, columnspan=6)
Label(root, text=" ").grid(row=4)
Label(root, text="VOLTAGE MONITOR").grid(row=5, column=0,columnspan=3, padx=8, sticky=W+S+E+N)
Label(root, text="Battery ").grid(row=6, column=0, padx=8, sticky=W)
Label(root, text="Solar cell").grid(row=6, column=1, padx=8, sticky=W)
Label(root, text="Charging ").grid(row=6, column=2, padx=8, sticky=W)
Label(root, text=" ").grid(row=6, column=3, padx=8, sticky=W)
Label(root, text=" ").grid(row=6, column=4, padx=8, sticky=W)
Label(root, text=" ").grid(row=6, column=5, padx=8, sticky=W)
# set the voltages to 0.0V for battery, solar and charging
#battery
#pwm slider
pwm_var=StringVar()
pwm=Scale(root, from_=100, to=255, variable=pwm_var, command=send_pwm)
pwm.grid(row=7, column=3, rowspan=6)
mvoltage_var=StringVar()
motor_voltage=Scale(root, from_=5, to=12, variable=mvoltage_var, command=send_mvoltage)
motor_voltage.grid(row=7, column=5, rowspan=6, sticky=W)
battery_voltage=StringVar()
battery_voltage.set(get_b_voltage())
Label(root, textvariable=battery_voltage).grid(row=7, column=0, padx=8, sticky=W )
#solar cell
s_cell_voltage=StringVar()
s_cell_voltage.set(get_sc_voltage())
Label(root, textvariable=s_cell_voltage).grid(row=7, column=1, padx=8, sticky=W )
#charging
charge_voltage=StringVar()
charge_voltage.set(get_charging_voltage())
Label(root, textvariable=charge_voltage).grid(row=7, column=2, padx=8, sticky=W )
#position monitor
Label(root, text=" ").grid(row=9, column=0, padx=8, sticky=W)
Label(root, text="POSITION MONITOR").grid(row=9, column=0, columnspan=3, sticky=W+S+E+N)
Label(root, text="Latitude").grid(row=11, column=0, padx=8, sticky=W)
Label(root, text="Longitude").grid(row=11, column=1, padx=8, sticky=W)
Label(root, text="Facing").grid(row=11, column=2, padx=8, sticky=W)
# Display latitude, longitude and compass information
#latitude
lat_value=StringVar()
lat_value.set(get_lat())
Label(root, textvariable=lat_value).grid(row=12, column=0, padx=8, sticky=W )
#longitude
longitude_value=StringVar()
longitude_value.set(get_long())
Label(root, textvariable=longitude_value).grid(row=12, column=1, padx=8, sticky=W )
#compass
compass_value=StringVar()
compass_value.set(get_compass())  # was get_long(), which returned the longitude placeholder
Label(root, textvariable=compass_value).grid(row=12, column=2, padx=8, sticky=W )
Label(root, text="MOTOR").grid(row=5, column=3, columnspan=3, padx=8, sticky=W+S+N+E)
Label(root, text="PWM").grid(row=6, column=3, padx=8, sticky=E)
Label(root, text="Voltage").grid(row=6, column=5, padx=8, sticky=W)
Label(root, text=" ").grid(row=13, column=0, padx=8, sticky=W)
root.config(menu=menubar)
root.mainloop()
|
UTF-8
|
Python
| false
| false
| 9,711
|
py
| 1
|
roam.py
| 1
| 0.701781
| 0.673051
| 0
| 290
| 32.486207
| 114
|
faustow/vcpy
| 12,077,448,080,173
|
60f00cccf6db28a943f516f173be82d49f9bcd3e
|
dc1d54f9bfe1e4f7cd3eef2d12d280f4eadc21f3
|
/verifiable_credentials/helpers.py
|
68c4f15eaefec5ed2daea8297dce00441c65d2a9
|
[
"MIT"
] |
permissive
|
https://github.com/faustow/vcpy
|
eb006ba15d525673bb8f1efc7dc79bcce9033e83
|
d8b56c7d4a470346893b1481c5e7e476eb4a7c5d
|
refs/heads/master
| 2020-12-29T22:25:29.616416
| 2020-02-25T20:21:56
| 2020-02-25T20:21:56
| 238,754,459
| 0
| 0
|
MIT
| true
| 2020-02-25T20:21:58
| 2020-02-06T18:19:13
| 2020-02-06T18:19:15
| 2020-02-25T20:21:57
| 37
| 0
| 0
| 0
| null | false
| false
|
import hashlib
from datetime import datetime, timezone
from typing import List, Any, Dict, AnyStr, Union, Generator
from chainpoint.chainpoint import MerkleTools
from jsonpath_rw import parse, Root, Child, Fields, DatumInContext
from pycoin.serialize import h2b
def hash_byte_array(data):
hashed = hashlib.sha256(data).hexdigest()
return hashed
def ensure_string(value: AnyStr) -> str:
if isinstance(value, str):
return value
return value.decode('utf-8')
class MerkleTree(object):
"""Representation of a Merkle Tree.
More at https://en.wikipedia.org/wiki/Merkle_tree.
"""
def __init__(self):
self.tree = MerkleTools(hash_type='sha256')
def populate(self, node_generator: Generator) -> None:
"""
Populate Merkle Tree with data from node_generator. This requires that node_generator yield byte[] elements.
Hashes, computes hex digest, and adds it to the Merkle Tree
:param node_generator:
:return:
"""
for data in node_generator:
hashed = hash_byte_array(data)
self.tree.add_leaf(hashed)
    def get_root(self, binary=False) -> Union[bytes, str]:
        """
        Finalize the tree and return the Merkle root: a hex string, or bytes if binary=True.
        :return:
        """
self.tree.make_tree()
merkle_root = self.tree.get_merkle_root()
if binary:
return h2b(ensure_string(merkle_root))
return ensure_string(merkle_root)
    def get_proof_generator(self, tx_id: AnyStr, signature_type: AnyStr, chain_name: AnyStr) -> Generator:
        """
        Returns a generator of Merkle Proofs in insertion order.
        :param tx_id: blockchain transaction id
        :param signature_type: signature type recorded in each anchor
        :param chain_name: name of the chain each anchor points to
        :return:
        """
root = ensure_string(self.tree.get_merkle_root())
node_count = len(self.tree.leaves)
for index in range(0, node_count):
proof = self.tree.get_proof(index)
proof2 = []
for p in proof:
dict2 = dict()
for key, value in p.items():
dict2[key] = ensure_string(value)
proof2.append(dict2)
target_hash = ensure_string(self.tree.get_leaf(index))
merkle_proof = {
"type": ['MerkleProof2017', 'Extension'],
"merkleRoot": root,
"targetHash": target_hash,
"proof": proof2,
"anchors": [{
"sourceId": tx_id,
"type": signature_type,
"chain": chain_name
}]}
yield merkle_proof
def validate_required_fields(some_object: Any, required_fields: List) -> None:
"""Raise an exception if any of the required fields are missing from the object."""
for field in required_fields:
if not some_object.__getattribute__(field):
raise Exception(
f"The field '{field}' is required for object of class '{some_object.__class__.__name__}'."
)
def validate_required_fields_interactively(some_object: Any, required_fields: List) -> None:
"""Ask for user input if any of the required fields is missing."""
for field_name in required_fields:
ask_input_if_missing(some_object, field_name)
def ask_input_if_missing(some_object: Any, field_name: AnyStr, attempt: int = 0, max_retries: int = 2) -> None:
"""Asks the user for input if any of the required fields are missing from the object."""
if attempt >= max_retries:
raise Exception(
f"The field '{field_name}'is missing even after asking for user input, for "
f"object of class '{some_object.__class__.__name__}'."
)
attempt += 1
if not some_object.__getattribute__(field_name):
value = input(
f"The required field '{field_name}' cannot be empty for '{some_object.__class__.__name__}', please enter a "
f"valid value: "
)
some_object.__setattr__(field_name, value)
if not some_object.__getattribute__(field_name):
ask_input_if_missing(some_object, field_name, attempt=attempt, max_retries=max_retries)
def factor_in_new_try(number: Union[int, float], try_count: int) -> int:
    """Increase the given number by 10% for each try (truncated to an int)."""
    factor = float(f"1.{try_count}")
    return int(number * factor)
def create_iso8601_tz() -> str:
"""Get the current datetime in ISO 8601 format."""
ret = datetime.now(timezone.utc)
return ret.isoformat()
def get_path(match: 'DatumInContext') -> Generator:
    """Return an iterator based upon MATCH.PATH. Each item is a path component, starting from the outermost item."""
if match.context is not None:
for path_element in get_path(match.context):
yield path_element
yield str(match.path)
def recurse(child: 'Child', fields_reverse: List) -> None:
"""Recurse fields."""
if isinstance(child, Fields):
fields_reverse.append(child.fields[0])
else:
if not isinstance(child, Child):
raise Exception('Unexpected input while recursing for additional fields.')
if not isinstance(child.left, Root):
recurse(child.left, fields_reverse)
recurse(child.right, fields_reverse)
def update_dict(raw_dict: Dict, path: Generator, value: AnyStr) -> Dict:
    """Update dictionary's PATH (an iterator of path components) with VALUE. Return the updated dict."""
try:
first = next(path)
if first.startswith('[') and first.endswith(']'):
try:
first = int(first[1:-1])
except ValueError:
pass
raw_dict[first] = update_dict(raw_dict[first], path, value)
return raw_dict
except StopIteration:
return value
def set_dict_field(raw_dict: Dict, path: AnyStr, value: AnyStr) -> Dict:
"""Set dictionary's `path` with `value`. Return the updated dict"""
jp = parse(path)
matches = jp.find(raw_dict)
if matches:
for match in matches:
jsonpath_expr = get_path(match)
raw_dict = update_dict(raw_dict, jsonpath_expr, value)
else:
fields = []
recurse(jp, fields)
temp_json = raw_dict
for idx, f in enumerate(fields):
if f in temp_json:
temp_json = temp_json[f]
elif idx == len(fields) - 1:
temp_json[f] = value
else:
                raise Exception(f"Invalid path: '{'.'.join(fields)}' while setting dict field.")
return raw_dict
NOW = create_iso8601_tz()
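# Minimal usage sketch (not part of the library API): populate a tree with two
# byte strings and print the root; the tx id, signature type, and chain name
# passed to get_proof_generator are illustrative placeholders.
if __name__ == "__main__":
    demo_tree = MerkleTree()
    demo_tree.populate(iter([b"certificate-1", b"certificate-2"]))
    print(demo_tree.get_root())
    for demo_proof in demo_tree.get_proof_generator("0f0e0d", "BTCOpReturn", "bitcoinMainnet"):
        print(demo_proof["targetHash"])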
|
UTF-8
|
Python
| false
| false
| 6,603
|
py
| 13
|
helpers.py
| 7
| 0.597153
| 0.590792
| 0
| 187
| 34.31016
| 120
|
bumps/bumps
| 3,917,010,199,457
|
d84ed2f4c3117f3665c4f67536e353fdf22aaa96
|
ce60f76c6ad4c48fd6182240b302ee057809cc66
|
/run.py
|
5d48f72c580192a0c4b7e917cbe0d19a5cbb9eea
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
https://github.com/bumps/bumps
|
8ae10e8d15c0aa64e0bab6e00e7fabb2ca1b0860
|
2594e69567d534b434dc0eae727b77fdeff411d4
|
refs/heads/master
| 2023-08-22T17:56:49.987181
| 2023-07-26T14:22:23
| 2023-07-26T14:22:23
| 2,799,064
| 48
| 28
|
NOASSERTION
| false
| 2023-07-26T14:22:24
| 2011-11-17T22:22:02
| 2023-06-02T16:18:40
| 2023-07-26T14:22:23
| 13,245
| 51
| 27
| 63
|
Python
| false
| false
|
#!/usr/bin/env python
"""
Build and run bumps.
Usage:
./run.py [bumps cli args]
"""
import os
import sys
from contextlib import contextmanager
def addpath(path):
"""
Add a directory to the python path environment, and to the PYTHONPATH
environment variable for subprocesses.
"""
path = os.path.abspath(path)
if 'PYTHONPATH' in os.environ:
PYTHONPATH = path + os.pathsep + os.environ['PYTHONPATH']
else:
PYTHONPATH = path
os.environ['PYTHONPATH'] = PYTHONPATH
sys.path.insert(0, path)
@contextmanager
def cd(path):
old_dir = os.getcwd()
os.chdir(path)
yield
os.chdir(old_dir)
def prepare():
# Make sure that we have a private version of mplconfig
#mplconfig = os.path.join(os.getcwd(), '.mplconfig')
#os.environ['MPLCONFIGDIR'] = mplconfig
#if not os.path.exists(mplconfig):
# os.mkdir(mplconfig)
# To avoid cluttering the source tree with .pyc or __pycache__ files, you
# can suppress the bytecode generation when running in place. Unfortunately
# this is a pretty big performance hit on Windows, so we are going to
# suppress this behaviour and rely on .gitignore instead
#sys.dont_write_bytecode = True
#import numpy as np; np.seterr(all='raise')
root = os.path.abspath(os.path.dirname(__file__))
# Add the root to the system path
addpath(root)
# Make sample data and models available
os.environ['BUMPS_DATA'] = os.path.join(root, 'bumps', 'gui', 'resources')
if __name__ == "__main__":
import multiprocessing
multiprocessing.freeze_support()
prepare()
import bumps.cli
bumps.cli.main()
|
UTF-8
|
Python
| false
| false
| 1,659
|
py
| 211
|
run.py
| 177
| 0.664256
| 0.663653
| 0
| 65
| 24.523077
| 79
|
gabrielsp20/Ex_Python
| 7,662,221,688,496
|
1b8ce58fab3e73566e984d75f9e505541262cf83
|
d7897acf9bc369d4668e00a73f85d1a0efdc7a7b
|
/068.py
|
854340bfd35d83f1388ac3ad2e9137a512a411e1
|
[] |
no_license
|
https://github.com/gabrielsp20/Ex_Python
|
db723d9a1344c79a2cd7a6b9b95d85754cf54d3d
|
5264de0002755ca5d58dc8fe74dfe1cea458c731
|
refs/heads/main
| 2023-05-06T21:53:26.568941
| 2021-05-03T16:14:20
| 2021-05-03T16:14:20
| 363,985,168
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from random import randint
from time import sleep
print('=' * 30)
print("LET'S PLAY EVEN OR ODD")
print('=' * 30)
v = pi = soma = ganhou = 0
while True:
    computado = randint(1, 10)  # the computer picks a new number each round
    v = int(input('Enter a value: '))
    pi = str(input('Even or Odd? [E/O] ')).upper()
    print('-' * 30)
    soma = v + computado
    result = soma % 2
    if result == 0:
        if pi == 'E':
            print(f'You played {v} and the computer {computado}. Total of {soma}: EVEN')
            print('-' * 30)
            print('YOU WIN')
            print("Let's play again...")
            sleep(2)
            print('-' * 30)
            ganhou += 1
    else:
        if pi == 'O':
            print(f'You played {v} and the computer {computado}. Total of {soma}: ODD')
            print('-' * 30)
            print('YOU WIN')
            print("Let's play again...")
            sleep(2)
            print('-' * 30)
            ganhou += 1
    if result == 0:
        if pi == 'O':
            print(f'You played {v} and the computer {computado}. Total of {soma}: EVEN')
            print('-' * 30)
            print('YOU LOSE')
            print('-' * 30)
            break
    else:
        if pi == 'E':
            print(f'You played {v} and the computer {computado}. Total of {soma}: ODD')
            print('-' * 30)
            print('YOU LOSE.....')
            sleep(3)
            print('-' * 30)
            break
print(f'GAME OVER! You won {ganhou} times.')
|
UTF-8
|
Python
| false
| false
| 1,437
|
py
| 39
|
068.py
| 39
| 0.455944
| 0.432168
| 0
| 48
| 28.4375
| 88
|
chrisoyer/music-review-pipeline
| 15,169,824,506,293
|
279d5dd1b9631cc3724116ea4014cfdaeb7a6945
|
171d7bc5047679d943db51926cc25ae6322d4eb1
|
/src/data/scrapy/rym_scrape.py
|
12749702d2506f47f8c993e8bd7cb5b108b8f946
|
[] |
no_license
|
https://github.com/chrisoyer/music-review-pipeline
|
0999a5488c8a285ecb7ef7edc0562b0b75a08cd5
|
81bd0e56aa6c73409ec5b35356b96b52193e9c82
|
refs/heads/main
| 2023-01-19T06:51:08.269846
| 2020-11-24T22:20:16
| 2020-11-24T22:20:16
| 306,819,120
| 0
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import rymscraper as rym
import pandas as pd
# create connection
network = rym.RymNetwork()
# get charts as source for artists
rym_url = rym.RymUrl.RymUrl()
chart_infos = network.get_chart_infos(rym_url=rym_url, max_page=3)
df = pd.DataFrame(chart_infos)
# get list of albums for artists
# get album details
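# Hedged sketch of the two steps above, assuming the get_discography_infos /
# get_album_infos helpers shown in rymscraper's README; the artist and album
# names are illustrative placeholders:
# discography = network.get_discography_infos(name="Daft Punk")
# album_infos = network.get_album_infos(name="Daft Punk - Discovery")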
# close & quit browser to avoid memory leaks
network.browser.close()
network.browser.quit()
|
UTF-8
|
Python
| false
| false
| 430
|
py
| 10
|
rym_scrape.py
| 6
| 0.744186
| 0.74186
| 0
| 22
| 18.545455
| 66
|
rajanpawar90/Sagemaker-Projects
| 8,744,553,435,925
|
8f4ffdc7854fe1ff74d73ed867ce4dd50fd92bfe
|
8203f0d7b014f654a0e1061d417ad68d2bea6913
|
/sklearn_sagemaker_deploy/sklearn_sagemaker_models.py
|
bd104b5e9cfd3213004f3e602562cee11ae4d569
|
[] |
no_license
|
https://github.com/rajanpawar90/Sagemaker-Projects
|
8a9cf49b39220ab4a99f9b4514efd780efc2922f
|
ab03d6c334d295785fbf83ca8da7a11ba7d8a9dc
|
refs/heads/main
| 2023-02-14T17:46:15.945260
| 2021-01-03T23:30:03
| 2021-01-03T23:30:03
| 326,520,736
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#sample notebooks reviewed
# https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-python-sdk/scikit_learn_randomforest/Sklearn_on_SageMaker_end2end.ipynb
# https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-python-sdk/scikit_learn_iris/scikit_learn_estimator_example_with_batch_transform.ipynb
# https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-python-sdk/scikit_learn_inference_pipeline/Inference%20Pipeline%20with%20Scikit-learn%20and%20Linear%20Learner.ipynb
import os
#import ML_Utils.ML_Utils
from ML_Utils.ML_Utils import *
import sklearn
from sklearn.datasets import california_housing, \
load_breast_cancer, load_wine, load_iris
import sagemaker
import boto3
import re
from sagemaker import image_uris
from sagemaker import TrainingInput
import pandas as pd
import numpy as np
region = boto3.Session().region_name
bucket= 'sklearn-sagemaker-data'
prefix_wine = 'wine'
prefix_cali = 'california_housing'
prefix_breast_cancer = 'breast_cancer'
prefix_iris = 'iris'
prefix_output = 'output'
sagemaker_session = sagemaker.Session()
iam = boto3.client('iam')
SageMakerRole = iam.get_role(RoleName='AmazonSageMaker-ExecutionRole-20200112T192472')['Role']['Arn']
s3client = boto3.client('s3')
#Prepare training data in S3
##Iris data in local folder
iris_data = sklearn.datasets.load_iris()
joined_iris = np.insert(iris_data.data, 0, iris_data.target, axis=1) #insert targets as the first column (index 0 along axis=1)
os.makedirs('AWS_Sagemaker/sklearn_sagemaker_deploy/data', exist_ok=True)
np.savetxt('AWS_Sagemaker/sklearn_sagemaker_deploy/data/iris_data.csv', joined_iris, delimiter=',', fmt='%1.1f, %1.3f, %1.3f, %1.3f, %1.3f')
local_breast_cancer_dir= 'AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer'
local_iris_dir = 'AWS_Sagemaker/sklearn_sagemaker_deploy/data/iris'
#copy contents of local directory into s3 bucket using sagemaker_session.upload_data
train_input_iris = sagemaker_session.upload_data(path= local_iris_dir,
bucket='sklearn-sagemaker-data',
key_prefix='iris')
#Download and save breast cancer data to local directory
breast_cancer_data = sklearn.datasets.load_breast_cancer()
pd.DataFrame(breast_cancer_data.get('data')).to_csv('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/bc_features.csv') #save features as csv file
pd.DataFrame(breast_cancer_data.get('target')).to_csv('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/bc_targets.csv') #save targets as csv file
bc_features = pd.read_csv('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/bc_features.csv')
bc_targets = pd.read_csv('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/bc_targets.csv')
bc_features = bc_features.drop(bc_features.columns[0], axis=1) #drop first column
bc_targets = bc_targets.drop(bc_targets.columns[0], axis=1) #drop first column
bc_train_X, bc_test_X, bc_train_y, bc_test_y = get_train_test_datasets(bc_features,bc_targets)
bc_train_X.to_csv('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/train/bc_train_X.csv')
bc_train_y.to_csv('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/train/bc_train_y_1.csv')
bc_test_X.to_csv('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/test/bc_test_X.csv')
bc_test_y.to_csv('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/test/bc_test_y.csv')
###convert dataframes to pickle####################################################################################
bc_train_y.to_pickle('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/train/bc_train_y')
train_y = pd.read_pickle('AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/train/bc_train_y')
#Upload train and test directories of BC data to S3 using sagemaker api
train_input_bc = sagemaker_session.upload_data(path= 'AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/train',
bucket='sklearn-sagemaker-data',
key_prefix='breast_cancer/train')
test_input_bc = sagemaker_session.upload_data(path= 'AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/test',
bucket='sklearn-sagemaker-data',
key_prefix='breast_cancer/test')
train_input_breast_cancer = 's3://sklearn-sagemaker-data/breast_cancer/train' # no trailing '/' since file names are joined onto this prefix with os.path.join
test_input_breast_cancer = 's3://sklearn-sagemaker-data/breast_cancer/test'
#Upload individual feature and target files to s3 bucket using s3client
s3client.upload_file(Filename='AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/bc_features.csv', #complete filename in local dir
Bucket='sklearn-sagemaker-data',
Key='breast_cancer/bc-features.csv') #complete path to file in s3 which is a file name
s3client.upload_file(Filename='AWS_Sagemaker/sklearn_sagemaker_deploy/data/breast_cancer/bc_targets.csv', #complete filename in local dir
Bucket='sklearn-sagemaker-data',
Key='breast_cancer/bc-targets.csv')
#Use this code to download specific file from s3 to local directory
#bc_s3_download = s3client.download_file(Bucket='sklearn-sagemaker-data',
# Key='breast_cancer/bc-features.csv', #complete path of file in s3
# Filename='AWS_Sagemaker/sklearn_sagemaker_deploy/bc_s3_features.csv') #complete path for file in local dir
# Below code not working for uploading .Bunch object to S3 directly
# s3client.put_object(Body=sklearn.datasets.load_breast_cancer(),
# Bucket='sklearn-sagemaker-data',
# key='sklearn-sagemaker-data/breast-cancer/bc-data.Bunch')
#Create sagemaker estimator for iris
from sagemaker.sklearn.estimator import SKLearn
FRAMEWORK_VERSION = "0.23-1"
script_path_iris = 'AWS_Sagemaker/sklearn_sagemaker_deploy/sklearn_script_iris.py'
script_path_breast_cancer = 'AWS_Sagemaker/sklearn_sagemaker_deploy/sklearn_script_breast_cancer.py'
sklearn_path_random_model = 'AWS_Sagemaker/sklearn_sagemaker_deploy/sklearn_script_random_model.py'
sklearn_estimator_iris = SKLearn(entry_point= script_path_iris,
framework_version= FRAMEWORK_VERSION,
instance_type= "ml.c4.xlarge",
role=SageMakerRole,
sagemaker_session= sagemaker_session,
hyperparameters={'max_leaf_nodes': 30})
sklearn_estimator_breast_cancer = SKLearn(entry_point= script_path_breast_cancer,
framework_version= FRAMEWORK_VERSION,
instance_type= "ml.c4.xlarge",
role=SageMakerRole,
sagemaker_session= sagemaker_session,
hyperparameters={'max_leaf_nodes': 30})
sklearn_estimator_random_model = SKLearn(entry_point= sklearn_path_random_model,
framework_version= FRAMEWORK_VERSION,
instance_type= "ml.c4.xlarge",
role=SageMakerRole,
sagemaker_session= sagemaker_session)
#This will start a SageMaker Training job that will download the
# data for us, invoke our scikit-learn code (in the provided script
# file), and save any model artifacts that the script creates.
sklearn_estimator_iris.fit({'train': train_input_iris})
sklearn_estimator_breast_cancer.fit({'train': train_input_breast_cancer}) #always provide directory of s3 training/testing data which are parsed for the training
sklearn_estimator_random_model.fit({'train': train_input_breast_cancer, 'test': test_input_breast_cancer})
#Deploy the trained iris model to make inference requests
predictor_iris = sklearn_estimator_iris.deploy(initial_instance_count=1, instance_type="ml.m5.xlarge")
import itertools
import pandas as pd
shape = pd.read_csv("AWS_Sagemaker/sklearn_sagemaker_deploy/data/iris_data.csv", header=None)
a = [50*i for i in range(3)]
b = [40+i for i in range(10)]
indices = [i+j for i,j in itertools.product(a,b)]
test_data = shape.iloc[indices[:-1]]
test_X = test_data.iloc[:,1:]
test_y =test_data.iloc[:,0]
pred_y = predictor_iris.predict(test_X.values)
from sklearn.metrics import accuracy_score
print(accuracy_score(test_y,pred_y))
#Iris Endpoint cleanup
predictor_iris.delete_endpoint()
#Iris Batch transform jobs
##We can also use the trained model for asynchronous batch inference on S3 data using SageMaker Batch Transform.
transformer=sklearn_estimator_iris.transformer(instance_count=1,
instance_type='ml.m5.large')
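# Hedged follow-up using the standard SageMaker Transformer API; the S3 input
# below is an illustrative placeholder for label-free CSV feature rows:
# transformer.transform('s3://sklearn-sagemaker-data/iris/batch-input', content_type='text/csv')
# transformer.wait()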
|
UTF-8
|
Python
| false
| false
| 8,999
|
py
| 11
|
sklearn_sagemaker_models.py
| 6
| 0.693522
| 0.682743
| 0
| 173
| 51.017341
| 189
|
loganathanengrr/Django-Rest-Core
| 14,594,298,892,059
|
b7e0fbb59c734e3ed312579d15b34c5119a6a058
|
e755f96437b264e54b30d58f706990eb17cc3944
|
/src/accounts/views/views.py
|
3d8d7d34836357fd45eaabc8a4deaeeb44b71490
|
[
"MIT"
] |
permissive
|
https://github.com/loganathanengrr/Django-Rest-Core
|
a5b1ba72a44e8e1da3957450d8266aec9bf852e2
|
928c2d816c0aa48453dde8642ef1b263f76ae39d
|
refs/heads/master
| 2022-12-13T15:35:49.143116
| 2018-10-23T05:06:13
| 2018-10-23T05:06:13
| 154,263,367
| 1
| 0
|
MIT
| false
| 2022-12-08T02:48:16
| 2018-10-23T04:40:37
| 2020-02-18T11:09:56
| 2022-12-08T02:48:15
| 1,109
| 1
| 0
| 12
|
Python
| false
| false
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.tokens import default_token_generator
from django.utils.timezone import now
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from accounts.email import (
    get_user_email,
    get_user_email_field_name,
    ActivationEmail,
    ConfirmationEmail,
    PasswordResetEmail,
)
from accounts import permissions
from accounts import serializers
from accounts import utils
from accounts import signals
from project import rest_views
User = get_user_model()
class UserCreateView(rest_views.CreateAPIView):
"""
Use this endpoint to register new user.
"""
serializer_class = serializers.UserCreateSerializer
permission_classes = (permissions.AnonPermissionOnly,)
def perform_create(self, serializer):
user = serializer.save()
signals.user_registered.send(
sender=self.__class__, user=user, request=self.request
)
context = {'user': user}
to = [get_user_email(user)]
if settings.SEND_ACTIVATION_EMAIL:
ActivationEmail(self.request, context).send(to)
elif settings.SEND_CONFIRMATION_EMAIL:
ConfirmationEmail(self.request, context).send(to)
class UserView(rest_views.RetrieveUpdateAPIView):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = serializers.CurrentUserSerializer
def get_object(self, *args, **kwargs):
return self.request.user
def perform_update(self, serializer):
super(UserView, self).perform_update(serializer)
user = serializer.instance
if settings.SEND_ACTIVATION_EMAIL and not user.is_active:
context = {'user': user}
to = [get_user_email(user)]
ActivationEmail(self.request, context).send(to)
class TokenCreateView(utils.ActionViewMixin, rest_views.GenericAPIView):
"""
Use this endpoint to obtain user authentication token.
"""
serializer_class = serializers.TokenCreateSerializer
permission_classes = (permissions.AnonPermissionOnly,)
def _action(self, serializer):
user = serializer.user
token = utils.login_user(self.request, user)
auth_token = getattr(token, 'key', None)
return Response(
utils.token_response_payload_handler(auth_token, user, self.request),
status=status.HTTP_200_OK
)
class TokenDestroyView(rest_views.APIView):
"""
Use this endpoint to logout user (remove user authentication token).
"""
permission_classes = (permissions.IsAuthenticated,)
def post(self, request):
utils.logout_user(request)
return Response(status=status.HTTP_204_NO_CONTENT)
class PasswordResetView(utils.ActionViewMixin, generics.GenericAPIView):
"""
Use this endpoint to send email to user with password reset link.
"""
serializer_class = serializers.PasswordResetSerializer
permission_classes = (permissions.AnonPermissionOnly,)
_users = None
def _action(self, serializer):
for user in self.get_users(serializer.data['email']):
self.send_password_reset_email(user)
return Response(status=status.HTTP_204_NO_CONTENT)
def get_users(self, email):
if self._users is None:
email_field_name = get_user_email_field_name(User)
users = User._default_manager.filter(**{
email_field_name + '__iexact': email
})
self._users = [
u for u in users if u.is_active and u.has_usable_password()
]
return self._users
def send_password_reset_email(self, user):
context = {'user': user}
to = [get_user_email(user)]
PasswordResetEmail(self.request, context).send(to)
class PasswordResetConfirmView(utils.ActionViewMixin, generics.GenericAPIView):
"""
Use this endpoint to finish reset password process.
"""
permission_classes = (permissions.AnonPermissionOnly,)
serializer_class = serializers.PasswordResetConfirmSerializer
token_generator = default_token_generator
def _action(self, serializer):
serializer.user.set_password(serializer.data['new_password'])
if hasattr(serializer.user, 'last_login'):
serializer.user.last_login = now()
serializer.user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class ActivationView(utils.ActionViewMixin, generics.GenericAPIView):
"""
Use this endpoint to activate user account.
"""
serializer_class = serializers.ActivationSerializer
permission_classes = (permissions.AnonPermissionOnly,)
token_generator = default_token_generator
def _action(self, serializer):
user = serializer.user
user.is_active = True
user.save()
signals.user_activated.send(
sender=self.__class__, user=user, request=self.request
)
if settings.SEND_CONFIRMATION_EMAIL:
context = {'user': user}
to = [get_user_email(user)]
ConfirmationEmail(self.request, context).send(to)
return Response(status=status.HTTP_204_NO_CONTENT)
class PasswordChangeView(utils.ActionViewMixin, generics.GenericAPIView):
"""
Use this endpoint to change user password.
"""
permission_classes = (permissions.IsAuthenticated,)
serializer_class = serializers.PasswordChangeSerializer
def _action(self, serializer):
self.request.user.set_password(serializer.data.get('new_password'))
self.request.user.save()
if settings.LOGOUT_ON_PASSWORD_CHANGE:
utils.logout_user(self.request)
return Response(status=status.HTTP_204_NO_CONTENT)
class UsernameChangeView(utils.ActionViewMixin, generics.GenericAPIView):
"""
Use this endpoint to change user username.
"""
permission_classes = (permissions.IsAuthenticated,)
serializer_class = serializers.UsernameChangeSerializer
def _action(self, serializer):
user = self.request.user
new_username = serializer.data.get('new_' + User.USERNAME_FIELD)
setattr(user, User.USERNAME_FIELD, new_username)
if settings.SEND_ACTIVATION_EMAIL:
user.is_active = False
context = {'user': user}
to = [get_user_email(user)]
ActivationEmail(self.request, context).send(to)
user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
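# Hedged wiring sketch (a urls.py is not part of this module; the route names
# below are illustrative):
# from django.urls import path
# urlpatterns = [
#     path('users/create/', UserCreateView.as_view()),
#     path('token/create/', TokenCreateView.as_view()),
#     path('password/reset/', PasswordResetView.as_view()),
# ]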
|
UTF-8
|
Python
| false
| false
| 6,207
|
py
| 31
|
views.py
| 29
| 0.762204
| 0.758821
| 0
| 197
| 30.497462
| 79
|
KeyStorke/CHM01
| 6,889,127,577,062
|
6cbe2f490d1431c6fc9f441f8f6480f976d0e039
|
62f448fe3f09a287b498acb6262834784c1f348a
|
/constants.py
|
87c1904f2c8c4edecf1d6012d8be2198a573c39d
|
[] |
no_license
|
https://github.com/KeyStorke/CHM01
|
7ad332ba1710858e52bb11502d7a115122198809
|
d6de100e5ad1db69be2c93bded5adc7068d78a55
|
refs/heads/master
| 2016-08-01T02:15:08.574835
| 2015-04-02T21:36:52
| 2015-04-02T21:36:52
| 33,320,699
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# 4x^6 - 6x^4 - 24x - 10
func_glob = lambda x: 4 * x ** 6 - 6 * x ** 4 - 24 * x - 10
# 24x^5 - 24x^3 - 24
func_frst = lambda x: 24 * (x ** 5 - x ** 3 - 1)
a1 = -1
b1 = 1
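# NOTE: 0.1 ** 1000000000000000 underflows to exactly 0.0 in IEEE-754 doubles,
# so any convergence test against e effectively demands exact equality.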
e = 0.1 ** 1000000000000000
|
UTF-8
|
Python
| false
| false
| 199
|
py
| 3
|
constants.py
| 3
| 0.472362
| 0.21608
| 0
| 8
| 23.875
| 59
|
rddaz2013/bastelpython
| 4,526,895,558,648
|
fc4ec76754e26d168bb3fb795709572ec70e6088
|
b431798e7027ddaa8dc4bfe460a81a415c14692f
|
/BastelPython/src/Argon/Part_2/triplot.py
|
2484678a122e473f4aeb2018178817cc0ab77d74
|
[] |
no_license
|
https://github.com/rddaz2013/bastelpython
|
1afe3a742bfdc598304d989469567a85e9953140
|
eff40b4ee9bce98de75a89c5c576b4cbd735625d
|
refs/heads/master
| 2016-08-08T13:41:10.369834
| 2014-04-04T13:13:31
| 2014-04-04T13:13:31
| 32,955,283
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2009 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""An example embedding program. Veusz needs to be installed into
the Python path for this to work (use setup.py)
This animates a sin plot, then finishes
"""
from WxTernary import *
import pylab as p
from SelfKin_hlp import *
# CO2+H2O / CO / air, 1 bar, 20 °C (S119)
data = [[2.6,11.6,85.8],[21.5,13.7,64.8],[39.9,15.2,44.9],
[47.2,16.5,36.3],[48.1,17.3,34.6],[49.0,17.9,33.1],
[49.0,19.1,31.9],[47.3,21.4,31.3],[44.6,25.4,30.0],
[40.0,30.2,29.8],[2.6,72.9,24.5]]
# CO2+H2O / CO / air, 1 bar, 300 °C (S127)
data2 = [[2.3,6.8,90.9],[12.2,7.9,79.9],[21.8,8.5,69.7],
[31.4,9.3,59.3],[42.4,9.8,47.8],[52.4,10.7,36.9],
[61.6,11.1,27.3],[65.2,12.5,22.3],[61.7,17.7,20.6],
[52.6,27.5,19.9],[42.7,38.0,19.3],[32.7,49.2,18.1],
[22.6,59.8,17.6],[12.6,71.0,16.4],[2.3,82.4,15.3]]
fig = p.figure()
ax = fig.add_subplot(1,1,1, xticks=[],yticks=[],title='',frame_on=False,clip_on=True)
plot_grid(ax, achsentyp=False, achsen=['Fuel gas', 'Inert gas', 'Oxidizer'])
def AX(daten):
return (daten[2],daten[0],daten[1])
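# Umwandel_BAM: split the air fraction into 21% O2 / 79% N2 and fold the N2
# into the inert share, converting [inert, fuel, air] rows into the
# [inert+N2, fuel, O2] representation plotted in white/black below.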
def Umwandel_BAM(daten):
Sauerstoff = daten[2]*0.21
Inert = daten[0] + (daten[2]*0.79)
Brenngas = daten[1]
return AX([Inert,Brenngas,Sauerstoff])
# Original data with air
for i_count in data:
plot_point(ax,AX(i_count),color="red",ls="",marker="o")
for i in range(len(data)-1):
plot_lines(ax,AX(data[i]),AX(data[i+1]),color="red",ls="-",lw=0.25)
for i_count in data2:
plot_point(ax,AX(i_count),color="blue",ls="",marker="o")
for i in range(len(data2)-1):
plot_lines(ax,AX(data2[i]),AX(data2[i+1]),color="blue",ls="-",lw=0.25)
for i_count in data:
print Umwandel_BAM(i_count)
plot_point(ax,Umwandel_BAM(i_count),color="white",ls="",marker="o")
for i in range(len(data)-1):
plot_lines(ax,Umwandel_BAM(data[i]),Umwandel_BAM(data[i+1]),color="white",ls="-",lw=0.25)
for i_count in data2:
print Umwandel_BAM(i_count)
plot_point(ax,Umwandel_BAM(i_count),color="white",ls="",marker="o")
for i in range(len(data2)-1):
plot_lines(ax,Umwandel_BAM(data2[i]),Umwandel_BAM(data2[i+1]),color="black",ls="-",lw=0.25)
plot_lines(ax,[0,0,1],[0.2,0.8,0],color="black",ls="-",lw=0.55)
xx = load_result_safekinex('Messpunkte.txt')
my_array = list_to_array('CO',xx).astype("float")
my_array2 = list_to_array('O2',xx).astype("float")
my_array3 = list_to_array('N2',xx).astype("float")
my_array4 = list_to_array('CO2',xx).astype("float")
for i in range(len(my_array)):
plot_point(ax,AX([my_array4[i]+my_array3[i],my_array[i],my_array2[i]]),color="green",ls="",marker="o")
plot_point(ax,AX([59.1+19.6,13.3,5.4]),color="red",ls="",marker="o")
#plot_point(ax,inputs2,color="blue",ls="",marker="o")
p.show()
|
UTF-8
|
Python
| false
| false
| 3,814
|
py
| 255
|
triplot.py
| 131
| 0.602779
| 0.513896
| 0
| 103
| 35.009709
| 106
|
lizhencmb/forest
| 5,437,428,622,613
|
5b2e6e0c23d2364dcc89f369d16f0f061449b7fd
|
551f917f3eac76a0754c1da3cfebe2a7f0ea35f6
|
/tools/genefamily.py
|
082cb60acb55092d26fc31bc808398c5a12efad9
|
[] |
no_license
|
https://github.com/lizhencmb/forest
|
add452880108ee2bb33577749a5b5eea5fc5c632
|
e9c0a58606bdcd22153963317e8ca7a9bea81f38
|
refs/heads/main
| 2023-04-07T05:54:13.478252
| 2021-04-19T13:59:14
| 2021-04-19T13:59:14
| 352,376,977
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import os
import logging
import subprocess as sp
from Bio import SeqIO
from Bio import AlignIO
from Bio import Phylo
from Bio.Seq import Seq
def read_gene_families(gftxt, protfile = None, cdsfile = None, wrkdir = None):
"""
Read gene families and sequences and put them into the GeneFamily Class
"""
gene_families = []
if protfile is None and cdsfile is None:
logging.info("Gene families need to have sequences!")
with open(gftxt, 'r') as f:
for line in f:
line = line.rstrip()
x = line.split()
gf_id = x.pop(0)[:-1]
gf_genes = x
gene_families.append(GeneFamily(gf_id=gf_id, gf_members=gf_genes))
return gene_families
if protfile is not None:
prot = SeqIO.to_dict(SeqIO.parse(protfile, "fasta"))
if cdsfile is not None:
cds = SeqIO.to_dict(SeqIO.parse(cdsfile, "fasta"))
with open(gftxt, 'r') as handle:
for line in handle:
line = line.rstrip()
x = line.split()
gf_id = x.pop(0)[:-1]
gf_genes = x
gf_prot = {}
gf_cds = {}
for gid in x:
if prot[gid][-1:].seq == '*':
gf_prot[gid] = prot[gid][:-1]
else:
gf_prot[gid] =prot[gid]
if cds[gid][-3:].seq == "TAA" or \
cds[gid][-3:].seq == "TAG" or \
cds[gid][-3:].seq == "TGA":
gf_cds[gid] = cds[gid][:-3]
else:
gf_cds[gid] = cds[gid]
gene_families.append(GeneFamily(gf_id = gf_id, gf_members = gf_genes,
prot_seqs = gf_prot, cds_seqs = gf_cds, wrkdir=wrkdir))
return gene_families
def split_gene_families(gftxt, max_size=200, min_size=4, num=2):
    gfs = read_gene_families(gftxt)
for i in range(len(gfs)):
l = len(gfs[i].members)
        if l < min_size or l > max_size:
continue
else:
m = i % num
fname = 'GeneFamilySet' + str(m) + '.txt'
with open(fname, "a") as out:
out.write('%s: %s\n' % (gfs[i].id, ' '.join(gfs[i].members)))
def _write_fasta(fname, seq_dict):
with open(fname, "w") as f:
for i, s in seq_dict.items():
#print(i)
#print(s.seq)
f.write(">%s\n%s\n" % (i, s.seq))
return fname
def _mkdir(dirname):
if os.path.isdir(dirname):
logging.warning("dir {} exists!".format(dirname))
else:
os.makedirs(dirname)
return dirname
class GeneFamily(object):
def __init__(self, gf_id, gf_members, prot_seqs=None, cds_seqs=None,
#prot_aln = None, cds_aln = None,
wrkdir = None, phylo = None,
aligner = "muscle", phylotool = "raxml"):
self.id = gf_id
self.members = gf_members
self.prot = prot_seqs
self.cds = cds_seqs
#self.prot_aln = prot_aln
#self.cds_aln = cds_aln
self.aligner = aligner
self.phylotool = phylotool
        if wrkdir is None:
self.wrkdir = os.path.join('run', gf_id)
else:
self.wrkdir = os.path.join(wrkdir, gf_id)
def get_wrkdir(self):
print(self.wrkdir)
def get_id(self):
print(self.id)
def get_prot(self):
#print(self.id)
for m in self.members:
#print(m)
#print(self.prot[m].seq)
print(self.prot[m].format("fasta").rstrip())
def get_cds(self):
#print(self.id)
for m in self.members:
#print(m)
#print(self.cds[m].seq)
print(self.cds[m].format("fasta").rstrip())
def obtain_msa(self):
wd = _mkdir(self.wrkdir)
fname = os.path.join(self.wrkdir, self.id + ".pep")
_write_fasta(fname, self.prot)
fname = os.path.join(wd, self.id + ".cds")
_write_fasta(fname, self.cds)
if (self.aligner == "muscle"):
self.run_muscle()
elif (self.aligner == "mafft"):
pass
#run_mafft()
self.run_trimal()
def run_muscle(self):
infile = os.path.join(self.wrkdir, self.id + ".pep")
alnfile = infile + ".fasta"
cmd = ["muscle", "-seqtype", "protein", "-quiet",
"-in", infile, "-out", alnfile]
out = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
logging.warning(out.stderr.decode())
#self.prot_aln = AlignIO.read(alnfile, "fasta")
def run_trimal(self):
alnfile = os.path.join(self.wrkdir, self.id + ".pep.fasta")
cdsfile = os.path.join(self.wrkdir, self.id + ".cds")
outfile = os.path.join(self.wrkdir, self.id + ".cds.fasta.trimal")
cmd = ["trimal", "-in", alnfile, "-backtrans", cdsfile,
"-out", outfile, "-fasta", "-automated1"]
#print(' '.join(cmd))
out = sp.run(cmd, stdout = sp.PIPE, stderr = sp.PIPE)
logging.warning(out.stderr.decode())
def build_phylo(self):
if self.phylotool == "raxml":
self.run_raxml()
#print(self.phylo)
#Phylo.draw_ascii(self.phylo)
elif self.phylotool == "phyml":
pass
def run_raxml(self):
wd = os.path.join(os.path.abspath('.'), self.wrkdir)
aln = os.path.join(wd, self.id + ".cds.fasta.trimal")
#cmd = ["raxmlHPC-PTHREADS", "-T", "4", "-f", "a",
cmd = ["raxmlHPC-PTHREADS", "-T", "4", "-f", "a",
"-x", "601376", "-p", "601376", "-#", "100", "-w", wd,
# "-x", "601376", "-p", "601376", "-#", "10", "-w", wd,
"-m", "GTRGAMMA", "-s", aln, "-n", self.id]
logging.info(' '.join(cmd))
out = sp.run(cmd, stderr = sp.PIPE, stdout = sp.PIPE)
logging.warning(out.stderr.decode())
treefile = os.path.join(wd, 'RAxML_bipartitions.' + self.id)
self.phylo = Phylo.read(treefile, 'newick')
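# Hedged usage sketch (file names are illustrative; muscle, trimal and
# raxmlHPC-PTHREADS must be on PATH):
# gfs = read_gene_families('families.txt', protfile='prot.fa', cdsfile='cds.fa')
# for gf in gfs:
#     gf.obtain_msa()
#     gf.build_phylo()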
|
UTF-8
|
Python
| false
| false
| 6,157
|
py
| 4
|
genefamily.py
| 3
| 0.507228
| 0.499269
| 0
| 176
| 33.982955
| 82
|
zinedine/pyMagic
| 3,135,326,131,166
|
fc1531a9f88f8a2b9fcce7dbe92d74e6f56e09e9
|
25329c21eecddd0f4027dbaacf2fad5090319795
|
/widgets/geometry_helper.py
|
0e85ed8513a9ac6ef1ff1f223c4ac948ffdfebd6
|
[] |
no_license
|
https://github.com/zinedine/pyMagic
|
77a5322c043a170783343f8dd22fcebdd7044f87
|
a94b92803eb3070ef8ce14b7a1fb0f10b0c06ed6
|
refs/heads/master
| 2020-05-31T13:18:48.087940
| 2012-06-28T02:01:13
| 2012-06-28T02:01:13
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from PyQt4.QtCore import QSettings, QByteArray
class GeometryHelper(object):
def __init__(self):
pass
def save(self, data, name):
info = QSettings()
info.setValue("geo/" + name, data)
    def load(self, name, default_value = None):
        info = QSettings()
        key = "geo/" + name
        if info.contains(key):
            return info.value(key, QByteArray()).toByteArray()
        return default_value
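# Usage sketch (assumes a QApplication with organization/application names set
# so QSettings has a storage location; `window` is any QWidget):
# helper = GeometryHelper()
# helper.save(window.saveGeometry(), "main_window")
# geometry = helper.load("main_window")
# if geometry is not None:
#     window.restoreGeometry(geometry)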
|
UTF-8
|
Python
| false
| false
| 435
|
py
| 96
|
geometry_helper.py
| 83
| 0.586207
| 0.583908
| 0
| 16
| 26.25
| 62
|
sangeeths/be
| 8,315,056,721,377
|
9e460ad14cbd6508168b505de09bb9e518399c4a
|
64fca990f99abb1308397e7c13104bc1fc023240
|
/dir1/test_f1.py
|
5751e587b919efe96dce14b5ae42f6f00c5c3d5c
|
[] |
no_license
|
https://github.com/sangeeths/be
|
b242b250d64b768fe9ec1a0840543ec4908bffe7
|
a24ba2a1cdd59b26062f1b50334c6b2180ee7fc3
|
refs/heads/master
| 2020-05-05T06:54:26.403786
| 2013-08-04T00:10:16
| 2013-08-04T00:10:16
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
print 'this is file #1'
|
UTF-8
|
Python
| false
| false
| 24
|
py
| 33
|
test_f1.py
| 27
| 0.666667
| 0.625
| 0
| 1
| 23
| 23
|
RedHatInsights/insights-core
| 4,629,974,763,787
|
5d48984f724be55bb8a302420c9c9adb8afc62df
|
b8d80a23cb27af08a1c4d34b478c76228ae5fbb4
|
/insights/parsr/query/tests/test_boolean.py
|
3af943ed4bb5bd593178c089fe694e6239fba8d1
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/RedHatInsights/insights-core
|
bb243e2bf8a52446fefb95ebe05478d6e35efe2e
|
b0ea07fc3f4dd8801b505fe70e9b36e628152c4a
|
refs/heads/master
| 2023-09-04T21:15:40.456257
| 2023-09-04T10:46:56
| 2023-09-04T10:46:56
| 92,518,221
| 144
| 290
|
Apache-2.0
| false
| 2023-09-14T02:40:13
| 2017-05-26T14:23:11
| 2023-09-05T06:25:30
| 2023-09-14T02:40:12
| 16,723
| 132
| 193
| 185
|
Python
| false
| false
|
from insights.parsr.query.boolean import TRUE, FALSE
def test_and():
q = TRUE & TRUE
assert q(None)
q = TRUE & FALSE
assert not q(None)
q = FALSE & TRUE
assert not q(None)
q = FALSE & FALSE
assert not q(None)
def test_or():
q = TRUE | TRUE
assert q(None)
q = TRUE | FALSE
assert q(None)
q = FALSE | TRUE
assert q(None)
q = FALSE | FALSE
assert not q(None)
def test_not():
q = ~FALSE
assert q(None)
q = ~TRUE
assert not q(None)
|
UTF-8
|
Python
| false
| false
| 519
|
py
| 1,667
|
test_boolean.py
| 1,482
| 0.55684
| 0.55684
| 0
| 36
| 13.416667
| 52
|
alisonrib17/bees-tomato
| 11,665,131,201,965
|
95f2eee4e68f85615a43f8547b1e9ea2c42be816
|
dde823d2e293a60ad49c98153d2606a99136c374
|
/GenusCodes/feature_extraction_gen.py
|
b59bf08b93449a759f53fdeb8ec60b2520bd478a
|
[] |
no_license
|
https://github.com/alisonrib17/bees-tomato
|
0a78fa02b7039bb8bee21b3ad1908d4430f89b34
|
53cf7424b99c7b48475ec9021c080dbc1a62cc57
|
refs/heads/master
| 2023-08-28T14:00:38.131992
| 2021-09-17T18:03:49
| 2021-09-17T18:03:49
| 392,415,944
| 4
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import csv
import pickle
import librosa
import pathlib
import pandas as pd
import numpy as np
N_MFCC = 41 # Number of features
path_features = '/home/alison/Documentos/TomatoBeesRepository/DatasetMFCC/dataset_genus.csv'
def feature_extraction():
header = 'filename'
    for i in range(1, N_MFCC + 1):  # one header column per MFCC coefficient
header += f' mfcc{i}'
header += ' Annotation'
header += ' label'
header = header.split()
file = open(path_features, 'w', newline='')
with file:
writer = csv.writer(file)
writer.writerow(header)
genera = ['Augchloropsis', 'Bombus', 'Centris', 'Eulaema', 'Exomalopis', 'Melipona', 'Pseudoalglochloropsi', 'Xylocopa']
path_table = '/home/alison/Documentos/TomatoBeesRepository/Data/TableRecordings.xlsx'
table = pd.read_excel(path_table)
for g in genera:
for filename in os.listdir(f'/home/alison/Documentos/TomatoBeesRepository/Data/GenusRecordings/{g}'):
songname = f'/home/alison/Documentos/TomatoBeesRepository/Data/GenusRecordings/{g}/{filename}'
y, sr = librosa.load(songname, mono=True)
audioname = os.path.splitext(filename)[0]
            df = table[table.Audio == audioname].reset_index(drop=True)  # use only this recording's annotations (df was previously computed but unused)
            size = int(df.shape[0])
            start_time = df['Begin Time (s)']
            end_time = df['End Time (s)']
            low_freq = df['Low Freq (Hz)']
            annotation = df['Annotation']
for i in range(size):
to_append = f'{filename}'
start = float(start_time[i])
end = float(end_time[i])
FMIN = float(low_freq[i])
FMAX = sr/2.0
                start_index = librosa.time_to_samples(start, sr=sr)
                end_index = librosa.time_to_samples(end, sr=sr)
required_slice = y[start_index:end_index]
required_mfcc = librosa.feature.mfcc(y=required_slice, sr=sr, n_mfcc=N_MFCC, fmin=FMIN, fmax=FMAX)
for e in required_mfcc:
to_append += f' {np.mean(e)}'
to_append += f' {annotation[i]}'
to_append += f' {g}'
file = open(path_features, 'a', newline='')
with file:
writer = csv.writer(file)
writer.writerow(to_append.split())
if __name__ == '__main__':
feature_extraction()
|
UTF-8
|
Python
| false
| false
| 2,088
|
py
| 15
|
feature_extraction_gen.py
| 3
| 0.663793
| 0.660441
| 0
| 75
| 26.853333
| 121
|
m4xc4v413r4/python-blizzardapi
| 9,852,655,012,993
|
21028e45aa32c648f9d03187e96de5a696cbeb61
|
3680d05cdb683eb1ae5753dabeb0e201ef004d2d
|
/blizzardapi/wow/wow_profile_api.py
|
b10a405d0858c685d9a49e05a95c71aa03d55c53
|
[
"MIT",
"Python-2.0"
] |
permissive
|
https://github.com/m4xc4v413r4/python-blizzardapi
|
1492980520ad3e73128d51538d25f86d417c4918
|
dd1c225fc713ad4e0cb4e13d0703434279353658
|
refs/heads/main
| 2023-06-21T04:32:11.241254
| 2021-05-14T20:43:16
| 2021-05-14T20:43:16
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""wow_profile_api.py file."""
from ..api import Api
class WowProfileApi(Api):
"""All Wow Profile API methods.
Attributes:
client_id: A string client id supplied by Blizzard.
client_secret: A string client secret supplied by Blizzard.
"""
def __init__(self, client_id, client_secret):
"""Init WowProfileApi."""
super().__init__(client_id, client_secret)
# Account Profile API
def get_account_profile_summary(self, region, locale, access_token):
"""Return a profile summary for an account."""
resource = "/profile/user/wow"
query_params = {
"namespace": f"profile-{region}",
"locale": locale,
"access_token": access_token,
}
return super().get_resource(resource, region, query_params)
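    # Hedged usage sketch (credentials, region, locale and token are
    # illustrative placeholders):
    #   api = WowProfileApi("client-id", "client-secret")
    #   summary = api.get_account_profile_summary("us", "en_US", access_token)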
def get_protected_character_profile_summary(
self, region, locale, access_token, realm_id, character_id
):
"""Return a protected profile summary for a character."""
resource = f"/profile/user/wow/protected-character/{realm_id}-{character_id}"
query_params = {
"namespace": f"profile-{region}",
"locale": locale,
"access_token": access_token,
}
return super().get_resource(resource, region, query_params)
def get_account_collections_index(self, region, locale, access_token):
"""Return an index of collection types for an account."""
resource = "/profile/user/wow/collections"
query_params = {
"namespace": f"profile-{region}",
"locale": locale,
"access_token": access_token,
}
return super().get_resource(resource, region, query_params)
def get_account_mounts_collection_summary(self, region, locale, access_token):
"""Return a summary of the mounts an account has obtained."""
resource = "/profile/user/wow/collections/mounts"
query_params = {
"namespace": f"profile-{region}",
"locale": locale,
"access_token": access_token,
}
return super().get_resource(resource, region, query_params)
def get_account_pets_collection_summary(self, region, locale, access_token):
"""Return a summary of the battle pets an account has obtained."""
resource = "/profile/user/wow/collections/pets"
query_params = {
"namespace": f"profile-{region}",
"locale": locale,
"access_token": access_token,
}
return super().get_resource(resource, region, query_params)
# Character Achievements API
def get_character_achievements_summary(
self, region, locale, realm_slug, character_name
):
"""Return a summary of the achievements a character has completed."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/achievements"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_character_achievement_statistics(
self, region, locale, realm_slug, character_name
):
"""Return a character's statistics as they pertain to achievements."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/achievements/statistics"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Appearance API
def get_character_appearance_summary(
self, region, locale, realm_slug, character_name
):
"""Return a summary of a character's appearance settings."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/appearance"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Collections API
def get_character_collections_index(
self, region, locale, realm_slug, character_name
):
"""Return an index of collection types for a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/collections"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_character_mounts_collection_index(
self, region, locale, realm_slug, character_name
):
"""Return a summary of the mounts a character has obtained."""
resource = (
f"/profile/wow/character/{realm_slug}/{character_name}/collections/mounts"
)
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_character_pets_collection_index(
self, region, locale, realm_slug, character_name
):
"""Return a summary of the battle pets a character has obtained."""
resource = (
f"/profile/wow/character/{realm_slug}/{character_name}/collections/pets"
)
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Encounters API
def get_character_encounters_summary(
self, region, locale, realm_slug, character_name
):
"""Return a summary of a character's encounters."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/encounters"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_character_dungeons(self, region, locale, realm_slug, character_name):
"""Return a summary of a character's completed dungeons."""
resource = (
f"/profile/wow/character/{realm_slug}/{character_name}/encounters/dungeons"
)
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_character_raids(self, region, locale, realm_slug, character_name):
"""Return a summary of a character's completed raids."""
resource = (
f"/profile/wow/character/{realm_slug}/{character_name}/encounters/raids"
)
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Equipment API
def get_character_equipment_summary(
self, region, locale, realm_slug, character_name
):
"""Return a summary of the items equipped by a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/equipment"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Hunter Pets API
def get_character_hunter_pets_summary(
self, region, locale, realm_slug, character_name
):
"""If the character is a hunter, returns a summary of the character's hunter pets."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/hunter-pets"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Media API
def get_character_media_summary(self, region, locale, realm_slug, character_name):
"""Return a summary of the media assets available for a character (such as an avatar render)."""
resource = (
f"/profile/wow/character/{realm_slug}/{character_name}/character-media"
)
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Mythic Keystone Profile API
def get_character_mythic_keystone_profile_index(
self, region, locale, realm_slug, character_name
):
"""Return the Mythic Keystone profile index for a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/mythic-keystone-profile"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_character_mythic_keystone_profile_season_details(
self, region, locale, realm_slug, character_name, season_id
):
"""Return the Mythic Keystone season details for a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/mythic-keystone-profile/season/{season_id}"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Professions API
def get_character_professions_summary(
self, region, locale, realm_slug, character_name
):
"""Return a summary of professions for a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/professions"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Profile API
def get_character_profile_summary(self, region, locale, realm_slug, character_name):
"""Return a profile summary for a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_character_profile_status(self, region, locale, realm_slug, character_name):
"""Return the status and a unique ID for a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/status"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
    # Character PvP API
def get_character_pvp_bracket_statistics(
self, region, locale, realm_slug, character_name, pvp_bracket
):
"""Return the Pvp bracket statistics for a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/pvp-bracket/{pvp_bracket}"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_character_pvp_summary(self, region, locale, realm_slug, character_name):
"""Return a Pvp summary for a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/pvp-summary"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Quests API
def get_character_quests(self, region, locale, realm_slug, character_name):
"""Return a character's active quests as well as a link to the character's completed quests."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/quests"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_character_completed_quests(
self, region, locale, realm_slug, character_name
):
"""Return a list of quests that a character has completed."""
resource = (
f"/profile/wow/character/{realm_slug}/{character_name}/quests/completed"
)
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Reputations API
def get_character_reputations_summary(
self, region, locale, realm_slug, character_name
):
"""Return a summary of a character's reputations."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/reputations"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Specializations API
def get_character_specializations_summary(
self, region, locale, realm_slug, character_name
):
"""Return a summary of a character's specializations."""
resource = (
f"/profile/wow/character/{realm_slug}/{character_name}/specializations"
)
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Statistics API
def get_character_statistics_summary(
self, region, locale, realm_slug, character_name
):
"""Return a statistics summary for a character."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/statistics"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Character Titles API
def get_character_titles_summary(self, region, locale, realm_slug, character_name):
"""Return a summary of titles a character has obtained."""
resource = f"/profile/wow/character/{realm_slug}/{character_name}/titles"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
# Guild API
def get_guild(self, region, locale, realm_slug, name_slug):
"""Return a single guild by its name and realm."""
resource = f"/data/wow/guild/{realm_slug}/{name_slug}"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_guild_activity(self, region, locale, realm_slug, name_slug):
"""Return a single guild's activity by name and realm."""
resource = f"/data/wow/guild/{realm_slug}/{name_slug}/activity"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_guild_achievements(self, region, locale, realm_slug, name_slug):
"""Return a single guild's achievements by name and realm."""
resource = f"/data/wow/guild/{realm_slug}/{name_slug}/achievements"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
def get_guild_roster(self, region, locale, realm_slug, name_slug):
"""Return a single guild's roster by its name and realm."""
resource = f"/data/wow/guild/{realm_slug}/{name_slug}/roster"
query_params = {"namespace": f"profile-{region}", "locale": locale}
return super().get_resource(resource, region, query_params)
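# Hedged usage sketch (not part of the original file): assuming the enclosing
# client class -- called WowProfileApi here purely for illustration -- is
# constructed elsewhere with valid Battle.net credentials, the endpoints above
# can be exercised like this; realm, character, and guild values are placeholders.
#
# api = WowProfileApi(client_id="...", client_secret="...")
# profile = api.get_character_profile_summary("us", "en_US", "some-realm", "somecharacter")
# roster = api.get_guild_roster("us", "en_US", "some-realm", "some-guild")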
|
UTF-8
|
Python
| false
| false
| 14,912
|
py
| 24
|
wow_profile_api.py
| 20
| 0.647264
| 0.647264
| 0
| 325
| 44.883077
| 117
|
pulumi/pulumi-azure-native
| 11,897,059,438,296
|
74ec9ee2cc0ab57b9cc1678cb2e0a9b763d4cc0c
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/providerhub/v20210501preview/outputs.py
|
60700306ce2cf515b0efdb4da213d36fa5614f22
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
https://github.com/pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| false
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
| 2023-09-11T17:08:08
| 2023-09-14T13:16:52
| 2,507,628
| 104
| 26
| 377
|
Python
| false
| false
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'OperationsDefinitionResponseDisplay',
]
@pulumi.output_type
class OperationsDefinitionResponseDisplay(dict):
"""
Display information of the operation.
"""
def __init__(__self__, *,
description: str,
operation: str,
provider: str,
resource: str):
"""
Display information of the operation.
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "operation", operation)
pulumi.set(__self__, "provider", provider)
pulumi.set(__self__, "resource", resource)
@property
@pulumi.getter
def description(self) -> str:
return pulumi.get(self, "description")
@property
@pulumi.getter
def operation(self) -> str:
return pulumi.get(self, "operation")
@property
@pulumi.getter
def provider(self) -> str:
return pulumi.get(self, "provider")
@property
@pulumi.getter
def resource(self) -> str:
return pulumi.get(self, "resource")
|
UTF-8
|
Python
| false
| false
| 1,388
|
py
| 13,948
|
outputs.py
| 5,665
| 0.606628
| 0.605908
| 0
| 52
| 25.653846
| 80
|
Andreyglass1989/Swift_Avia_Group
| 6,914,897,386,915
|
fc5b85e1cec799ef0e7577200787f8083243ba60
|
cbfd2f8cbc31573ab54455f3f5398dffcf13f2af
|
/advanced/to_Slava_serializers.py
|
82ffa7d1bfe56a7cea2cd4764ce236f390176f55
|
[] |
no_license
|
https://github.com/Andreyglass1989/Swift_Avia_Group
|
ccaf9f1a9bdff8fd9837345ba45915814b17a2a4
|
2c89e170a0fe9a5401fd15552c85c5d73abe1c21
|
refs/heads/master
| 2022-07-27T09:42:29.863236
| 2019-07-18T04:17:54
| 2019-07-18T04:17:54
| 102,249,849
| 0
| 0
| null | false
| 2022-07-15T20:30:50
| 2017-09-03T08:04:54
| 2019-07-18T04:18:11
| 2022-07-15T20:30:50
| 19,623
| 0
| 0
| 8
|
HTML
| false
| false
|
from LK.models import Parcel, Pack, ParcelPackProduct
# from rest_framework import serializers
from django.http import JsonResponse
import json
def pack_today(date):
    q = Parcel.objects.filter(date_added__contains=date, parcel_status_id=2)
# for zz in q:
# z = Pack.objects.filter(parcelpackproduct__parcel_id)
pack_today=[]
full_data = []
for n in q:
        p = ParcelPackProduct.objects.filter(parcel_id=n.parcel_id).first()
        if p and p.pack_id not in pack_today:  # guard: a parcel may have no pack row
            pack_today.append(p.pack_id)
for p in pack_today:
data = Pack.objects.get(pack_id=p)
parcel_number=[]
dict_00 = {"date": data.date_added.strftime("%Y-%m-%d"),
"weight": data.weight,
"customer": data.customer.external_id,
"comment": data.comment,
"group": data.category_group.category_group_id,
}
# print(p, data.date_added.strftime("%Y-%m-%d"), data.weight, data.customer.external_id, data.comment, data.category_group.category_group_id)
parcel = Parcel.objects.filter(parcelpackproduct__pack_id=data.pack_id).order_by('parcel_id')
for z in parcel:
if z.external_id not in parcel_number:
parcel_number.append(z.external_id)
# print(z.external_id)
dict_00['parcel'] = parcel_number
full_data.append(dict_00)
return JsonResponse(full_data, safe=False)
# response = JsonResponse(full_data, safe=False)
# d = json.loads(response.content)
# return d
# leftover scratch code, renamed so the built-in list/dict types are not shadowed
sample_list = ['a', 'b', 'c']
sample_dict = {}
sample_dict['full'] = sample_list
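# Hedged usage sketch (assumes a configured Django project with the LK app):
# pack_today expects a date string matching date_added's format, e.g.
#     pack_today("2019-07-18")
# and returns a JsonResponse listing, for each pack shipped that day, its
# date, weight, customer, comment, group, and parcel external IDs.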
|
UTF-8
|
Python
| false
| false
| 1,441
|
py
| 159
|
to_Slava_serializers.py
| 94
| 0.686329
| 0.681471
| 0
| 51
| 27.27451
| 146
|
dpgailey/sourcereader
| 9,431,748,199,156
|
c8421594d410089fadf5fcecb182167dc87613df
|
5b51bf14323b0949eba8e726acbebf889b35ccd6
|
/dateGuesser.py
|
9ee110bc401ad8d5a7c7e0c132ffce494c4f10b2
|
[] |
no_license
|
https://github.com/dpgailey/sourcereader
|
32432e21696e5cd826749c4987cd6a6d5730d237
|
e494937fbd415e0bfbede4c591a6a4af74ee412b
|
refs/heads/master
| 2021-05-26T18:10:13.649556
| 2011-12-07T19:03:58
| 2011-12-07T19:03:58
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
from alogClient import *
from datetime import date
from datetime import timedelta
def urlDateGuesser(url):
## all one number
today = date.today()
    dateMatch1 = re.search(r'\d{8}', url)  # raw string avoids invalid-escape warnings
if dateMatch1 != None:
allOneNumber = dateMatch1.group(0)
#is year rational? Within a few years of now?
urlYear = int(allOneNumber[0:4])
urlMonth = int(allOneNumber[4:6])
urlDay = int(allOneNumber[6:8])
if urlYear <= today.year and urlYear > today.year - 10 and urlMonth <= 12 and urlDay <= 31:
urlDate = date(urlYear, urlMonth, urlDay)
age = (today - urlDate).days
return(age)
    dateMatch2 = re.search(r'(\d{4})[_/-](\d{1,2})[_/-](\d{1,2})', url)
if dateMatch2 != None:
urlYear = int(dateMatch2.group(1))
urlMonth = int(dateMatch2.group(2))
urlDay = int(dateMatch2.group(3))
#account for possible month / day reversal
if urlMonth > 12 and urlDay <= 12:
newMonth = urlDay
newDay = urlMonth
urlDay = newDay
urlMonth = newMonth
if urlYear <= today.year and urlYear > today.year - 10 and urlMonth <= 12 and urlDay <= 31:
urlDate = date(urlYear, urlMonth, urlDay)
age = (today - urlDate).days
return(age)
return(None)
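# Hedged usage example (not in the original file): the guesser returns the
# age in days of a date embedded in the URL, or None when nothing plausible
# is found (the year must fall within the last ten years).
if __name__ == '__main__':
    print(urlDateGuesser('http://example.com/news/2020/06/15/some-story'))
    print(urlDateGuesser('http://example.com/no-date-here'))  # None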
|
UTF-8
|
Python
| false
| false
| 1,385
|
py
| 114
|
dateGuesser.py
| 104
| 0.568953
| 0.540072
| 0
| 41
| 32.560976
| 99
|
JiangLiNSCC/eHPC
| 1,872,605,763,550
|
3c09f7279dc09f2faa4402ced69cbd0350c05c56
|
d23a7446f0b8dc83c88808ac47ec9a5568572ee2
|
/newt-p3/job/migrations/0002_auto_20161110_0004.py
|
386997787f124c2bcebe48644f4406369df1b9eb
|
[
"BSD-2-Clause"
] |
permissive
|
https://github.com/JiangLiNSCC/eHPC
|
a651aac0a77e3a4d3bf7a1a3195f6abcbcc33c79
|
16fb9bdf76875ff154173184aa83979b37a91f5a
|
refs/heads/master
| 2021-01-17T16:03:56.334913
| 2017-05-03T09:19:05
| 2017-05-03T09:19:05
| 61,087,990
| 3
| 1
| null | false
| 2017-01-04T04:01:54
| 2016-06-14T03:10:39
| 2016-11-18T11:33:38
| 2017-01-04T04:01:54
| 2,702
| 2
| 1
| 10
|
Python
| null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-10 06:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='hpcjob',
name='jobid',
field=models.IntegerField(default=-1, null=True),
),
migrations.AlterField(
model_name='hpcjob',
name='scale_Nodes',
field=models.IntegerField(default=1, null=True),
),
migrations.AlterField(
model_name='hpcjob',
name='scale_cores',
field=models.IntegerField(default=1, null=True),
),
migrations.AlterField(
model_name='hpcjob',
name='scale_memGB',
field=models.IntegerField(default=1, null=True),
),
migrations.AlterField(
model_name='hpcjob',
name='time_create',
field=models.DateTimeField(blank=True, default=None, null=True),
),
migrations.AlterField(
model_name='hpcjob',
name='time_end',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='hpcjob',
name='time_hold',
field=models.TimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='hpcjob',
name='time_limit',
field=models.TimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='hpcjob',
name='time_start',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='hpcjob',
name='time_submit',
field=models.DateTimeField(blank=True, null=True),
),
]
|
UTF-8
|
Python
| false
| false
| 1,969
|
py
| 79
|
0002_auto_20161110_0004.py
| 62
| 0.53936
| 0.526663
| 0
| 65
| 29.292308
| 76
|
national-basketball-association/NPS
| 16,758,962,398,444
|
eb03608e3e08e6e0ed63f16d055874ecab52080e
|
a3d076c7d029d2cfebbbc6ad2e9062e6051c14b2
|
/storeCSV.py
|
ad8552c4bfed51f72f0ad9672445ab56daf40b6e
|
[] |
no_license
|
https://github.com/national-basketball-association/NPS
|
282266507b6459b79e5c9f7c7bf95bb1d86079cf
|
cd1561a0330cdf79d61101a2052cf162ec1920a3
|
refs/heads/master
| 2022-12-09T23:49:39.571201
| 2019-04-26T07:17:55
| 2019-04-26T07:17:55
| 169,655,875
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import csv
from pymongo import MongoClient
import glob
import sys
from pprint import PrettyPrinter
#connect to the database
client = MongoClient("mongodb+srv://rmohamme:green12@cluster0-8eolw.mongodb.net/test?retryWrites=true")
db = client["NPS"]
filepath = sys.argv[1] + '/' if len(sys.argv) == 2 else ''
def storePlayerStats():
col = db["PLAYER_STATS"]
for filename in glob.glob(filepath + 'datasets/player_stats/*.csv'):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',', strict=True)
line_count = 0
playerObj = {}
for row in csv_reader:
if line_count == 0:
#This is the header of the csv_file
#print(f'Column names {", ".join(row)}')
line_count += 1
continue
elif line_count == 1:
playerObj["_id"] = row[0]
player_name = filename.split('/')[-1]
player_name = player_name[:-10]
player_name = player_name[13:]
# print("player name is {}".format(player_name))
tokens = player_name.split("_")
f_name = tokens[0]
l_name = ""
if len(tokens) > 1:
l_name = tokens[1]
playerObj["l_name"] = l_name
playerObj["f_name"] = f_name
playerObj["playerName"] = player_name
playerObj["seasons"] = []
i = 3
season = {
"LEAGUE_ID": row[2],
"TEAM_ID": row[3],
"TEAM_ABBREVIATION": row[4],
"PLAYER_AGE": row[5],
"GP": row[6],
"GS": row[7],
"MIN": str(round(float(row[8])/float(row[6]), 2)),
"FGM": row[9],
"FGA": row[10],
"FG_PCT": row[11],
"FG3M": row[12],
"FG3A": row[13],
"FG3_PCT": row[14],
"FTM": row[15],
"FTA": row[16],
"FT_PCT": row[17],
"OREB": row[18],
"DREB": row[19],
"REB": row[20],
"AST": row[21],
"STL": row[22],
"BLK": row[23],
"TOV": row[24],
"PF": row[25],
"PTS": row[26]
}
playerObj["seasons"].append(season)
line_count += 1
if playerObj:
# print(row[i])
# i += 1
# #print("\n")
# print(row[i])
# i += 1
# #print("\n")
# print(row[i])
# i += 1
# #print("\n")
# print(row[i])
# i += 1
#print("\n")
print(playerObj)
col.replace_one({'_id':playerObj['_id']}, playerObj, upsert=True)
def storeTeamStats():
col = db["TEAM_STATS"]
for filename in glob.glob(filepath + 'datasets/team_stats/*.csv'):
with open(filename) as csv_file:
            # NOTE: rows are read straight from csv_file and split manually below,
            # so a csv.reader is not needed here.
teamObj = {}
line_count = 0
for row in csv_file:
if line_count == 0:
#print(f'Coloumn names {", ".join(row)}')
line_count += 1
continue
elif line_count == 1:
row = row.split(',')
teamObj["_id"] = row[0]
teamObj["teamCity"] = row[1]
teamObj["teamName"] = row[2]
teamObj["years"] = []
else:
row = row.split(',')
year = {
"YEAR": row[3],
"GP": row[4],
"WINS": row[5],
"LOSSES": row[6],
"WIN_PCT": row[7],
"CONF_RANK": row[8],
"DIV_RANK": row[9],
"PO_WINS": row[10],
"PO_LOSSES": row[11],
"CONF_COUNT": row[12],
"DIV_COUNT": row[13],
"NBA_FINALS_APPEARANCE": row[14],
"FGM": row[15],
"FGA": row[16],
"FG_PCT": row[17],
"FG3M": row[18],
"FG3A": row[19],
"FG3_PCT": row[20],
"FTM": row[21],
"FTA": row[22],
"FT_PCT": row[23],
"OREB": row[24],
"DREB": row[25],
"REB": row[26],
"AST": row[27],
"PF": row[28],
"STL": row[29],
"TOV": row[30],
"BLK": row[31],
"PTS": row[32],
"PTS_RANK": row[33]
}
teamObj["years"].append(year)
line_count += 1
if teamObj:
#print(teamObj)
col.replace_one({'_id':teamObj['_id']}, teamObj, upsert=True)
def storeBoxScores():
col = db["BOX_SCORES"]
col2 = db["TEAM_ROSTERS"]
for filename in glob.glob(filepath + 'datasets/*.csv'):
with open(filename) as csv_file:
            # NOTE: rows are read straight from csv_file and split manually below,
            # so a csv.reader is not needed here.
teamObj = {}
rosterObj = {}
line_count = 0
for row in csv_file:
if line_count == 0:
line_count += 1
teamObj["games"] = []
#print(row)
else:
row = row.split(',')
#print(row[1])
teamObj["_id"] = row[1]
teamObj["TEAM_ABBREVIATION"] = row[2]
teamObj["TEAM_NAME"] = row[3]
rosterObj["TEAM_NAME"] = row[3]
rosterObj["_id"] = row[1]
game = {
"SEASON_ID": row[0],
"GAME_ID": row[4],
"GAME_DATE": row[5],
"MATCHUP": row[6],
"WL": row[7],
"MIN": row[8],
"PTS": row[9],
"FGM": row[10],
'FGA': row[11],
"FG_PCT": row[12],
"FG3M": row[13],
"FG3A": row[14],
"FG3_PCT": row[15],
"FTM": row[16],
"FTA": row[17],
"FT_PCT": row[18],
"OREB": row[19],
"DREB": row[20],
"REB": row[21],
"AST": row[22],
"STL": row[23],
"BLK": row[24],
"TOV": row[25],
"PF": row[26],
"PLUS_MINUS": row[27],
"NUM_WINS": row[28],
"NUM_LOSSES": row[29][:-1]
}
teamObj["games"].append(game)
#print(teamObj)
if teamObj:
col.replace_one({'_id':teamObj['_id']}, teamObj, upsert=True)
col2.replace_one({'_id':teamObj['_id']}, rosterObj, upsert=True)
# print(teamObj["TEAM_NAME"])
# print(teamObj["TEAM_ABBREVIATION"])
# print(teamObj["_id"])
# print(teamObj["games"][0])
#
# def storePredictions(teamPredictions):
# """
# use the other one
# :param teamPredictions:
# :return:
# """
# pp = PrettyPrinter(indent=4)
# pp.pprint(teamPredictions)
# sys.exit(1)
# if(teamPredictions):
# col = db["TEAM_PREDICTIONS"]
# col.replace_one({"_id": 1000}, teamPredictions, upsert=True)
def storeTeamPredictions(teamPredictions):
"""
Stores the predictions made by NPS system for every team playing tonight in MongoDB
Every team should have its own document, and the document should be updated every time
that team plays a game and receives new predictions
:param teamPredictions: a nested dictionary, mapping team abbreviations to information, such as opponent and stats
:return:
"""
global db
pp = PrettyPrinter(indent=4)
# iterate over the team predictions and add them to the database
# each team should be its own document
for key, value in teamPredictions.items():
team_abbreviation = key
team_info = value
team_id = value["id"]
del value["id"]
# print(value)
col = db["TEAM_PREDICTIONS"]
document = col.find_one({"_id": team_id}) # get the current document associated with this team in the db
        if document is None:
# there is no document for this team, need to make one from scratch
print("There is no document for {}!".format(value["full_name"]))
predictions = [] # a list of the predictions made for this team, each prediction should be a dictionary
to_insert = {} # the dictionary to insert in the database
to_insert["full_name"] = value["full_name"]
current_prediction = {}
current_prediction["winPrediction"] = value["winPrediction"]
current_prediction["homeGame"] = value["homeGame"]
current_prediction["opponentFullName"] = value["opponentFullName"]
current_prediction["opponentId"] = value["opponentId"]
current_prediction["predictedAssists"] = value["predictedAssists"]
current_prediction["predictedTurnovers"] = value["predictedTurnovers"]
current_prediction["predictedRebounds"] = value["predictedRebounds"]
current_prediction["predictedBlocks"] = value["predictedBlocks"]
current_prediction["predictedAssistTurnoverRatio"] = int(value["predictedAssists"]) / int(value["predictedTurnovers"])
current_prediction["predictedSteals"] = value["predictedSteals"]
current_prediction["predictedFouls"] = value["predictedFouls"]
current_prediction["predictedThreePtPercentage"] = value["predictedThreePtPercentage"]
current_prediction["predictedFreeThrowPercentage"] = value["predictedFreeThrowPercentage"]
current_prediction["date"] = value["date"]
# finished formatting the most recent prediction made for this team
predictions.append(current_prediction)
to_insert["predictions"] = predictions
# the document to insert should have been formatted, try inserting into the database
col.replace_one({"_id": team_id}, to_insert, upsert=True)
else:
# get the current predictions that are stored in the database
stored_predictions = document["predictions"] # this should be an array
# format the most recent prediction into a dictionary
current_prediction = {}
current_prediction["winPrediction"] = value["winPrediction"]
current_prediction["homeGame"] = value["homeGame"]
current_prediction["opponentFullName"] = value["opponentFullName"]
current_prediction["opponentId"] = value["opponentId"]
current_prediction["predictedAssists"] = value["predictedAssists"]
current_prediction["predictedTurnovers"] = value["predictedTurnovers"]
current_prediction["predictedRebounds"] = value["predictedRebounds"]
current_prediction["predictedBlocks"] = value["predictedBlocks"]
current_prediction["predictedAssistTurnoverRatio"] = int(value["predictedAssists"]) / int(
value["predictedTurnovers"])
current_prediction["predictedSteals"] = value["predictedSteals"]
current_prediction["predictedFouls"] = value["predictedFouls"]
current_prediction["predictedThreePtPercentage"] = value["predictedThreePtPercentage"]
current_prediction["predictedFreeThrowPercentage"] = value["predictedFreeThrowPercentage"]
current_prediction["date"] = value["date"]
stored_predictions.append(current_prediction)
document["predictions"] = stored_predictions
# the predictions array for this team has been updated with the most recent game
# should be able to insert it into the database
col.replace_one({"_id": team_id}, document, upsert=True)
def store(teamPredictions):
storeTeamPredictions(teamPredictions)
storePlayerStats()
storeTeamStats()
storeBoxScores()
|
UTF-8
|
Python
| false
| false
| 13,255
|
py
| 521
|
storeCSV.py
| 9
| 0.466164
| 0.450472
| 0
| 329
| 39.288754
| 130
|
ocakafred/CHAI
| 10,376,641,034,577
|
11bc4776afbaee8445aa0eb0f3cebf7ce83314a3
|
1498debedf2f834a0e9d4a53c0d2a197e7ebceee
|
/sql_script.py
|
0db344f10b7bd7702774bd298e8e6aff937d4bd3
|
[] |
no_license
|
https://github.com/ocakafred/CHAI
|
3667581dffb2c1381411c4a1deb8c3a258dbaa75
|
b539e988545fea9616fc3ce7ae6d67d1275650e8
|
refs/heads/master
| 2022-11-12T16:52:58.447182
| 2020-06-18T16:57:21
| 2020-06-18T16:57:21
| 272,387,092
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Created on 15th June, 2020
@author: Alfred Ocaka
This class connects to the SQL Server, queries data from the tables,
generates indicators, and saves them to a CSV file.
Two packages are required for the script to run: pyodbc for connecting
to the SQL database and pandas for data analysis.
"""
import pandas as pd
import pyodbc
"""[SQL Connection]
Creating connection to the SQL server
"""
class Malaria:
df_result = ""
def __init__ (self):
pass
    def dataframes(self):
        """
        Creates a connection to the SQL Server, queries the four tables
        into dataframes, and merges them into a single result dataframe.
        """
        sql_conn = pyodbc.connect(
            'DRIVER={ODBC Driver for SQL Server};'
            'SERVER=CHAI;'
            'DATABASE=MALARIA_FOR_COUNTRY_X;'
            'Trusted_Connection=yes')
        # querying the FOCUS_AREA table into a dataframe (loaded but not used in the merges below)
        df_focus_area = pd.read_sql("SELECT * FROM [FOCUS_AREA]", sql_conn)
        # querying the HOUSE_HOLDS table into a dataframe
        df_household = pd.read_sql("SELECT * FROM [HOUSE_HOLDS]", sql_conn)
        # querying the HOUSE_HOLD_MEMBERS table into a dataframe
        df_household_members = pd.read_sql("SELECT * FROM [HOUSE_HOLD_MEMBERS]", sql_conn)
        # querying the BLOOD_SCREENING_INFO table into a dataframe
        df_blood_screening = pd.read_sql("SELECT * FROM [BLOOD_SCREENING_INFO]", sql_conn)
        # merging the HOUSE_HOLD_MEMBERS and HOUSE_HOLDS dataframes
        df_hh = pd.merge(df_household_members, df_household, on="HLD_ID", how="inner")
        # merging the df_hh result with the blood screening dataframe
        self.df_result = pd.merge(df_hh, df_blood_screening, on="MEMBER_ID", how="inner")
    def agg_results(self):
        """
        This method aggregates the output indicators
        and saves the output to a CSV file.
        """
        df_result = pd.DataFrame()
        df_result["Total Members"] = self.df_result["MEMBER_ID"]
        df_result["Total Provinces (based on house holds added)"] = self.df_result["FOCUS_AREA_ID "]
        df_result["Total Houses"] = self.df_result["HLD_ID"]
        df_result["Total Spend Night outside"] = self.df_result["SPENDS_NIGHT_OUTDOOR "]
        df_result["Total Tests Done"] = self.df_result["IS_TESTED "] == 1
        df_result["Total Positive Cases"] = self.df_result["TEST_RESULT "] == 'Positive Pf '
        df_result["Month-Year"] = self.df_result['DATE_CREATED _x'].dt.strftime('%b-%Y')
        result = df_result.groupby(df_result["Month-Year"]).agg({'Total Provinces (based on house holds added)': len, 'Total Houses': len, 'Total Members': sum, 'Total Spend Night outside': sum, 'Total Tests Done': len, 'Total Positive Cases': sum})
        result.to_csv("myresult.csv")
malaria = Malaria()
malaria.dataframes()  # populate self.df_result before aggregating
malaria.agg_results()
|
UTF-8
|
Python
| false
| false
| 2,993
|
py
| 3
|
sql_script.py
| 2
| 0.630805
| 0.628466
| 0
| 86
| 33.767442
| 260
|
arjunreddyt/Computational-Economics
| 2,920,577,767,526
|
9d007f4106a6c9aa35c2326dec9faefff236e5c6
|
5bcb8ca369f398b9cc710e2a5b2f4be3aba0dab1
|
/Monopoly-Probabilites/Monopoly.py
|
3c60d7d1607523e70f9eb3b6dc609898c13ae303
|
[] |
no_license
|
https://github.com/arjunreddyt/Computational-Economics
|
7ac6c17684a48114e209ac9b81e7747f82c44f97
|
f66072d04adc3314bfccce751b0257e1d4eb885f
|
refs/heads/master
| 2021-01-20T15:44:54.685812
| 2017-05-09T22:09:41
| 2017-05-09T22:09:41
| 90,795,118
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
POS = ["GO "
,"Mediterranean Avenue "
,"Community Chest 1 "
,"Baltic Avenue "
,"Income Tax "
,"Reading Railroad "
,"Oriental Avenue "
,"Chance 1 "
,"Vermont Avenue "
,"Connecticut Avenue "
,"Jail "
,"St. Charles Place "
,"Electric Company "
,"States Avenue "
,"Virginia Avenue "
,"Pennsylvania Railroad "
,"St. James Place "
,"Community Chest 2 "
,"Tennessee Avenue "
,"New York Avenue "
,"Free Parking "
,"Kentucky Avenue "
,"Chance 2 "
,"Indiana Avenue "
,"Illinois Avenue "
,"B. & O. Railroad "
,"Atlantic Avenue "
,"Ventnor Avenue "
,"Water Works "
,"Marvin Gardens "
,"Go To Jail "
,"Pacific Avenue "
,"North Carolina Avenue "
,"Community Chest 3 "
,"Pennsylvania Avenue "
,"Short Line "
,"Chance 3 "
,"Park Place "
,"Luxury Tax "
,"Boardwalk "
]
REPEATS = int(10e5)
class Monopoly:
position = 0
doubles = 0
board = []
def __init__(self):
self.position = 0
self.doubles = 0
        # Initialize the board: one landing counter per square
        self.board = [0] * 40
def show(self):
for i in range(40):
print(str(POS[i])+" | "+str(self.board[i]/float(REPEATS)))
def move(self):
rolled = self.roll()
if (rolled):
self.position = (self.position + rolled) % 40
self.register()
def roll(self):
die1 = random.randint(1,6)
die2 = random.randint(1,6)
if (die1 == die2):
self.doubles += 1
else:
self.doubles = 0
if (self.doubles >= 3):
self.doubles = 0
self.gotoJail()
return None
else:
return die1 + die2
def register(self):
self.board[self.position] += 1
# Community Chest
if (self.position == 2 or
self.position == 17 or
self.position == 33):
self.communityChest()
# Chance
elif (self.position == 7 or
self.position == 22 or
self.position == 36):
self.chance()
# Goto Jail
elif (self.position == 30):
self.gotoJail()
def gotoJail(self):
self.position = 10
self.register()
def advanceToGo(self):
self.position = 0
self.register()
def advanceToUtility(self):
if (self.position <= 12 or self.position > 28):
self.position = 12
else:
self.position = 28
self.register()
def advanceToStation(self):
if (self.position <= 5 or self.position > 35):
self.position = 5
elif (self.position <= 15):
self.position = 15
elif (self.position <= 25):
self.position = 25
elif (self.position <= 35):
self.position = 35
self.register()
def communityChest(self):
        card = random.randint(0, 15)  # 16 Community Chest cards (indices 0-15)
# Advance to go
if (card == 0):
self.advanceToGo()
elif (card == 4):
self.gotoJail()
def chance(self):
        card = random.randint(0, 15)  # 16 Chance cards (indices 0-15)
if (card == 0):
self.advanceToGo()
        # Go to Illinois Avenue
elif (card == 1):
self.position = 24
self.register()
elif (card == 2):
self.advanceToUtility()
elif (card == 3):
self.advanceToStation()
# Go to St Charles Place
elif (card == 4):
self.position = 11
self.register()
# Go back three spaces
elif (card == 7):
self.position = (self.position - 3) % 40
self.register()
elif (card == 8):
self.gotoJail()
# Go to Reading Railroad
elif (card == 11):
self.position = 5
self.register()
        # Go to Boardwalk
elif (card == 12):
self.position = 39
self.register()
if __name__=='__main__':
random.seed()
game = Monopoly()
for _ in range(REPEATS):
game.move()
game.show()
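# Reading the output: show() divides each square's landing count by REPEATS,
# giving an estimate of the long-run probability that a move ends on that square.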
|
UTF-8
|
Python
| false
| false
| 4,220
|
py
| 6
|
Monopoly.py
| 3
| 0.494313
| 0.469431
| 0
| 193
| 20.849741
| 64
|
progpyftk/projeto_sky
| 5,935,644,837,343
|
2c1b9b44ba1325c5f249639ed7a36aab92231f38
|
57936dba18017f6a8ba05b552292c22d0ceaad2d
|
/Tela_Principal/tela_principal.py
|
a7c22672b3315887a27476347bcd1593a9011295
|
[] |
no_license
|
https://github.com/progpyftk/projeto_sky
|
a13604d4584d02d4705ce4b0d72fbd82377ffff2
|
0251a5abb0c8e7569d349f038c2eb9a01cf345f0
|
refs/heads/master
| 2016-09-21T01:08:25.531315
| 2016-09-08T19:20:42
| 2016-09-08T19:20:42
| 67,542,770
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'lorenzo.cabrini'
import sys
sys.path.insert(0, r'C:\Users\lorenzo.cabrini\Google Drive\Programação\Codes\Projeto Sky\Tela_Principal')
from PyQt5.QtWidgets import QDialog, QWidget, QMainWindow, QApplication, QLayout, QBoxLayout, QDesktopWidget
from PyQt5.Qt import qApp
from PyQt5 import QtWidgets
from PyQt5.QtCore import QTimer, QEvent, QRect, QPoint, QPropertyAnimation, QSize, QEasingCurve, Qt
from Tela_Buscar_Cliente.tela_buscar_cliente import TelaBuscarCliente
from Tela_Cadastro_Cliente.tela_cadastro_cliente import TelaCadastroCliente
from Tela_Principal.Arquivo_Ui.ui_tela_principaL import Ui_TelaPrincipal
from Tela_Realizar_Venda.tela_realizar_venda import TelaRealizarVenda
import qtawesome as qta
from Inicializador.File_Starter_Alchemy import DadosClientes, Paths, InicializarDB
from sqlalchemy import create_engine, and_, or_
from sqlalchemy.orm import relationship, sessionmaker
from MessageBox.message_box_alerta import MessageBoxAtencao
from MessageBox.message_box_sucesso import MessageBoxSucesso
# Database
engine = create_engine(Paths().base_dados)
session = sessionmaker()
session.configure(bind=engine)
s = session()
class TelaPrincipal(QMainWindow, QDialog, Ui_TelaPrincipal):
def __init__(self, parent=None):
super(TelaPrincipal, self).__init__(parent)
QDialog.__init__(self, parent)
QMainWindow.__init__(self, parent)
Ui_TelaPrincipal.__init__(self)
self.setupUi(self)
self.setWindowFlags(Qt.FramelessWindowHint)
inicializador = InicializarDB()
        # -- Animations and Events -- #
        # Note: the central pane currently has opacity 0
        # ---- dynamic sidebar ---- #
self.largura_barra = 80
self.largura_barra_botao = 30
        # install an event filter on the main window, which will enable the events
qApp.installEventFilter(self)
self.plano_central.setMouseTracking(1)
self.plano_botao_barra.setMouseTracking(1)
self.setMouseTracking(1)
self.plano_principal.setMouseTracking(1)
self.widget_2.setMouseTracking(1)
        # moving the sidebar involves the button bar, the sidebar and the central pane; all three must stay in sync
self.animacao_plano_barra = QPropertyAnimation(self.plano_barra, "geometry".encode())
self.animacao_plano_botao_barra = QPropertyAnimation(self.plano_botao_barra, "geometry".encode())
self.animacao_plano_central = QPropertyAnimation(self.plano_central, "geometry".encode())
#self.esconder_barra_lateral(1)
        # we need this variable to know which speed to use in the animation
self.abertura = True
self.setar_icones()
        # apply hover styling to the buttons
self.widget_2.setStyleSheet("QPushButton:hover:!pressed { border: 3px solid white;}")
        # wire up the button handlers
self.pushButton_9.clicked.connect(self.tela_cadastro_cliente)
self.pushButton_14.clicked.connect(self.tela_buscar_cliente)
self.pushButton_18.clicked.connect(self.close)
self.pushButton_12.clicked.connect(self.showMinimized)
self.pushButton_13.clicked.connect(self.maximizar)
self.pushButton_17.clicked.connect(self.tela_realizar_venda)
        # maximize the window on startup
self.showMaximized()
def maximizar(self):
if self.isMaximized():
self.showNormal()
else:
self.showMaximized()
def setar_icones(self):
        # Set up the central icons
        # Balance
fa_icon = qta.icon('fa.balance-scale',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_7.setIcon(fa_icon)
self.pushButton_7.setIconSize(QSize(70, 70))
        # Stock
fa_icon = qta.icon('fa.stack-overflow',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_10.setIcon(fa_icon)
self.pushButton_10.setIconSize(QSize(70, 70))
        # Make Sale
fa_icon = qta.icon('fa.credit-card-alt',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_17.setIcon(fa_icon)
self.pushButton_17.setIconSize(QSize(70, 70))
        # Find Customer
fa_icon = qta.icon('fa.users',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_14.setIcon(fa_icon)
self.pushButton_14.setIconSize(QSize(70, 70))
        # Purchase Orders
fa_icon = qta.icon('fa.shopping-cart',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_11.setIcon(fa_icon)
self.pushButton_11.setIconSize(QSize(70, 70))
        # Accounts Payable
fa_icon = qta.icon('fa.list-alt',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_16.setIcon(fa_icon)
self.pushButton_16.setIconSize(QSize(70, 70))
        # Register Customer
fa_icon = qta.icon('fa.pencil-square-o',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_9.setIcon(fa_icon)
self.pushButton_9.setIconSize(QSize(70, 70))
        # Statistics
fa_icon = qta.icon('fa.line-chart',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_8.setIcon(fa_icon)
self.pushButton_8.setIconSize(QSize(70, 70))
        # Accounts Receivable
fa_icon = qta.icon('fa.money',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_15.setIcon(fa_icon)
self.pushButton_15.setIconSize(QSize(70, 70))
        # Orders
fa_icon = qta.icon('fa.gift',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_20.setIcon(fa_icon)
self.pushButton_20.setIconSize(QSize(70, 70))
# Cheques
fa_icon = qta.icon('fa.bars',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_6.setIcon(fa_icon)
self.pushButton_6.setIconSize(QSize(70, 70))
        # Serasa lookup
fa_icon = qta.icon('fa.search',
color='white',
options=[{'scale_factor': 0.7}]
)
self.pushButton_21.setIcon(fa_icon)
self.pushButton_21.setIconSize(QSize(70, 70))
        # Window-control button icons
        # Close
fa_icon = qta.icon('fa.times',
color='white',
options=[{'scale_factor': 0.5}]
)
self.pushButton_18.setIcon(fa_icon)
self.pushButton_18.setIconSize(QSize(50, 50))
        # Minimize
fa_icon = qta.icon('fa.minus',
color='white',
options=[{'scale_factor': 0.5}]
)
self.pushButton_12.setIcon(fa_icon)
self.pushButton_12.setIconSize(QSize(50, 50))
        # Restore (maximize toggle)
fa_icon = qta.icon('fa.square-o',
color='white',
options=[{'scale_factor': 0.5}]
)
self.pushButton_13.setIcon(fa_icon)
self.pushButton_13.setIconSize(QSize(50, 50))
def esconder_barra_lateral(self, velocidade):
        # --- hide the sidebar --- #
self.animacao_plano_barra.setDuration(velocidade)
        # read the sidebar's initial position and size
posicao_inicial = QRect(self.plano_barra.geometry())
        # to hide the sidebar, we set its start at the main window's full width, since it is built left to right
posicao_final = QRect(self.width(),0, self.largura_barra, self.height())
        # set up the animations
self.animacao_plano_barra.setStartValue(posicao_inicial)
self.animacao_plano_barra.setEndValue(posicao_final)
        # -- move the button to the edge, with the sidebar already hidden ---
self.animacao_plano_botao_barra.setDuration(velocidade)
posicao_inicial_2 = QRect(self.plano_botao_barra.geometry())
posicao_final_2 = QRect(self.width()-self.largura_barra_botao,(self.height()/4),self.largura_barra_botao,300)
self.animacao_plano_botao_barra.setStartValue(posicao_inicial_2)
self.animacao_plano_botao_barra.setEndValue(posicao_final_2)
        # move the central pane
        # the central pane starts at the main window's left edge and runs to the width minus the bar-button width while the sidebar is hidden
self.animacao_plano_central.setDuration(velocidade)
posicao_inicial_3 = QRect(self.plano_central.geometry())
posicao_final_3 = QRect(0,0,self.width()-self.largura_barra_botao,self.height())
self.animacao_plano_central.setStartValue(posicao_inicial_3)
self.animacao_plano_central.setEndValue(posicao_final_3)
        # run the animations
self.animacao_plano_barra.setEasingCurve(QEasingCurve.Linear)
self.animacao_plano_botao_barra.start()
self.animacao_plano_barra.start()
self.animacao_plano_central.start()
def mostrar_barra_lateral(self):
        # show the sidebar
self.animacao_plano_barra.setDuration(800)
posicao_inicial = QRect(self.plano_barra.geometry())
posicao_final = QRect(self.width()-self.largura_barra,0,self.largura_barra, self.height())
self.animacao_plano_barra.setStartValue(posicao_inicial)
self.animacao_plano_barra.setEndValue(posicao_final)
        ## the button stays hidden under the bar; we may need to give it opacity so it becomes invisible
        # it still needs to be hidden
        # pull the central pane back
posicao_inicial_3 = QRect(self.plano_central.geometry())
posicao_final_3 = QRect(0,0,self.width()-self.largura_barra,self.height())
self.animacao_plano_central.setStartValue(posicao_inicial_3)
self.animacao_plano_central.setEndValue(posicao_final_3)
self.animacao_plano_barra.setEasingCurve(QEasingCurve.Linear)
self.animacao_plano_barra.start()
self.animacao_plano_central.start()
def eventFilter(self, source, event):
if event.type() == QEvent.MouseMove:
if source == self.plano_botao_barra:
self.mostrar_barra_lateral()
if source == self.plano_central or source== self.widget_2:
if self.abertura == True:
self.esconder_barra_lateral(1)
self.abertura = False
else:
self.esconder_barra_lateral(200)
return QMainWindow.eventFilter(self, source, event)
def tela_cadastro_cliente(self):
tela = TelaCadastroCliente()
tela.exec_()
def tela_buscar_cliente(self):
tela = TelaBuscarCliente('TelaPrincipal')
tela.exec_()
def tela_realizar_venda(self):
tela = TelaRealizarVenda()
tela.exec_()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
main = TelaPrincipal()
sys.exit(app.exec())
|
UTF-8
|
Python
| false
| false
| 12,095
|
py
| 25
|
tela_principal.py
| 16
| 0.599256
| 0.581969
| 0
| 294
| 40.112245
| 160
|
soniloi/impostor
| 15,264,313,811,886
|
b142523ca79a6b324b0600e096d60efbc64b65e8
|
49684435913a235cf10543823ca4cf985799dcf6
|
/utilities/test/test_build_sources.py
|
53fa9835df9ab6828144aa5f51980d62c4118267
|
[] |
no_license
|
https://github.com/soniloi/impostor
|
10918dc1d02878751460892e9d8c275c74dd7fa1
|
b87e519823edd58955bf3fd1e5cfa18a19a0d86f
|
refs/heads/master
| 2021-01-17T09:16:20.496697
| 2017-09-09T15:52:49
| 2017-09-09T15:52:49
| 40,811,250
| 7
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from utilities.build_sources import SourceBuilder
class TestSourceBuilder(unittest.TestCase):
def setUp(self):
self.output_dir_base = "output/"
self.builder = SourceBuilder(self.output_dir_base, False)
existing_filenames = ["mollusc.src", "lemon.src", "quercus.src", "nobody.txt"]
merge_content = [
"mollusc\tmollusc_\tsnail",
"quercus\toak",
]
self.builder.configure_nicks(existing_filenames, merge_content)
def test_init_empty(self):
builder = SourceBuilder("output/", False)
self.assertFalse(builder.only_existing)
self.assertEquals(len(builder.existing_nicks), 0)
self.assertEquals(len(builder.aliases), 0)
def test_init_configured(self):
self.assertFalse(self.builder.only_existing)
self.assertEquals(len(self.builder.existing_nicks), 3)
self.assertEquals(len(self.builder.aliases), 5)
self.assertEquals(self.builder.aliases["mollusc"], "mollusc")
self.assertEquals(self.builder.aliases["mollusc_"], "mollusc")
self.assertEquals(self.builder.aliases["snail"], "mollusc")
self.assertEquals(self.builder.aliases["quercus"], "quercus")
self.assertEquals(self.builder.aliases["oak"], "quercus")
def test_process_line_empty(self):
line = "12.34.56 [lemon]"
output = self.builder.process_line(line)
self.assertFalse(output)
def test_process_line_only_whitespace(self):
line = "12.34.56 [lemon] "
output = self.builder.process_line(line)
self.assertFalse(output)
def test_process_line_short(self):
line = "12.34.56 [lemon] hello"
output = self.builder.process_line(line)
self.assertFalse(output)
def test_process_line_no_timestamp(self):
line = "[lemon] hello there"
output = self.builder.process_line(line)
self.assertFalse(output)
def test_process_line_lookback_length(self):
line = "12.34.56 [lemon] hello there"
(output_filepath, output_line) = self.builder.process_line(line)
self.assertEquals(output_filepath, self.output_dir_base + "lemon.src")
self.assertEquals(output_line, "hello there")
def test_process_line_long(self):
line = "12.34.56 [lemon] hello there, from inside my shell"
(output_filepath, output_line) = self.builder.process_line(line)
self.assertEquals(output_line, "hello there, from inside my shell")
def test_process_line_normalizing_content_whitespace(self):
line = "12.34.56 [lemon] hello\t there "
(output_filepath, output_line) = self.builder.process_line(line)
self.assertEquals(output_line, "hello there")
def test_process_line_normalizing_content_case(self):
line = "12.34.56 [lemon] Hello ThErE"
(output_filepath, output_line) = self.builder.process_line(line)
self.assertEquals(output_line, "hello there")
def test_process_line_normalizing_content_smiley(self):
line = "12.34.56 [lemon] hello there :D"
(output_filepath, output_line) = self.builder.process_line(line)
self.assertEquals(output_line, "hello there :D")
def test_process_line_alias(self):
line = "12.34.56 [mollusc_] hello there"
(output_filepath, output_line) = self.builder.process_line(line)
self.assertEquals(output_filepath, self.output_dir_base + "mollusc.src")
self.assertEquals(output_line, "hello there")
def test_process_line_only_existing_known(self):
builder = SourceBuilder(self.output_dir_base, True)
existing_filenames = ["lemon.src"]
builder.configure_nicks(existing_filenames, [])
line = "12.34.56 [lemon] hello there"
(output_filepath, output_line) = builder.process_line(line)
self.assertEquals(output_filepath, self.output_dir_base + "lemon.src")
self.assertEquals(output_line, "hello there")
def test_process_line_only_existing_unknown(self):
builder = SourceBuilder(self.output_dir_base, True)
existing_filenames = ["lemon.src"]
builder.configure_nicks(existing_filenames, [])
line = "12.34.56 [mollusc] hello there"
output = builder.process_line(line)
self.assertFalse(output)
def test_process_line_normalize_nick_whitespace(self):
line = "12.34.56 [ lemon ] hello there"
(output_filepath, output_line) = self.builder.process_line(line)
self.assertEquals(output_filepath, self.output_dir_base + "lemon.src")
self.assertEquals(output_line, "hello there")
def test_process_line_normalize_nick_case(self):
line = "12.34.56 [LemOn] hello there"
(output_filepath, output_line) = self.builder.process_line(line)
self.assertEquals(output_filepath, self.output_dir_base + "lemon.src")
self.assertEquals(output_line, "hello there")
def test_process_line_normalize_nick_punctuation(self):
line = "12.34.56 [@lemon] hello there"
(output_filepath, output_line) = self.builder.process_line(line)
self.assertEquals(output_filepath, self.output_dir_base + "lemon.src")
self.assertEquals(output_line, "hello there")
if __name__ == "__main__":
unittest.main()
|
UTF-8
|
Python
| false
| false
| 5,047
|
py
| 18
|
test_build_sources.py
| 17
| 0.694076
| 0.676441
| 0
| 181
| 26.878453
| 82
|
yandongliu/language-benchmarking
| 188,978,604,356
|
74187fc59515cee0bef326b5c5f20278dd8aa7c0
|
92a7da0c557331f77bd09b9d0b9f159da35fbce0
|
/web_server_fetch/coroutine.py
|
341d6ed7209a407e82c94c3adfd28d564e59e76a
|
[] |
no_license
|
https://github.com/yandongliu/language-benchmarking
|
bd056ce2fe6d11d43a540161d2795188e54fd309
|
49a3dfa6d3da5f3597d5f4b351bf73115e907681
|
refs/heads/master
| 2021-01-01T05:44:10.650500
| 2016-04-25T00:10:21
| 2016-04-25T00:10:21
| 56,989,038
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
from tornado import gen
import tornado.ioloop
import tornado.web
LISTEN_PORT = 8080
def urls_to_check():
return [
'http://google.com',
'http://wikipedia.org',
'http://amazon.com',
]
@gen.coroutine
def check_status(url):
try:
raise gen.Return(requests.get(url).status_code)
except gen.Return:
raise
except Exception as e:
raise gen.Return(('error', str(e)))
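# A non-blocking alternative (a sketch, not part of the original file): the
# requests.get call in check_status above is synchronous and blocks the IOLoop
# while each URL is fetched; tornado's own AsyncHTTPClient yields control back
# to the loop instead.
from tornado.httpclient import AsyncHTTPClient
@gen.coroutine
def check_status_async(url):
    try:
        # note: fetch raises HTTPError for non-2xx responses, which lands in
        # the error branch below
        response = yield AsyncHTTPClient().fetch(url)
        raise gen.Return(response.code)
    except gen.Return:
        raise
    except Exception as e:
        raise gen.Return(('error', str(e)))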
class StatusCheckHandler(tornado.web.RequestHandler):
@gen.coroutine
def get(self):
statuses = {}
for url in urls_to_check():
statuses[url] = yield check_status(url)
self.write(statuses)
def get_app():
return tornado.web.Application([
(r'/', StatusCheckHandler),
], debug=True, autoreload=True)
if __name__ == '__main__':
app = get_app()
app.listen(LISTEN_PORT)
print('Listening on port {}'.format(LISTEN_PORT))
tornado.ioloop.IOLoop.current().start()
|
UTF-8
|
Python
| false
| false
| 981
|
py
| 10
|
coroutine.py
| 6
| 0.61366
| 0.609582
| 0
| 47
| 19.87234
| 55
|
Koellewe/distribution-solver
| 14,955,076,145,830
|
8bbd7e1b803085ca6fa7cc66ce4b2531d29fe019
|
6e2b838915e9f0ab66a10d8bacaa74f258a1354f
|
/main.py
|
0ed76167d8532211da08b4a16cda29ef6d2054ad
|
[] |
no_license
|
https://github.com/Koellewe/distribution-solver
|
a39226285c800768c150f0387e9aceea7bf3a330
|
3a9df8a3bb6369dd6559c2e87884b2d07cfb70dc
|
refs/heads/master
| 2020-06-24T05:59:16.643782
| 2019-07-26T12:30:58
| 2019-07-26T12:30:58
| 198,870,415
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import meta_random
import meta_random_indef
import meta_shuffle_indef
import yaml
from mpi4py import MPI
with open('config.yml') as f:
    cfg = yaml.safe_load(f)
# static vars
comm = MPI.COMM_WORLD
current_rank = comm.rank
total_ranks = comm.size
if __name__ == '__main__':
if cfg['SHUFFLE_MODE']:
meta_shuffle_indef.main()
elif cfg['INDEFINITE']:
meta_random_indef.main()
else:
t = meta_random.main()
if current_rank == 0:
print('runtime:', round(t, 2), 's, with', total_ranks, 'processes')
|
UTF-8
|
Python
| false
| false
| 537
|
py
| 10
|
main.py
| 8
| 0.627561
| 0.621974
| 0
| 23
| 22.304348
| 79
|
pulinau/sumo_rl_driving
| 8,091,718,391,172
|
c635c903d49c8adc940c634ae13433cb7c1da952
|
23ec39bfedc0e6c33d527d8270b0be9972be32ad
|
/test.py
|
8a3cec1ab28ff3e9d353cb668b0b4fbfe84904db
|
[] |
no_license
|
https://github.com/pulinau/sumo_rl_driving
|
75080544ca7c869ad8c5721943e13cb0c187b0a1
|
afc91b82ab6c860841e4be8ab587505e171f4b2e
|
refs/heads/main
| 2023-02-10T01:33:16.668580
| 2018-11-08T05:42:17
| 2018-11-08T05:42:28
| 310,592,918
| 9
| 2
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from include import *
from sumo_cfgs import *
from sumo_gym import *
from observation import *
from action import *
SUMO_BIN = "/home/ken/project/sumo-bin/bin/sumo-gui"
NET_XML_FILE = "/home/ken/project/sumo-rl/sumo_openai_gym/roundabout/roundabout.net.xml"
ROU_XML_FILE_LIST = ["/home/ken/project/sumo-rl/sumo_openai_gym/roundabout/test" + str(i) + ".rou.xml" for i in range(1)]
SUMO_CMD = [SUMO_BIN,
#"-c", "/home/ken/project/sumo-rl/sumo_openai_gym/traffic/test.sumocfg",
"--no-warnings", "true",
"--time-to-teleport", "-1",
"--collision.action", "warn",
"--collision.check-junctions", "true",
"--xml-validation", "never",
"--step-length", str(SUMO_TIME_STEP),
"-n", NET_XML_FILE,
"-r"]
sumo_cfg = SumoCfg(
# sumo
SUMO_CMD,
SUMO_TIME_STEP,
NET_XML_FILE,
ROU_XML_FILE_LIST,
EGO_VEH_ID,
MAX_VEH_ACCEL,
MAX_VEH_DECEL,
MAX_VEH_SPEED,
# observation
NUM_LANE_CONSIDERED,
NUM_VEH_CONSIDERED,
MAX_TTC_CONSIDERED,
OBSERVATION_RADIUS,
# reward
MAX_COMFORT_ACCEL_LEVEL,
MAX_COMFORT_DECEL_LEVEL,
DEFAULT_COLOR,
YIELD_COLOR)
sumo_cfg.SUMO_CMD = SUMO_CMD
env = MultiObjSumoEnv(sumo_cfg)
obs = env.reset(0)
env.agt_ctrl = False
for _ in range(600):
obs, reward_list, env_state, action_dict = \
env.step({"lane_change": ActionLaneChange.NOOP, "accel_level": ActionAccel.NOOP})
if env_state != EnvState.NORMAL:
env.reset(0)
print(obs)
|
UTF-8
|
Python
| false
| false
| 1,735
|
py
| 36
|
test.py
| 22
| 0.542939
| 0.538905
| 0
| 56
| 30
| 121
|
RodrigoOSiqueira/echo
| 10,651,518,908,814
|
6234dfb3a620eb0c7c2ce3bb3542d76fa2a8bf46
|
edc1a0aad809f53e38e366e42f36d3e088de5ac8
|
/accounts/views.py
|
149707480fe9c4753aec563b3d94e46c821bd657
|
[] |
no_license
|
https://github.com/RodrigoOSiqueira/echo
|
e5a9e0bb2653baba213c13053c2703f65c891669
|
9f07a9155cddb72b4584737d6752dcee82379845
|
refs/heads/master
| 2020-04-09T08:51:31.014829
| 2018-12-06T18:18:52
| 2018-12-06T18:18:52
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth import login,logout,authenticate, get_user_model
from django.contrib.auth.forms import UserCreationForm,SetPasswordForm
from django.http import HttpResponse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import *
from .models import PasswordReset
from .utils import generate_hash_key
# Create your views here.
User = get_user_model()
def gate(request):
template_name ='gate.html'
form = Login()
context = {'form':form}
return render (request,template_name,context)
def log(request):
context={}
if request.method=='POST':
form = Login(request.POST)
if form.is_valid():
context['is_valid'] =True
username =form.cleaned_data['userNameLogin']
password =form.cleaned_data['passwordLogin']
user =authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return redirect('index:index')
else:
print(user)
messages.warning(request,'Usuário ou Senha Inválidos')
return redirect('accounts:gate')
    else:
        # non-POST requests fall back to the login gate
        return redirect('accounts:gate')
def regView(request):
template_name ="register.html"
context={}
form1 =RegisterForm()
form2 =ProfileForm()
context['formReg'] = form1
context['formProf'] = form2
return render(request,template_name,context)
def regUser(request):
if request.method == 'POST':
form = RegisterForm(request.POST)
form2 = ProfileForm(request.POST)
if form.is_valid():
user = form.save()
user=authenticate(
username=user.username,
password=form.cleaned_data['password1']
)
login(request,user)
if form2.is_valid():
request.user.profile.name = form2.cleaned_data['name']
request.user.profile.phone= form2.cleaned_data['phone']
request.user.profile.save()
return redirect('index:index')
else:
print(form.errors )
messages.warning(request, form.errors)
return redirect('accounts:regView')
    else:
        return redirect('accounts:regView')
def password_reset(request):
template_name ='passwordReset.html'
context ={}
form =PasswordResetForm(request.POST or None)
    # "or None" checks whether request.POST is empty and, if so, treats the
    # call as form = PasswordResetForm();
    # with an empty form, form.is_valid() is False
if request.method=='POST':
if form.is_valid():
form.save()
messages.success(request,'Verifique seu Email')
else:
messages.warning(request,form.errors)
context['form'] =form
return render(request,template_name,context)
def password_reset_confirm(request,key):
template_name='password_change_form.html'
context = {}
reset = get_object_or_404(PasswordReset,key=key)
form = SetPasswordForm(user=reset.user,data= request.POST or None)
print(form)
if request.method=='POST':
if form.is_valid():
form.save()
messages.success(request,'Senha alterada com sucesso')
return redirect('accounts:gate')
else:
messages.warning(request,form.errors)
context['form']=form
return render(request,template_name,context)
@login_required
def leave(request):
logout(request)
return redirect('index:index')
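# For reference, the Login form imported from .forms above is assumed to look
# roughly like the sketch below (field names inferred from the cleaned_data
# lookups in log(); an illustration, not the project's actual forms.py):
#
#   class Login(forms.Form):
#       userNameLogin = forms.CharField()
#       passwordLogin = forms.CharField(widget=forms.PasswordInput)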
|
UTF-8
|
Python
| false
| false
| 3,709
|
py
| 22
|
views.py
| 13
| 0.608637
| 0.604588
| 0
| 110
| 32.690909
| 90
|
lee-shun/learn_py
| 3,092,376,501,302
|
38d49295a9117136310da2058195fe870407b9cd
|
9062776019f11cd2a251c8c94ed9a7a71b0c1ddb
|
/section2/use_property.py
|
a65cfc458bd32be048cf021ab6c5c3c7179f4690
|
[] |
no_license
|
https://github.com/lee-shun/learn_py
|
434505029dd578934c2f00354a76ed7af827ec5c
|
7380d9c3cf1d98dd71bb6d85a6c453d387abfe4a
|
refs/heads/master
| 2022-11-21T04:37:11.428433
| 2020-07-27T09:32:02
| 2020-07-27T09:32:02
| 282,578,093
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
class Student(object):
@property
def score(self):
return self._score
@score.setter
def score(self, value):
if not isinstance(value, int):
raise ValueError('score must be int!')
        if value < 0 or value > 100:
            raise ValueError("score must be between 0 and 100")
self._score = value
print("set score to:", self._score)
"""
注意:这里的s1.score并不是单纯的属性绑定,而是使用@proptery给函数做出来的
score是方法,虽然很像动态的属性!!!!
"""
s1 = Student()
s1.score = 60
print(s1.score)
# Out-of-range values are rejected; catch the error so the demo runs to completion
try:
    s1.score = 101
except ValueError as e:
    print(e)
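# A further minimal sketch in the same style (the Reader class is purely
# illustrative): defining only the getter makes the property read-only.
class Reader(object):
    @property
    def pages(self):
        return 100
r = Reader()
print(r.pages)  # 100
try:
    r.pages = 1  # no setter was defined, so assignment raises AttributeError
except AttributeError as e:
    print(e)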
|
UTF-8
|
Python
| false
| false
| 620
|
py
| 24
|
use_property.py
| 23
| 0.60566
| 0.571698
| 0
| 25
| 20.2
| 57
|
karthikwebdev/oops-infytq-prep
| 4,526,895,542,537
|
5e56b19f6fc482877f4bb3801eab67c2d59ddec3
|
4b18ffeca5d362e43cf3a00bbfa6180a8b96c409
|
/hackerearth/nosubpal.py
|
b13e487e4bc3395b0d2f3e4e5de543b858031e8f
|
[] |
no_license
|
https://github.com/karthikwebdev/oops-infytq-prep
|
075c9e1553a7a0d71907602d3da6bfadae5285e3
|
4f855184875d52efc2bfab18686b0fde74e0cfd6
|
refs/heads/master
| 2020-12-22T08:22:47.501134
| 2020-02-18T15:28:45
| 2020-02-18T15:28:45
| 236,723,927
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Read a word. If it is a palindrome, strip characters off the end until the
# remaining prefix is no longer a palindrome, then print that prefix's length
# (0 if every prefix is a palindrome, e.g. "aaa"); otherwise print the full length.
word = input()
if(word == word[::-1]):
    while(word):
        word = word[:len(word)-1]
        if(word == word[::-1]):
            pass
        else:
            break
    print(len(word))
else:
    print(len(word))
|
UTF-8
|
Python
| false
| false
| 219
|
py
| 63
|
nosubpal.py
| 57
| 0.442922
| 0.429224
| 0
| 11
| 18.909091
| 33
|
ciromoraismedeiros/image-gallery
| 4,226,247,869,258
|
dd1246fe8cfb1448ab916a4cf3e9442d6e862fa3
|
9fa4f73ba1d04b5a92c4ccd06138b11187459cdd
|
/galleryproj/gallery/urls.py
|
90f8e25e7e39ae2e546e1fe71f3769de83df6a3a
|
[] |
no_license
|
https://github.com/ciromoraismedeiros/image-gallery
|
40aef6585ea8fe820287c8015a1bc54cab39548a
|
e3c392b11e37a923fd19af24db07d53e74ffd488
|
refs/heads/master
| 2021-06-01T13:54:16.624866
| 2020-01-23T09:47:29
| 2020-01-23T09:47:29
| 153,814,907
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('sign-s3/', views.sign_s3, name='sign-s3'),
path('submit-form/', views.submit_form, name='submit'),
path('upload/', views.upload, name='upload'),
path('like/', views.like, name='like'),
path('dislike/', views.dislike, name='dislike'),
path('approve/', views.approve, name='approve'),
path('see-photos/', views.see_photos, name='see-photos'),
]
|
UTF-8
|
Python
| false
| false
| 484
|
py
| 19
|
urls.py
| 12
| 0.630165
| 0.623967
| 0
| 14
| 33.5
| 61
|
vitaly-krugl/pika-perf
| 10,900,627,010,137
|
af1a8c03c1040bbee33f42db8dfef6a3c2ecdf5f
|
9f108fb178e38e53a9b9a434c18893918cf49a2c
|
/haigha_perf.py
|
3f6c9600880fd0235f7ee2e05d81f2052f28a6e6
|
[] |
no_license
|
https://github.com/vitaly-krugl/pika-perf
|
8aef9c56183de676a520cc9992b04845e0eabe72
|
18fcb2a1d29b2d4049c0b2399812568b718d86a9
|
refs/heads/master
| 2016-09-06T06:00:01.750983
| 2015-05-23T06:01:49
| 2015-05-23T06:01:49
| 33,105,420
| 0
| 0
| null | false
| 2015-03-30T16:56:30
| 2015-03-30T05:43:17
| 2015-03-30T11:06:19
| 2015-03-30T16:56:29
| 0
| 0
| 0
| 0
|
Python
| null | null |
"""Performance tests for haigha
"""
import collections
import logging
from optparse import OptionParser
import socket
import sys
from haigha.connections.rabbit_connection import RabbitConnection
from haigha.message import Message
from haigha.transports import socket_transport
g_log = logging.getLogger("haigha_perf")
#logging.root.setLevel(logging.DEBUG)
ROUTING_KEY = "test"
def main():
  logging.basicConfig(
      level=logging.INFO,
      format='%(asctime)-15s %(name)s(%(process)s) - %(levelname)s - %(message)s')
topHelpString = (
"\n"
"\t%prog COMMAND OPTIONS\n"
"\t%prog --help\n"
"\t%prog COMMAND --help\n"
"\n"
"Supported COMMANDs:\n"
"\tpublish - publish messages.\n"
"\taltpubcons - Alternate publishing/consuming one message at a time."
)
topParser = OptionParser(topHelpString)
if len(sys.argv) < 2:
topParser.error("Missing COMMAND")
command = sys.argv[1]
if command == "publish":
_handlePublishTest(sys.argv[2:])
elif command == "altpubcons":
_handleAlternatingPubConsumeTest(sys.argv[2:])
elif not command.startswith("-"):
topParser.error("Unexpected action: %s" % (command,))
else:
try:
topParser.parse_args()
except:
raise
else:
topParser.error("Unknown command=%s" % command)
def _handlePublishTest(args):
""" Parse args and invoke the publish test using the requested connection
class
:param args: sequence of commandline args passed after the "publish" keyword
"""
helpString = (
"\n"
"\t%%prog publish OPTIONS\n"
"\t%%prog publish --help\n"
"\t%%prog --help\n"
"\n"
"Publishes the given number of messages of the\n"
"given size to the given exchange and routing_key=%s using the specified\n"
"haigha interface") % (ROUTING_KEY,)
parser = OptionParser(helpString)
implChoices = [
"SocketTransport", # Blocking socket transport
]
parser.add_option(
"--impl",
action="store",
type="choice",
dest="impl",
choices=implChoices,
help=("Selection of haigha transport "
"[REQUIRED; must be one of: %s]" % ", ".join(implChoices)))
parser.add_option(
"--exg",
action="store",
type="string",
dest="exchange",
help="Destination exchange [REQUIRED]")
parser.add_option(
"--msgs",
action="store",
type="int",
dest="numMessages",
default=1000,
help="Number of messages to send [default: %default]")
parser.add_option(
"--size",
action="store",
type="int",
dest="messageSize",
default=1024,
help="Size of each message in bytes [default: %default]")
parser.add_option(
"--pubacks",
action="store_true",
dest="deliveryConfirmation",
default=False,
help="Publish in delivery confirmation mode [defaults to OFF]")
  options, positionalArgs = parser.parse_args(args)
if positionalArgs:
raise parser.error("Unexpected to have any positional args, but got: %r"
% positionalArgs)
if not options.impl:
parser.error("--impl is required")
if options.exchange is None:
parser.error("--exg must be specified with a valid destination exchange name")
if options.impl == "SocketTransport":
runBlockingSocketPublishTest(
implClassName=options.impl,
exchange=options.exchange,
numMessages=options.numMessages,
messageSize=options.messageSize,
deliveryConfirmation=options.deliveryConfirmation)
else:
parser.error("unexpected impl=%r" % (options.impl,))
def runBlockingSocketPublishTest(implClassName,
exchange,
numMessages,
messageSize,
deliveryConfirmation):
g_log.info(
"runBlockingSocketPublishTest: impl=%s; exchange=%s; numMessages=%d; "
"messageSize=%s; deliveryConfirmation=%s", implClassName, exchange,
numMessages, messageSize, deliveryConfirmation)
implClass = getattr(socket_transport, implClassName)
assert implClass is socket_transport.SocketTransport, implClass
payload = "a" * messageSize
class State(object):
closing = False
publishConfirm = False
channelClosed = False
connectionClosed = False
connection = None
def onConnectionClosed():
State.connectionClosed = True
g_log.info("%s: connection closed; close_info=%s", implClassName,
State.connection.close_info if State.connection else None)
assert State.closing, "unexpected connection-close"
conn = RabbitConnection(
transport="socket",
sock_opts={(socket.IPPROTO_TCP, socket.TCP_NODELAY) : 1},
close_cb=onConnectionClosed,
**getConnectionParameters())
g_log.info("%s: opened connection", implClassName)
def onChannelClosed(ch):
State.channelClosed = True
g_log.info("%s: channel closed; close_info=%s",
implClassName, ch.close_info)
assert State.closing, "unexpected channel-close"
channel = conn.channel()
channel.add_close_listener(onChannelClosed)
g_log.info("%s: opened channel", implClassName)
if deliveryConfirmation:
channel.confirm.select()
def ack(mid):
State.publishConfirm = True
def nack(mid):
g_log.error("Got Nack from broker")
raise RuntimeError("Got Nack from broker")
channel.basic.set_ack_listener( ack )
channel.basic.set_nack_listener( nack )
g_log.info("%s: enabled message delivery confirmation", implClassName)
# Publish
for i in xrange(numMessages):
assert not State.publishConfirm
message = Message(payload)
channel.basic.publish(message, exchange=exchange, routing_key=ROUTING_KEY,
immediate=False, mandatory=False)
if deliveryConfirmation:
while not State.publishConfirm:
conn.read_frames()
else:
State.publishConfirm = False
else:
g_log.info("Published %d messages of size=%d via=%s",
i+1, messageSize, implClass)
State.closing = True
g_log.info("%s: closing channel", implClassName)
channel.close()
while not State.channelClosed:
conn.read_frames()
g_log.info("%s: closing connection", implClassName)
conn.close()
while not State.connectionClosed:
conn.read_frames()
assert not State.publishConfirm
g_log.info("%s: DONE", implClassName)
def _handleAlternatingPubConsumeTest(args):
""" Parse args and invoke the alternating publish-consume test using the
requested connection class
:param args: sequence of commandline args passed after the "altpubcons"
keyword
"""
helpString = (
"\n"
"\t%prog publish OPTIONS\n"
"\t%prog publish --help\n"
"\t%prog --help\n"
"\n"
"Alternates publishing/consuming the given number of messages of the\n"
"given size one message at a time via default exchange using the\n"
"specified haigha interface")
parser = OptionParser(helpString)
implChoices = [
"SocketTransport", # Blocking socket transport
]
parser.add_option(
"--impl",
action="store",
type="choice",
dest="impl",
choices=implChoices,
help=("Selection of haigha transport "
"[REQUIRED; must be one of: %s]" % ", ".join(implChoices)))
parser.add_option(
"--msgs",
action="store",
type="int",
dest="numMessages",
default=1000,
help="Number of messages to send [default: %default]")
parser.add_option(
"--size",
action="store",
type="int",
dest="messageSize",
default=1024,
help="Size of each message in bytes [default: %default]")
parser.add_option(
"--conacks",
action="store_true",
dest="useConsumerAcks",
default=False,
help=("Configure consumer with noack=False and ack consumed messages "
"one-at-a-time [defaults to OFF]"))
parser.add_option(
"--pubacks",
action="store_true",
dest="deliveryConfirmation",
default=False,
help="Publish in delivery confirmation mode [defaults to OFF]")
  options, positionalArgs = parser.parse_args(args)
if positionalArgs:
raise parser.error("Unexpected to have any positional args, but got: %r"
% positionalArgs)
if not options.impl:
parser.error("--impl is required")
if options.impl == "SocketTransport":
runBlockingSocketAltPubConsumeTest(
implClassName=options.impl,
numMessages=options.numMessages,
messageSize=options.messageSize,
useConsumerAcks=options.useConsumerAcks,
deliveryConfirmation=options.deliveryConfirmation)
else:
parser.error("unexpected impl=%r" % (options.impl,))
def runBlockingSocketAltPubConsumeTest(implClassName,
numMessages,
messageSize,
useConsumerAcks,
deliveryConfirmation):
"""Alternates publishing/consuming the given number of messages of the
given size one message at a time via default exchange
"""
g_log.info(
"runBlockingSocketAltPubConsumeTest: impl=%s; numMessages=%d; "
"messageSize=%s; useConsumerAcks=%s, deliveryConfirmation=%s", implClassName,
numMessages, messageSize, useConsumerAcks, deliveryConfirmation)
implClass = getattr(socket_transport, implClassName)
assert implClass is socket_transport.SocketTransport, implClass
payload = "a" * messageSize
class State(object):
closing = False
publishAckMessageId = None
channelClosed = False
connectionClosed = False
connection = None
incomingMsgs = collections.deque()
def onConnectionClosed():
State.connectionClosed = True
g_log.info("%s: connection closed; close_info=%s", implClassName,
State.connection.close_info if State.connection else None)
assert State.closing, "unexpected onnection-close"
conn = RabbitConnection(
transport="socket",
sock_opts={(socket.IPPROTO_TCP, socket.TCP_NODELAY) : 1},
close_cb=onConnectionClosed,
**getConnectionParameters())
g_log.info("%s: opened connection", implClassName)
def onChannelClosed(ch):
State.channelClosed = True
g_log.info("%s: channel closed; close_info=%s",
implClassName, ch.close_info)
assert State.closing, "unexpected channel-close"
channel = conn.channel()
channel.add_close_listener(onChannelClosed)
g_log.info("%s: opened channel", implClassName)
if deliveryConfirmation:
channel.confirm.select()
def onAck(mid):
State.publishAckMessageId = mid
def onNack(mid):
msg = "Got Nack from broker: %r" % (mid,)
g_log.error(msg)
raise RuntimeError(msg)
channel.basic.set_ack_listener( onAck )
channel.basic.set_nack_listener( onNack )
g_log.info("%s: enabled message delivery confirmation", implClassName)
# Create transient queue
qname = channel.queue.declare(passive=False, durable=False, exclusive=False,
auto_delete=True, nowait=False)[0]
g_log.info("%s: Created queue=%s", implClassName, qname)
def publish():
assert State.publishAckMessageId is None
message = Message(payload)
msgId = channel.basic.publish(message, exchange="",
routing_key=qname,
immediate=False, mandatory=False)
if deliveryConfirmation:
while State.publishAckMessageId is None:
conn.read_frames()
else:
assert State.publishAckMessageId == msgId, (
State.publishAckMessageId, msgId)
State.publishAckMessageId = None
assert State.publishAckMessageId is None
return msgId
# Create consumer
def onIncomingMessage(msg):
State.incomingMsgs.append(msg)
channel.basic.consume(qname, consumer=onIncomingMessage,
no_ack=not useConsumerAcks, nowait=False)
g_log.info("%s: created consumer", implClassName)
# Publish/consume
for i in xrange(numMessages):
assert not State.incomingMsgs, State.incomingMsgs
msgId = publish()
# Wait for incoming
while not State.incomingMsgs:
conn.read_frames()
assert len(State.incomingMsgs) == 1, State.incomingMsgs
msg = State.incomingMsgs.pop()
assert len(msg.body) == len(payload)
if useConsumerAcks:
# print >> sys.stderr, "ZZZ delivery_info:", msg.delivery_info
channel.basic.ack(msg.delivery_info["delivery_tag"])
else:
g_log.info("Published %d messages of size=%d via=%s",
i+1, messageSize, implClass)
State.closing = True
g_log.info("%s: closing channel", implClassName)
channel.close()
while not State.channelClosed:
conn.read_frames()
g_log.info("%s: closing connection", implClassName)
conn.close()
while not State.connectionClosed:
conn.read_frames()
assert State.publishAckMessageId is None
assert not State.incomingMsgs
g_log.info("%s: DONE", implClassName)
def getConnectionParameters():
"""
:returns: dict with connection params
"""
return dict(
user='guest',
password='guest',
vhost='/',
host='localhost',
port=5672)
if __name__ == '__main__':
main()
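# Example invocations (assuming a local RabbitMQ broker reachable with the
# default guest/guest credentials from getConnectionParameters(); "amq.direct"
# is just an example exchange name):
#
#   python haigha_perf.py publish --impl=SocketTransport --exg=amq.direct --msgs=1000 --size=1024
#   python haigha_perf.py altpubcons --impl=SocketTransport --msgs=1000 --pubacks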
|
UTF-8
|
Python
| false
| false
| 13,373
|
py
| 5
|
haigha_perf.py
| 4
| 0.654453
| 0.651911
| 0
| 498
| 25.853414
| 82
|
SeaWar741/ITC
| 15,418,932,628,927
|
418fb94f4b967f85be066f3d2a80d1fcc0c7b31c
|
16f50a812eca90748e87bfe471e0c05f178337fd
|
/1er_Semestre/memoriaAdry.py
|
3d6fd91b873a30a7a521286c9e0722c02a80f742
|
[] |
no_license
|
https://github.com/SeaWar741/ITC
|
65f73365762366f56cfbd6d0bc788cd384672d12
|
5f75716be58ca6e00bcd8dae7546fd19fe37657f
|
refs/heads/master
| 2023-02-05T23:25:13.972031
| 2022-09-29T10:38:32
| 2022-09-29T10:38:32
| 205,020,772
| 4
| 2
| null | false
| 2023-01-19T15:28:45
| 2019-08-28T20:48:35
| 2022-06-12T03:35:28
| 2023-01-19T15:28:44
| 592,142
| 4
| 2
| 10
|
Jupyter Notebook
| false
| false
|
import random
values={} # dictionary storing the card pairs
def creatablero():
    board = [[0]*6 for i in range(6)] # build a 6x6 array with a comprehension
    num =1 # value placed into each array cell
    for i in range(6): # increment the number and place it on the board; [i] is the row
        for j in range(6): # [j] is the column
            board[i][j]= num
            num+=1 # add one
    return board # returns a 6x6 matrix
def printtablero(board):
    s = [[str(e) for e in row] for row in board] # turn every element of each row into a string
    lens = [max(map(len, col)) for col in zip(*s)] # widest entry of each column, used for alignment
    fmt = '\t'.join('{{:{}}}'.format(x) for x in lens) # format string with even spacing (\t --> tab, roughly 4 spaces)
    table = [fmt.format(*row) for row in s] # render it as a table
    print ('\n'.join(table)) # join and print line by line
def setvalues(values):
    keys = list(range(1, 37)) # list of 36 values, 1 through 36
    random.shuffle(keys) # shuffle the numbers
    values={keys[0]:'azul',keys[1]:'rojo',keys[2]:'amarillo',keys[3]:'rosa',keys[4]:'morado',keys[5]:'naranja',keys[6]:'blanco',keys[7]:'negro',keys[8]:'celeste',keys[9]:'magenta',keys[10]:'turquesa',keys[11]:'cafe',keys[12]:'beige',keys[13]:'verde',keys[14]:'lila',keys[15]:'plata',keys[16]:'dorado',keys[17]:'marino',keys[18]:'azul',keys[19]:'rojo',keys[20]:'amarillo',keys[21]:'rosa',keys[22]:'morado',keys[23]:'naranja',keys[24]:'blanco',keys[25]:'negro',keys[26]:'celeste',keys[27]:'magenta',keys[28]:'turquesa',keys[29]:'cafe',keys[30]:'beige',keys[31]:'verde',keys[32]:'lila',keys[33]:'plata',keys[34]:'dorado',keys[35]:'marino'}
    #print(values)
    return values # returns the face-down card values (a dictionary)
def game(values,board):
    done_numbers = [] # stores the pairs that have already been found
    print()
    print("----------= Bienvenido al Juego de Memoria =----------\n")
    printtablero(board)
    print()
    user1n = input("Ingresar nombre del jugador 1: >") # save player 1's name
    user2n = input("Ingresar nombre del jugador 2: >") # save player 2's name
    print()
    p1 = 0 # player 1's score
    p2 = 0 # player 2's score
    continueg = True # boolean that keeps the game running
    i = 0 # turn counter
    while continueg:
        if (i % 2 == 0): # player 1's turn
            print(f"----------= Turno de: {user1n} | Puntos: {p1} =----------") # f-string puts the variables inside the output
            position = int(input("Ingresar carta 1: "))
            if position not in done_numbers: # if the card has not been taken yet, continue; otherwise the turn is lost
                print(values.get(position)) # print the number's value, e.g. 1 --> azul
                position2 = int(input("Ingresar carta 2: "))
                if position2 not in done_numbers: # check the second card has not been taken yet; otherwise the turn is lost
                    print(values.get(position2)) # print the number's value
                    if values.get(position) == values.get(position2) and position != position2:
                        print("Obtuviste puntos!")
                        done_numbers.append(position) # add the card to the taken cards
                        done_numbers.append(position2) # add the card to the taken cards
                        p1 +=1 # add one point
                    else:
                        print("Intentalo de nuevo!") # no points this time
                else:
                    print("Pierdes tu turno por no poner atencion, esas cartas ya se obtuvieron!")
            else:
                print("Pierdes tu turno por no poner atencion, esa carta ya se obtuvo!")
        else: # player 2's turn
            print(f"----------= Turno de: {user2n} | Puntos: {p2} =----------")
            position = int(input("Ingresar carta 1: "))
            if position not in done_numbers:
                print(values.get(position))
                position2 = int(input("Ingresar carta 2: "))
                if position2 not in done_numbers:
                    print(values.get(position2))
                    if values.get(position) == values.get(position2) and position != position2:
                        print("Obtuviste puntos!")
                        done_numbers.append(position)
                        done_numbers.append(position2)
                        p2 +=1
                    else:
                        print("Intentalo de nuevo!")
                else:
                    print("Pierdes tu turno por no poner atencion, esas cartas ya se obtuvieron!")
            else:
                print("Pierdes tu turno por no poner atencion, esa carta ya se obtuvo!")
        if i >=1: # after the first round has passed, ask whether the players want to continue
            print("----------------------= ¿Continuar? =-----------------------")
            continuegq = input("(S/N) > ").lower() # read the continue answer (lowercased)
            if continuegq == "n": # on "n" the game stops
                continueg = False
        print("------------------------= Tablero =------------------------\n")
        printtablero(board) # print the board
        print()
        i +=1
        if len(done_numbers) == 36 or continueg == False: # all 36 cards drawn, or the players quit
            continueg = False # stop the loop
            winner = "" # the winner's name is stored here
            if p1 > p2: # player 1 has more points, player 1 wins
                winner = user1n
            elif p2 > p1: # player 2 has more points, player 2 wins
                winner = user2n
            else:
                winner = "Empate!" # equal scores: it's a tie
print("-----------------------= ATENENCION =----------------------\n")
print("-------------------= TENEMOS UN GANADOR =------------------\n")
print(" '._==_==_=_.'")
print(' .-\: /-.')
print(' | (|:. |) |')
print(" '-|:. |-'")
print(" \::. /")
print(" '::. .'")
print(" ) (")
print(" _.' '._")
print(' `"""""""`')
print(f" {winner} ")
print(f"Puntaje: {user1n}, Puntos: {p1}")
print(f"Puntaje: {user2n}, Puntos {p2}")
board = creatablero() # create a board
values = setvalues(values) # set the card values
game(values,board) # start the game
|
UTF-8
|
Python
| false
| false
| 6,850
|
py
| 556
|
memoriaAdry.py
| 300
| 0.514315
| 0.493281
| 0
| 114
| 58.070175
| 636
|
Lpkepka/odis_project
| 5,265,629,923,920
|
2c61e442b7c742bc276ca4aa78a9709c0ba6f9a6
|
1725e09120b86bb8f094d10e482d94369f7d2404
|
/Tests/logParserTests.py
|
972b6fe317e43e1c65acd02bc55fabeda77b7ceb
|
[] |
no_license
|
https://github.com/Lpkepka/odis_project
|
4cecca1492843d9d00e020f2c9f3bc30511f82dd
|
12fcbf10c073eeab3d96ee07223efbf86aa0e980
|
refs/heads/main
| 2023-04-28T15:55:44.393631
| 2021-05-26T09:45:01
| 2021-05-26T09:45:01
| 360,270,272
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
import logParser
parser = logParser.LogParser()
class TestParser(unittest.TestCase):
ELBLog = '2015-05-13T23:39:43.945958Z my-loadbalancer 192.168.131.39:2817 10.0.0.1:80 0.000086 0.001048 0.001337 200 200 0 57 "GET https://www.example.com:443/ HTTP/1.1" "curl/7.38.0" DHE-RSA-AES128-SHA TLSv1.2'
IISLog = '192.168.114.201, -, 03/20/05, 7:55:20, W3SVC2, SERVER, 172.21.13.45, 4502, 163, 3223, 200, 0, GET, /DeptLogo.gif, -,'
HTTPLog = 'GET /tutorials/other/top-20-mysql-best-practices/ HTTP/1.1'
ApacheLog = '127.0.0.1 – frank [10/Oct/2000:13:55:36 -0700] “GET /apache_pb.gif HTTP/1.0” 200 2326'
NginxLog = '127.0.0.1 - dbmanager [20/Nov/2017:18:52:17 +0000] "GET / HTTP/1.1" 401 188 "-" "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0"'
NCAS_access_log = '172.21.13.45 - Microsoft\JohnDoe [07/Apr/2004:17:39:04 -0800] "GET /scripts/iisadmin/ism.dll?http/serv HTTP/1.0" 200 3401'
invalidLog = 'asdklsajdklajsd'
def test_apache_log(self):
values = parser.parseLogs(self.ApacheLog)
self.assertEqual(len(values), 6)
self.assertEqual(values[0], '127.0.0.1')
self.assertEqual(values[2], '/apache_pb.gif')
self.assertEqual(values[3], '200')
def test_HTTPLog(self):
values = parser.parseLogs(self.HTTPLog)
self.assertEqual(len(values), 6)
self.assertEqual(values[0], '')
self.assertEqual(values[2], '/tutorials/other/top-20-mysql-best-practices/')
self.assertEqual(values[3], '')
def test_IISLog(self):
values = parser.parseLogs(self.IISLog)
self.assertEqual(len(values), 6)
self.assertEqual(values[0], '192.168.114.201')
self.assertEqual(values[2], '/DeptLogo.gif')
self.assertEqual(values[3], '200')
def test_ELBLog(self):
values = parser.parseLogs(self.ELBLog)
self.assertEqual(len(values), 6)
self.assertEqual(values[0], '192.168.131.39')
self.assertEqual(values[2], 'https://www.example.com:443/')
self.assertEqual(values[3], '200')
def test_NginxLog(self):
values = parser.parseLogs(self.NginxLog)
self.assertEqual(len(values), 6)
self.assertEqual(values[0], '127.0.0.1')
self.assertEqual(values[2], '/')
self.assertEqual(values[3], '401')
def test_NCAS_access_Log(self):
values = parser.parseLogs(self.NCAS_access_log)
self.assertEqual(len(values), 6)
self.assertEqual(values[0], '172.21.13.45')
self.assertEqual(values[2], '/scripts/iisadmin/ism.dll?http/serv')
self.assertEqual(values[3], '200')
def test_invalid_logs(self):
values = parser.parseLogs(self.invalidLog)
self.assertIsNone(values)
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false
| false
| 2,817
|
py
| 14
|
logParserTests.py
| 7
| 0.641764
| 0.519388
| 0
| 62
| 44.33871
| 215
|
mitdbg/datascienceclass
| 13,941,463,854,954
|
3a8d7201547a4b87c534eae68f86a0da782fa31f
|
f97f6b8cded431c1e09e431f7f8c7aab2c0a2fe5
|
/spring_2022/lab_6/code/q5.py
|
606db0ecc1151c02e36591dac64eb19954954bca
|
[] |
no_license
|
https://github.com/mitdbg/datascienceclass
|
e725f04b75760ac03f81973903c83404ce8d90b6
|
f44185920d838a63582be6abb71ff99d323a21b7
|
refs/heads/master
| 2023-02-23T09:27:42.671532
| 2022-10-03T11:52:51
| 2022-10-03T11:52:51
| 203,637,890
| 5
| 38
| null | false
| 2023-02-08T02:36:50
| 2019-08-21T18:01:24
| 2023-01-01T05:10:34
| 2023-02-08T02:36:47
| 68,900
| 5
| 25
| 4
|
Jupyter Notebook
| false
| false
|
import pyspark
import os
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from time import perf_counter
print("Initializing Spark session...")
spark = SparkSession.builder.config("spark.driver.memory", "32g").config(
"spark.executor.instances", 4).config("spark.executor.cores", 4).config(
"spark.driver.cores", "4").getOrCreate()
spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
print("Creating dataframe...")
# YOUR CODE GOES HERE
# 1. Read the appropriate JSON files into a single dataframe
MAX_PARTITIONS = 100000
partitions = 1
while (partitions <= MAX_PARTITIONS):
print(f"Now running for {partitions} partition(s)")
# YOUR CODE GOES HERE
# 2. Set the shuffle partitions appropriately
t_start = perf_counter()
# YOUR CODE GOES HERE
# 3. Calculate the top 3 providers. Make sure that pyspark, which uses a lazy
# approach like dask, actually performs the computation
t_end = perf_counter()
print(f"Duration for {partitions} partition(s): {(t_end-t_start)*1000} ms")
partitions *= 10
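# A hedged sketch of the three placeholders above (the input path "data/*.json"
# and the "provider" column name are assumptions, not given in this exercise):
#
#   df = spark.read.json("data/*.json")                              # 1. read
#   spark.conf.set("spark.sql.shuffle.partitions", partitions)       # 2. partitions
#   top3 = (df.groupBy("provider").count()
#             .orderBy(F.desc("count")).limit(3).collect())          # 3. collect() forces execution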
|
UTF-8
|
Python
| false
| false
| 1,067
|
py
| 129
|
q5.py
| 30
| 0.733833
| 0.713215
| 0
| 34
| 30.382353
| 80
|
LoganHentschel/csp_python
| 7,335,804,159,425
|
8cdbb654bb8c9411d7e7475718b0cd04d91bd65a
|
286fcdb146513f9ebc6d440fb6393c7925c0c491
|
/CSP_Sem1/TURTLE/1.1 Unit/Bug Fix Assignment/FIXED2_LEGS_a115_buggy_image.py
|
5ad85c398ce540aafbce70320d3cc80852a0d5df
|
[] |
no_license
|
https://github.com/LoganHentschel/csp_python
|
86dc0b305ef44e3e081b6b05e41d96567f5c9bf7
|
ed408e90a4fec9e4818a365c9e57e7a09ba697e0
|
refs/heads/master
| 2020-12-06T07:45:05.573690
| 2020-04-07T04:34:25
| 2020-04-07T04:34:25
| 232,395,261
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# a115_buggy_image.py
import turtle as trtl
##############
trtl_spider = trtl.Turtle()
#
''' CREATE SPIDER BODY '''
trtl_spider.pensize(40)
trtl_spider.circle(20)
# # #
''' CREATE SPIDER HEAD '''
### isn't part of the code quite yet; currently the head is just a rough circle
# # #
''' CONFIGURE SPIDER LEGS '''
spider_legs = 8
leg_length = 70
leg_angle = 360 / spider_legs
trtl_spider.pensize(5)
leg_increment = 0
# #
'''DRAW LEGS'''
while (leg_increment < spider_legs):
trtl_spider.goto(0,20)
trtl_spider.setheading(leg_angle*leg_increment)
trtl_spider.forward(leg_length)
# trtl_spider.forward(leg_length)
leg_increment = leg_increment + 1
# # #
trtl_spider.hideturtle()
# # # #
wn = trtl.Screen()
wn.mainloop()
|
UTF-8
|
Python
| false
| false
| 728
|
py
| 61
|
FIXED2_LEGS_a115_buggy_image.py
| 59
| 0.656593
| 0.630495
| 0
| 34
| 20.441176
| 75
|
gitbuda/optframe
| 19,567,871,025,717
|
11e42764f2b6f50ce60a552e732734eb88a76f08
|
de1776c11e306532aa4d7e838aa0651807ab65c2
|
/problems/cvrp/hmo_loader.py
|
d4d0cbe7dc0500cc91a3d7e5bcf8a96ef6448d59
|
[] |
no_license
|
https://github.com/gitbuda/optframe
|
40c12acb94618407cd00d6897603f47c35b4c6ac
|
b2ad4b297f372beaa969b08d0636ddfd3e91d53e
|
refs/heads/master
| 2021-01-23T18:21:55.606525
| 2015-06-23T13:10:26
| 2015-06-23T13:10:26
| 32,553,940
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
class HMOProblem(object):
def __init__(self):
self._warehouses_no = 0
self._customers_no = 0
self._warehouses_coords = []
self._customers_coords = []
self._vehicle_capacity = 0
self._warehouse_capacities = []
self._customer_desires = []
self._warehouse_prices = []
self._vehicle_price = 0
self._ccdistances = defaultdict(lambda: defaultdict(list))
self._wcdistances = defaultdict(lambda: defaultdict(list))
@property
def warehouses_no(self):
return self._warehouses_no
@warehouses_no.setter
def warehouses_no(self, value):
self._warehouses_no = value
@property
def customers_no(self):
return self._customers_no
@customers_no.setter
def customers_no(self, value):
self._customers_no = value
@property
def warehouses_coords(self):
return self._warehouses_coords
@warehouses_coords.setter
def warehouses_coords(self, value):
self._warehouses_coords = value
@property
def customers_coords(self):
return self._customers_coords
@customers_coords.setter
def customers_coords(self, value):
self._customers_coords = value
@property
def vehicle_capacity(self):
return self._vehicle_capacity
@vehicle_capacity.setter
def vehicle_capacity(self, value):
self._vehicle_capacity = value
@property
def warehouse_capacities(self):
return self._warehouse_capacities
@warehouse_capacities.setter
def warehouse_capacities(self, value):
self._warehouse_capacities = value
@property
def customer_desires(self):
return self._customer_desires
@customer_desires.setter
def customer_desires(self, value):
self._customer_desires = value
@property
def warehouse_prices(self):
return self._warehouse_prices
@warehouse_prices.setter
def warehouse_prices(self, value):
self._warehouse_prices = value
@property
def vehicle_price(self):
return self._vehicle_price
@vehicle_price.setter
def vehicle_price(self, value):
self._vehicle_price = value
@property
def ccdistances(self):
return self._ccdistances
@property
def wcdistances(self):
return self._wcdistances
def distance(self, a, b):
return int(pow(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2), 0.5) * 100)
def calculate_distances(self):
for i in range(self.customers_no):
for j in range(self.customers_no):
first_coords = self.customers_coords[i]
second_coords = self.customers_coords[j]
distance = self.distance(first_coords, second_coords)
self.ccdistances[i][j] = distance
for i in range(self.warehouses_no):
for j in range(self.customers_no):
first_coords = self.warehouses_coords[i]
second_coords = self.customers_coords[j]
distance = self.distance(first_coords, second_coords)
self.wcdistances[i][j] = distance
def read_coords(f, lines_no):
data_array = []
for i in range(lines_no):
string_list = f.readline().rstrip('\r\n').split('\t')
data_array.append(tuple(int(num) for num in string_list))
return data_array
def read_values(f, lines_no):
data_array = []
for i in range(lines_no):
capacity_string = f.readline().rstrip('\r\n')
data_array.append(int(capacity_string))
return data_array
def read_hmo_file(path):
hp = HMOProblem()
with open(path) as f:
hp.customers_no = int(f.readline().rstrip('\r\n'))
hp.warehouses_no = int(f.readline().rstrip('\r\n'))
f.readline()
hp.warehouses_coords = read_coords(f, hp.warehouses_no)
f.readline()
hp.customers_coords = read_coords(f, hp.customers_no)
f.readline()
hp.vehicle_capacity = int(f.readline().rstrip('\r\n'))
f.readline()
hp.warehouse_capacities = read_values(f, hp.warehouses_no)
f.readline()
hp.customer_desires = read_values(f, hp.customers_no)
f.readline()
hp.warehouse_prices = read_values(f, hp.warehouses_no)
f.readline()
vehicle_price = f.readline().rstrip('\r\n')
hp.vehicle_price = int(vehicle_price)
hp.calculate_distances()
return hp
if __name__ == '__main__':
hmo_problem = read_hmo_file('input.txt')
print hmo_problem.warehouses_no
print hmo_problem.customers_no
print hmo_problem.warehouses_coords #
print hmo_problem.customers_coords #
print 'vehicle capacity', hmo_problem.vehicle_capacity #
print hmo_problem.warehouse_capacities #
print hmo_problem.customer_desires #
print hmo_problem.warehouse_prices #
print hmo_problem.vehicle_price #
hmo_problem.calculate_distances()
print hmo_problem.ccdistances[51][50]
print hmo_problem.ccdistances[50][51]
print hmo_problem.wcdistances[4][23]
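# For reference, read_hmo_file expects input.txt laid out roughly as below
# (a sketch inferred from the reads above; each f.readline() with no
# assignment skips one separator line between blocks):
#
#   <customers_no>
#   <warehouses_no>
#   <separator line>
#   warehouse coordinates, one tab-separated "x<TAB>y" pair per line
#   <separator line>
#   customer coordinates, one tab-separated "x<TAB>y" pair per line
#   <separator line>
#   <vehicle_capacity>
#   <separator line>
#   warehouse capacities, one integer per line
#   <separator line>
#   customer demands, one integer per line
#   <separator line>
#   warehouse prices, one integer per line
#   <separator line>
#   <vehicle_price>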
|
UTF-8
|
Python
| false
| false
| 5,167
|
py
| 206
|
hmo_loader.py
| 135
| 0.623379
| 0.618154
| 0
| 176
| 28.357955
| 77
|
benety/mongo
| 2,482,491,129,166
|
ffbfc1e218ed1c25fb8aec4cd4c58722a4fbc5a2
|
9d9e5aca4c3e32b762f55fe7488d5cdb2ca86062
|
/buildscripts/task_generation/generated_config.py
|
c054648bcbb08d6d5edfd9183cce1210026b630b
|
[
"LicenseRef-scancode-warranty-disclaimer",
"SSPL-1.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"Unicode-DFS-2015",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"AGPL-3.0-or-later"
] |
permissive
|
https://github.com/benety/mongo
|
a6a14cf5c7af5a5a2d863107cb8002dc144fd283
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
refs/heads/master
| 2023-01-06T22:12:38.994986
| 2022-05-11T22:42:28
| 2022-05-11T23:32:03
| 308,162,184
| 0
| 0
|
Apache-2.0
| true
| 2020-10-28T23:05:47
| 2020-10-28T23:05:46
| 2020-10-23T20:13:37
| 2020-10-02T22:10:54
| 401
| 0
| 0
| 0
| null | false
| false
|
"""Generated configuration."""
from typing import NamedTuple, List
from buildscripts.util.fileops import write_file_to_dir
class GeneratedFile(NamedTuple):
"""
Generated configuration file.
file_name: Name of generated configuration.
content: Content of generated configuration.
"""
file_name: str
content: str
def write_to_dir(self, directory: str) -> None:
"""
Write this file to the given directory.
:param directory: Directory to write file to.
"""
write_file_to_dir(directory, self.file_name, self.content, overwrite=False)
class GeneratedConfiguration(NamedTuple):
"""
    Container for the configuration needed to generate a task.
file_list: List of filenames and file contents needed to generate a task.
"""
file_list: List[GeneratedFile]
def write_all_to_dir(self, directory: str) -> None:
"""
Write all the configuration files to the given directory.
:param directory: Directory to write to.
"""
for item in self.file_list:
item.write_to_dir(directory)
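# A minimal usage sketch; the file name, content, and output directory below
# are illustrative assumptions, not part of this module.
if __name__ == "__main__":
    config = GeneratedConfiguration(
        file_list=[GeneratedFile(file_name="my_task.json", content="{}")])
    config.write_all_to_dir("generated_config_out")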
|
UTF-8
|
Python
| false
| false
| 1,118
|
py
| 11,721
|
generated_config.py
| 10,723
| 0.659213
| 0.659213
| 0
| 43
| 25
| 83
|
mr-wafi/personal-portfolio-django3
| 4,922,032,565,211
|
1cfd13ad39a01daf9f128a48c528a8b7492e6059
|
c329fc500015c550aca6f5bbc7507e9c44b88e1d
|
/portfolio/models.py
|
86dd70d10926615baaada62ff7a1608de5522aae
|
[] |
no_license
|
https://github.com/mr-wafi/personal-portfolio-django3
|
dc7c84696dc4224e88669c3ddf72daaa5e687abe
|
67727bd81b6a986d3a776071e26ce6fba3b2f582
|
refs/heads/master
| 2022-05-28T05:22:31.971075
| 2020-05-02T06:42:08
| 2020-05-02T06:42:08
| 260,629,641
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
class Project(models.Model):
title = models.CharField(max_length=100)
description = models.CharField(max_length=250)
image = models.ImageField(upload_to='portfolio/images')
urls = models.URLField(blank=True)
    # This is where we define the details for our database table; after defining
    # it here we have to register it in admin.py, which simply imports this
    # model and registers it with the admin site.
def __str__(self):
return self.title
    # What does this __str__ method do? It returns the title of each saved record,
    # so instead of generic labels like "Project object (1)" the admin lists the
    # entries by their titles, making each post easy to find, edit, or delete.
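# For reference, the admin.py described above follows the standard Django
# registration pattern (a sketch of that separate file, not part of this one):
#
#   from django.contrib import admin
#   from .models import Project
#   admin.site.register(Project)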
|
UTF-8
|
Python
| false
| false
| 869
|
py
| 4
|
models.py
| 4
| 0.752589
| 0.745685
| 0
| 17
| 50.117647
| 123
|
pjhampton/py-global-testing
| 14,130,442,442,649
|
e3313f80e6f53c6c053a192e30d9e2d2053e20bd
|
f51f6921861c118cf049f56fe0dd8f39ba4ce1ce
|
/tools/tool1/toolone.py
|
d113bddcf56046e46a8fece7060223f2d6900aa6
|
[] |
no_license
|
https://github.com/pjhampton/py-global-testing
|
077e448d434daa1f18e19f53b130388a26648203
|
c088ae8441919e27584b1036053ca045d4c59ef3
|
refs/heads/master
| 2022-12-09T04:20:13.766458
| 2020-08-26T14:06:51
| 2020-08-26T14:06:51
| 290,489,028
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
def print_usage():
return "I'm tool one!"
|
UTF-8
|
Python
| false
| false
| 47
|
py
| 12
|
toolone.py
| 11
| 0.595745
| 0.595745
| 0
| 2
| 22
| 26
|
CarsonSlovoka/image-rename
| 19,189,913,895,062
|
37c19498feb9c8144a012a88aeb193a4ba4b9e29
|
762a58692694c44eaa6070951fb9fb2b00f6e748
|
/image_rename/template/plugins/mspaint.py
|
cc7e07edee1c98a7dd97a4d2c253ef0c00be297c
|
[] |
no_license
|
https://github.com/CarsonSlovoka/image-rename
|
90bbf9c59ef5256369e0c8aff818eed486b34c2c
|
6ff64647aa893ee5c23bfd7e8cc452a7a7d32f29
|
refs/heads/master
| 2022-12-24T19:00:46.411674
| 2020-10-11T16:48:19
| 2020-10-11T16:48:19
| 296,278,518
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
__all__ = ('PLUGIN_MS_PAINT',)
from image_rename import template
from image_rename.core import ImageRenameApp
from pathlib import Path
from subprocess import Popen, PIPE, DEVNULL
if '__file__' in globals():
PLUGIN_MS_PAINT = Path(__file__) # __file__ not in loader.exec_module
register = template.Library(__name__)
@register.hotkey(key_list=['<Alt-P>', '<Alt-p>'])
def start_ms_paint(app: ImageRenameApp):
job = Popen(['mspaint', str(app.widget_info.cur_img_path)], stdout=PIPE, stderr=PIPE, stdin=DEVNULL)
    job.communicate()  # wait for the job to finish
app.refresh_window()
# app.next_img_flag = True
|
UTF-8
|
Python
| false
| false
| 628
|
py
| 29
|
mspaint.py
| 23
| 0.686306
| 0.686306
| 0
| 19
| 32.052632
| 104
|
RonierisonMaciel/devops_example
| 14,491,219,704,855
|
97485351123cffb683d6053368fca1ff0d8d937a
|
223d239dcef833bb818bb8b660a0ad3dc523e936
|
/unit_tests/test_appllication.py
|
47093c1b07fc99d051b4d2fe25d512df1d379029
|
[
"MIT"
] |
permissive
|
https://github.com/RonierisonMaciel/devops_example
|
2419a2a0523e73614add4835731adea2f4d4af32
|
c9e5a1210c6ec3ab7c102c0880ea643597ab19f9
|
refs/heads/main
| 2023-01-05T19:46:15.099232
| 2020-11-02T20:16:16
| 2020-11-02T20:16:16
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import random
import json
from application import app
class TestApplication(unittest.TestCase):
def setUp(self):
self.client = app.test_client()
self.headers = {'Content-type': 'application/json'}
return super().setUp()
def test_hello_world(self):
""" Get hello world
"""
path = '/hello'
data = self.client.get(path)
expected_response = {'hello': 'world'}
assert expected_response == data.json
def test_put_get(self):
""" test application by putting a resource and reading it back
"""
resource_name = "resource_test1"
path = f'/{resource_name}'
data = {
"data": "Test data"
}
api_response = self.client.put(
path,
data=json.dumps(data),
headers=self.headers
)
expected_response = {
"resource_name": "resource_test1",
"resource_input": "Test data"
}
# Test put
assert expected_response == api_response.json, \
"PUT Error: Response different from expected"
api_response = self.client.get(
path,
headers=self.headers
)
expected_response = {
"resource_test1" : "Test data"
}
        # Test get
assert expected_response == api_response.json, \
"GET Error: different response from expected"
|
UTF-8
|
Python
| false
| false
| 1,533
|
py
| 7
|
test_appllication.py
| 2
| 0.536856
| 0.534247
| 0
| 61
| 23.983607
| 70
|
Tanukimong/tanukiNet
| 8,349,416,430,233
|
20d4696f525489e3b963b37fea70a311e2b9b147
|
f952303b30bf0794a4f4fde2d46c132c069e8e19
|
/draw_lanes_avg.py
|
d6e846dc2ebb5efbf89e1697af8c0f3fd42d40de
|
[] |
no_license
|
https://github.com/Tanukimong/tanukiNet
|
da86c325cb1dfb870070d83fab674169e7019010
|
c668501903e063bdcaa59b460d82965874d1a07e
|
refs/heads/master
| 2020-08-07T08:03:33.687435
| 2019-10-16T02:52:21
| 2019-10-16T02:52:21
| 213,362,925
| 1
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
draw_lanes.py memory_size input_video output_video
'''
import numpy as np
import cv2
from PIL.Image import fromarray, BILINEAR
from moviepy.editor import VideoFileClip
from IPython.display import HTML
from keras.models import model_from_json
import sys
import warnings
warnings.filterwarnings(action='ignore') # hide annoying warnings
scaler = 6
resized_shape = (1640//scaler, 590//scaler)
memory_size = int(sys.argv[1])
json_fname = "model_structure_when_mem_is_{}.json".format(memory_size)
weights_fname ="mem_is_{}.h5".format(memory_size)
# Load Keras model
json_file = open(json_fname, 'r')
json_model = json_file.read()
json_file.close()
model = model_from_json(json_model)
model.load_weights(weights_fname)
model.summary()
# Class to average lanes with
class Lanes():
def __init__(self):
self.recent_question = np.empty((1, 96, 272, 1))
self.initialized = False
self.recent_ans = []
self.avg_ans = []
def road_lines(image):
""" Takes in a road image, re-sizes for the model,
predicts the lane to be drawn from the model in G color,
recreates an RGB image of a lane and merges with the
original road image.
"""
    # Accumulate memory_size frames and run prediction on them in one batch
    small_img = fromarray(image).resize(resized_shape)
    small_img = np.asarray(small_img,dtype="uint8")
    small_img = small_img[None,:,:,:]/255.0 # (1, 96, 272, 1)
    if lanes.recent_question.shape[0] >= memory_size:
        # Only in this case do we predict and rotate the frame buffer:
        # drop the oldest frame after appending the new one
        lanes.recent_question = np.append(lanes.recent_question, small_img, axis=0)
        lanes.recent_question = lanes.recent_question[1:]
prediction = model.predict(lanes.recent_question[np.newaxis])[0]*255
lanes.recent_ans.append(prediction)
if len(lanes.recent_ans) > 5:
lanes.recent_ans = lanes.recent_ans[1:]
# Calculate average detection
lanes.avg_ans = np.mean(np.array([i for i in lanes.recent_ans]), axis = 0)
# Generate fake R & B color dimensions, stack with G
blanks = np.zeros_like(lanes.avg_ans)
lane_drawn = np.dstack((blanks, lanes.avg_ans, blanks))
lane_drawn = lane_drawn.astype("uint8")
# Re-size to match the original image
lane_image = fromarray(lane_drawn)
lane_image = lane_image.resize((1280, 720),BILINEAR)
lane_image = np.asarray(lane_image,dtype="uint8")
# Merge the lane drawing onto the original image
result = cv2.addWeighted(image, 1, lane_image, 1, 0)
elif lanes.initialized == True:
print("=== Case 1 : image stacking only ===")
lanes.recent_question = np.append(lanes.recent_question, small_img, axis=0)
result = fromarray(image).resize((1280, 720))
result = np.array(result)
elif lanes.initialized == False:
print("=== Case 2 : initializing ===")
lanes.recent_question = small_img# (1, 96, 272, 1)
result = fromarray(image).resize((1280, 720))
result = np.array(result) # (720, 1280, 3)
lanes.initialized = True
return result
# Global variable lanes.recent_question
lanes = Lanes()
# Where to save the output video
vid_output = sys.argv[3]
# Location of the input video
clip1 = VideoFileClip(sys.argv[2])
vid_clip = clip1.fl_image(road_lines)
vid_clip.write_videofile(vid_output, audio=False)
|
UTF-8
|
Python
| false
| false
| 3,437
|
py
| 18
|
draw_lanes_avg.py
| 10
| 0.657526
| 0.630999
| 0
| 104
| 31.259615
| 83
|
pet-pal-project/petpal
| 7,816,840,508,770
|
a90311511a9a830a5475a8f0055325cbb36a2a85
|
276857b3a73d8ef2a6bbc10ade28f52cc8ed3baf
|
/core/migrations/0007_auto_20190730_1614.py
|
c5c63192d9455d40891de765c444cd58a1b862fb
|
[] |
no_license
|
https://github.com/pet-pal-project/petpal
|
4710e2bee433aaecafae7c908bf946d07b7b6aff
|
40698e1e26f0913362912f5e958892e57d845433
|
refs/heads/master
| 2022-12-12T10:56:05.977394
| 2019-08-13T20:33:08
| 2019-08-13T20:33:08
| 199,071,739
| 0
| 0
| null | false
| 2022-12-08T06:00:18
| 2019-07-26T19:45:33
| 2019-08-13T20:33:15
| 2022-12-08T06:00:17
| 11,964
| 0
| 0
| 2
|
Python
| false
| false
|
# Generated by Django 2.2.3 on 2019-07-30 20:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_auto_20190730_1309'),
]
operations = [
migrations.RenameField(
model_name='pet',
old_name='about_me',
new_name='about_Me',
),
migrations.RenameField(
model_name='pet',
old_name='emergency_contact',
new_name='emergency_Contact',
),
migrations.RenameField(
model_name='pet',
old_name='profile_image',
new_name='profile_Image',
),
migrations.RenameField(
model_name='pet',
old_name='vet_info',
new_name='vet_Info',
),
migrations.RenameField(
model_name='pet',
old_name='weight',
new_name='weight_in_lbs',
),
migrations.AddField(
model_name='pet',
name='color_and_Markings',
field=models.CharField(blank=True, max_length=200),
),
migrations.AddField(
model_name='pet',
name='sex',
field=models.CharField(blank=True, max_length=20),
),
]
|
UTF-8
|
Python
| false
| false
| 1,286
|
py
| 35
|
0007_auto_20190730_1614.py
| 18
| 0.506221
| 0.478227
| 0
| 48
| 25.791667
| 63
|
kuznesashka/Wave_prior_inverse
| 4,578,435,170,108
|
68f1b1db423da5eb81c8bd8ec836dae6f8b3a5ed
|
cfb653a29cccd0fe100c9562b80b6af7a3ff607c
|
/venv/lib/python3.8/site-packages/pyface/data_view/data_models/tests/test_row_table_data_model.py
|
4ae44c09b5f2264abddda482d28c52017cb20ab3
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/kuznesashka/Wave_prior_inverse
|
d12155ee4be7dd0b2bae6ccfb7f5e5cf37f33a9f
|
0b1b934ba4beb5105ea3baa9a25407400c3ccc71
|
refs/heads/master
| 2021-12-26T04:02:37.525285
| 2021-12-15T10:10:36
| 2021-12-15T10:10:36
| 201,934,126
| 0
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from traits.trait_list_object import TraitList
from traits.testing.api import UnittestTools
from traits.testing.optional_dependencies import numpy as np, requires_numpy
from pyface.data_view.abstract_data_model import DataViewSetError
from pyface.data_view.abstract_value_type import AbstractValueType
from pyface.data_view.value_types.api import (
FloatValue, IntValue, TextValue, no_value
)
from pyface.data_view.data_models.data_accessors import (
AttributeDataAccessor, IndexDataAccessor, KeyDataAccessor
)
from pyface.data_view.data_models.row_table_data_model import RowTableDataModel
class DataItem:
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
class TestRowTableDataModel(UnittestTools, unittest.TestCase):
def setUp(self):
super().setUp()
self.data = [
DataItem(a=i, b=10*i, c=str(i)) for i in range(10)
]
self.model = RowTableDataModel(
data=self.data,
row_header_data=AttributeDataAccessor(
attr='a',
value_type=IntValue(),
),
column_data=[
AttributeDataAccessor(
attr='b',
value_type=IntValue(),
),
AttributeDataAccessor(
attr='c',
value_type=TextValue(),
)
]
)
self.values_changed_event = None
self.structure_changed_event = None
self.model.observe(self.model_values_changed, 'values_changed')
self.model.observe(self.model_structure_changed, 'structure_changed')
def tearDown(self):
self.model.observe(
self.model_values_changed, 'values_changed', remove=True)
self.model.observe(
self.model_structure_changed, 'structure_changed', remove=True)
self.values_changed_event = None
self.structure_changed_event = None
super().tearDown()
def model_values_changed(self, event):
self.values_changed_event = event
def model_structure_changed(self, event):
self.structure_changed_event = event
def test_no_data(self):
model = RowTableDataModel()
self.assertEqual(model.get_column_count(), 0)
self.assertTrue(model.can_have_children(()))
self.assertEqual(model.get_row_count(()), 0)
def test_get_column_count(self):
result = self.model.get_column_count()
self.assertEqual(result, 2)
def test_can_have_children(self):
for row in self.model.iter_rows():
with self.subTest(row=row):
result = self.model.can_have_children(row)
if len(row) == 0:
self.assertEqual(result, True)
else:
self.assertEqual(result, False)
def test_get_row_count(self):
for row in self.model.iter_rows():
with self.subTest(row=row):
result = self.model.get_row_count(row)
if len(row) == 0:
self.assertEqual(result, 10)
else:
self.assertEqual(result, 0)
def test_get_value(self):
for row, column in self.model.iter_items():
with self.subTest(row=row, column=column):
result = self.model.get_value(row, column)
if len(row) == 0 and len(column) == 0:
self.assertEqual(result, 'A')
elif len(row) == 0:
attr = self.model.column_data[column[0]].attr
self.assertEqual(result, attr.title())
elif len(column) == 0:
self.assertEqual(result, row[0])
else:
attr = self.model.column_data[column[0]].attr
self.assertEqual(
result,
getattr(self.data[row[0]], attr)
)
def test_set_value(self):
for row, column in self.model.iter_items():
with self.subTest(row=row, column=column):
if len(row) == 0 and len(column) == 0:
with self.assertRaises(DataViewSetError):
self.model.set_value(row, column, 0)
elif len(row) == 0:
with self.assertRaises(DataViewSetError):
self.model.set_value(row, column, 0)
elif len(column) == 0:
value = 6.0 * row[0]
with self.assertTraitChanges(self.model, "values_changed"):
self.model.set_value(row, column, value)
self.assertEqual(self.data[row[0]].a, value)
self.assertEqual(
self.values_changed_event.new,
(row, column, row, column)
)
else:
value = 6.0 * row[-1] + 2 * column[0]
with self.assertTraitChanges(self.model, "values_changed"):
self.model.set_value(row, column, value)
attr = self.model.column_data[column[0]].attr
self.assertEqual(
getattr(self.data[row[0]], attr),
value,
)
self.assertEqual(
self.values_changed_event.new,
(row, column, row, column)
)
def test_get_value_type(self):
for row, column in self.model.iter_items():
with self.subTest(row=row, column=column):
result = self.model.get_value_type(row, column)
if len(row) == 0 and len(column) == 0:
self.assertIsInstance(result, AbstractValueType)
self.assertIs(
result,
self.model.row_header_data.title_type,
)
elif len(row) == 0:
self.assertIsInstance(result, AbstractValueType)
self.assertIs(
result,
self.model.column_data[column[0]].title_type,
)
elif len(column) == 0:
self.assertIsInstance(result, AbstractValueType)
self.assertIs(
result,
self.model.row_header_data.value_type,
)
else:
self.assertIsInstance(result, AbstractValueType)
self.assertIs(
result,
self.model.column_data[column[0]].value_type,
)
def test_data_updated(self):
with self.assertTraitChanges(self.model, "structure_changed"):
self.model.data = [
DataItem(a=i+1, b=20*(i+1), c=str(i)) for i in range(10)
]
self.assertTrue(self.structure_changed_event.new)
def test_data_items_updated_item_added(self):
self.model.data = TraitList([
DataItem(a=i, b=10*i, c=str(i)) for i in range(10)
])
with self.assertTraitChanges(self.model, "structure_changed"):
self.model.data += [DataItem(a=100, b=200, c="a string")]
self.assertTrue(self.structure_changed_event.new)
def test_data_items_updated_item_replaced(self):
self.model.data = TraitList([
DataItem(a=i, b=10*i, c=str(i)) for i in range(10)
])
with self.assertTraitChanges(self.model, "values_changed"):
self.model.data[1] = DataItem(a=100, b=200, c="a string")
self.assertEqual(self.values_changed_event.new, ((1,), (), (1,), ()))
def test_data_items_updated_item_replaced_negative(self):
self.model.data = TraitList([
DataItem(a=i, b=10*i, c=str(i)) for i in range(10)
])
with self.assertTraitChanges(self.model, "values_changed"):
self.model.data[-2] = DataItem(a=100, b=200, c="a string")
self.assertEqual(self.values_changed_event.new, ((8,), (), (8,), ()))
def test_data_items_updated_items_replaced(self):
self.model.data = TraitList([
DataItem(a=i, b=10*i, c=str(i)) for i in range(10)
])
with self.assertTraitChanges(self.model, "values_changed"):
self.model.data[1:3] = [
DataItem(a=100, b=200, c="a string"),
DataItem(a=200, b=300, c="another string"),
]
self.assertEqual(self.values_changed_event.new, ((1,), (), (2,), ()))
def test_data_items_updated_slice_replaced(self):
self.model.data = TraitList([
DataItem(a=i, b=10*i, c=str(i)) for i in range(10)
])
with self.assertTraitChanges(self.model, "values_changed"):
self.model.data[1:4:2] = [
DataItem(a=100, b=200, c="a string"),
DataItem(a=200, b=300, c="another string"),
]
self.assertEqual(self.values_changed_event.new, ((1,), (), (3,), ()))
def test_data_items_updated_reverse_slice_replaced(self):
self.model.data = TraitList([
DataItem(a=i, b=10*i, c=str(i)) for i in range(10)
])
with self.assertTraitChanges(self.model, "values_changed"):
self.model.data[3:1:-1] = [
DataItem(a=100, b=200, c="a string"),
DataItem(a=200, b=300, c="another string"),
]
self.assertEqual(self.values_changed_event.new, ((2,), (), (3,), ()))
def test_row_header_data_updated(self):
with self.assertTraitChanges(self.model, "values_changed"):
self.model.row_header_data = AttributeDataAccessor(attr='b')
self.assertEqual(
self.values_changed_event.new,
((), (), (), ())
)
def test_row_header_data_values_updated(self):
with self.assertTraitChanges(self.model, "values_changed"):
self.model.row_header_data.updated = (self.model.row_header_data, 'value')
self.assertEqual(
self.values_changed_event.new,
((0,), (), (9,), ())
)
def test_row_header_data_title_updated(self):
with self.assertTraitChanges(self.model, "values_changed"):
self.model.row_header_data.updated = (self.model.row_header_data, 'title')
self.assertEqual(
self.values_changed_event.new,
((), (), (), ())
)
def test_no_data_row_header_data_update(self):
model = RowTableDataModel(
row_header_data=AttributeDataAccessor(
attr='a',
value_type=IntValue(),
),
column_data=[
AttributeDataAccessor(
attr='b',
value_type=IntValue(),
),
AttributeDataAccessor(
attr='c',
value_type=TextValue(),
)
]
)
# check that updating accessors is safe with empty data
with self.assertTraitDoesNotChange(model, 'values_changed'):
model.row_header_data.attr = 'b'
def test_column_data_updated(self):
with self.assertTraitChanges(self.model, "structure_changed"):
self.model.column_data = [
AttributeDataAccessor(
attr='c',
value_type=TextValue(),
),
AttributeDataAccessor(
attr='b',
value_type=IntValue(),
),
]
self.assertTrue(self.structure_changed_event.new)
def test_column_data_items_updated(self):
with self.assertTraitChanges(self.model, "structure_changed"):
self.model.column_data.pop()
self.assertTrue(self.structure_changed_event.new)
def test_column_data_value_updated(self):
with self.assertTraitChanges(self.model, "values_changed"):
self.model.column_data[0].updated = (self.model.column_data[0], 'value')
self.assertEqual(
self.values_changed_event.new,
((0,), (0,), (9,), (0,))
)
def test_no_data_column_data_update(self):
model = RowTableDataModel(
row_header_data=AttributeDataAccessor(
attr='a',
value_type=IntValue(),
),
column_data=[
AttributeDataAccessor(
attr='b',
value_type=IntValue(),
),
AttributeDataAccessor(
attr='c',
value_type=TextValue(),
)
]
)
with self.assertTraitDoesNotChange(model, 'values_changed'):
model.column_data[0].attr = 'a'
def test_column_data_title_updated(self):
with self.assertTraitChanges(self.model, "values_changed"):
self.model.column_data[0].updated = (self.model.column_data[0], 'title')
self.assertEqual(
self.values_changed_event.new,
((), (0,), (), (0,))
)
def test_list_tuple_data(self):
data = [
(i, 10*i, str(i)) for i in range(10)
]
model = RowTableDataModel(
data=data,
row_header_data=IndexDataAccessor(
index=0,
value_type=IntValue(),
),
column_data=[
IndexDataAccessor(
index=1,
value_type=IntValue(),
),
IndexDataAccessor(
index=2,
value_type=TextValue(),
)
]
)
for row, column in model.iter_items():
with self.subTest(row=row, column=column):
result = model.get_value(row, column)
if len(row) == 0 and len(column) == 0:
self.assertEqual(result, '0')
elif len(row) == 0:
index = model.column_data[column[0]].index
self.assertEqual(result, str(index))
elif len(column) == 0:
self.assertEqual(result, row[0])
else:
index = model.column_data[column[0]].index
self.assertEqual(
result,
data[row[0]][index]
)
def test_list_dict_data(self):
data = [
{'a': i, 'b': 10*i, 'c': str(i)} for i in range(10)
]
model = RowTableDataModel(
data=data,
row_header_data=KeyDataAccessor(
key='a',
value_type=IntValue(),
),
column_data=[
KeyDataAccessor(
key='b',
value_type=IntValue(),
),
KeyDataAccessor(
key='c',
value_type=TextValue(),
)
]
)
for row, column in model.iter_items():
with self.subTest(row=row, column=column):
result = model.get_value(row, column)
if len(row) == 0 and len(column) == 0:
self.assertEqual(result, 'A')
elif len(row) == 0:
key = model.column_data[column[0]].key
self.assertEqual(result, str(key).title())
elif len(column) == 0:
self.assertEqual(result, data[row[0]]['a'])
else:
key = model.column_data[column[0]].key
self.assertEqual(
result,
data[row[0]][key]
)
|
UTF-8
|
Python
| false
| false
| 16,348
|
py
| 809
|
test_row_table_data_model.py
| 792
| 0.509237
| 0.497247
| 0
| 429
| 37.107226
| 86
|
hugobou/routeplanner
| 14,886,356,671,797
|
0c1050e86e1053880993dbc430aeef19732e5e86
|
d17c598fbbeba3cec36021b13bbf857733c2affc
|
/test/test_features.py
|
095ef5b8437d22fe98bf38a544d993d56af59c28
|
[] |
no_license
|
https://github.com/hugobou/routeplanner
|
34eca1f8e1a65697479b9fc55423417951d2c2d5
|
607d0361514e5a463ae6e4371399003ee382cd25
|
refs/heads/main
| 2023-08-11T07:06:20.820583
| 2021-10-02T09:08:07
| 2021-10-02T09:08:07
| 378,705,177
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src')))
import unittest
import numpy as np
import graphcreator as gc
from model import features as feat
from model.features import NUM_FEATURES, FEATURE_LENGTH
class FeaturesTest(unittest.TestCase):
def test_encode_features(self):
l = list(range(5))
graph = gc.generate_path(5, x=l, y=l, w=l)
out_edges = graph.out_edges(0)
features = feat.FeaturesEncoder().encode(graph, out_edges, 0, 1)
self.assertEqual(FEATURE_LENGTH, len(features))
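        # FEATURE_LENGTH appears to span five edge slots of NUM_FEATURES each;
        # slots without a real out-edge are padded with ones (inferred from the
        # assertions below)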
        # the third argument of assert_array_almost_equal is `decimal` (an int);
        # the original passed 1e-7, which effectively disabled the check
        np.testing.assert_array_almost_equal([0.0, 1.0, 1.0, 0.0, 1.0, 0.0], features[0:NUM_FEATURES], decimal=7)
        np.testing.assert_array_almost_equal(np.ones(4*NUM_FEATURES), features[NUM_FEATURES:FEATURE_LENGTH], decimal=7)
def test_encode_features_empty_out_edges_exception(self):
graph = gc.generate_path(5)
out_edges = []
self.assertRaises(RuntimeError, feat.FeaturesEncoder().encode, graph, out_edges, 0, 1)
def test_encode_features_more_edges_than_allowed_last_edge_ignored(self):
graph = gc.generate_hardcoded_graph_too_many_connections()
out_edges = graph.out_edges(1)
features = feat.FeaturesEncoder().encode(graph, out_edges, 1, 10)
self.assertEqual(FEATURE_LENGTH, len(features))
|
UTF-8
|
Python
| false
| false
| 1,352
|
py
| 48
|
test_features.py
| 28
| 0.684172
| 0.652367
| 0
| 32
| 41.21875
| 119
|
lTakezo/dsid_ep
| 14,559,939,181,556
|
7c1f494b816ab9fe0bd91a47ed42e060f39f3d40
|
e2b372b39a10d4ec0b87d0e73072048fff176209
|
/server.py
|
f82e8c8a6874998fce1775747b9aeccb6f33087e
|
[] |
no_license
|
https://github.com/lTakezo/dsid_ep
|
56964fb1d249678b3249a942363cc663c21240e6
|
96606432758953ff1ca49fd6b9decb573126c8e1
|
refs/heads/master
| 2022-11-13T15:41:57.325500
| 2020-07-05T15:27:33
| 2020-07-05T15:27:33
| 277,324,645
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, redirect, url_for, render_template, request, jsonify, session
import subprocess, json
from flask_mysqldb import MySQL
import MySQLdb.cursors
import re
import apicaller
app = Flask(__name__)
app.secret_key = 'fts'
app.config['MYSQL_HOST'] = '127.0.0.1'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'T@wFpxLFo4pO'
app.config['MYSQL_DB'] = 'pythonlogin'
mysql = MySQL(app)
@app.route("/")
def home():
if "loggedin" in session:
user = session["username"]
return render_template("index.html", content=user, loggedin=True)
return render_template("index.html")
@app.route('/login/', methods=['GET', 'POST'])
def login():
msg = ''
if request.method == 'POST' and 'username' in request.form and 'password' in request.form:
# Create variables for easy access
username = request.form['username']
password = request.form['password']
# Check if account exists using MySQL
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM accounts WHERE username = %s AND password = %s', (username, password,))
# Fetch one record and return result
account = cursor.fetchone()
        # If account exists in accounts table in our database
if account:
# Create session data, we can access this data in other routes
session['loggedin'] = True
session['id'] = account['id']
session['username'] = account['username']
return redirect(url_for('home'))
else:
            # Account doesn't exist or username/password incorrect
msg = 'Incorrect username/password!'
return render_template('login.html', msg=msg)
@app.route('/logout/')
def logout():
# Remove session data, this will log the user out
session.pop('loggedin', None)
session.pop('id', None)
session.pop('username', None)
return redirect(url_for('home'))
@app.route('/register/', methods=['GET', 'POST'])
def register():
msg = ''
# Check if "username", "password" and "email" POST requests exist (user submitted form)
if request.method == 'POST' and 'username' in request.form and 'password' in request.form and 'email' in request.form:
username = request.form['username']
password = request.form['password']
email = request.form['email']
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM accounts WHERE username = %s', (username,))
account = cursor.fetchone()
# If account exists show error and validation checks
if account:
msg = 'Account already exists!'
elif not re.match(r'[^@]+@[^@]+\.[^@]+', email):
msg = 'Invalid email address!'
elif not re.match(r'[A-Za-z0-9]+', username):
msg = 'Username must contain only characters and numbers!'
elif not username or not password or not email:
msg = 'Please fill out the form!'
else:
            # Account doesn't exist and the form data is valid, now insert new account into accounts table
cursor.execute('INSERT INTO accounts VALUES (NULL, %s, %s, %s)', (username, password, email,))
mysql.connection.commit()
msg = 'You have successfully registered!'
elif request.method == 'POST':
# Form is empty... (no POST data)
msg = 'Please fill out the form!'
# Show registration form with message (if any)
return render_template('register.html', msg=msg)
@app.route('/profile/')
def profile(type=None, content=None):
if 'loggedin' in session:
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM accounts WHERE id = %s', (session['id'],))
account = cursor.fetchone()
return render_template('profile.html', account=account, loggedin=True, type=type, content=content)
    # User is not logged in, redirect to the login page
return redirect(url_for('login'))
@app.route("/hotel/", methods=["POST", "GET"])
def hotel():
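    # The try/except/else doubles as routing here: a GET raises nothing and
    # falls through to the else clause; a POST without "dest" (the
    # save-to-profile form) raises on request.form["dest"] and lands in except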
try:
if request.method == "POST" and request.form["dest"]:
dest = request.form["dest"]
hotel_params = []
hotel_params.append(request.form["qt_ad"])
hotel_params.append(request.form["checkin"])
hotel_params.append(request.form["qt_qu"])
hotel_params.append(request.form["qt_no"])
hotel_params.append(request.form["pr_mx"])
querylist = [dest, "Hoteis"]
apicaller.set_localid(querylist)
apijson = apicaller.queryhotels(hotel_params)
            results = []  # each hotel will be a list inside this list
for i in range(0, len(apijson['data'])):
results_i = [] # name, latitude, longitude, photo, rating, price, address
for k, v in apijson['data'][i].items():
if k == 'name':
results_i.append(v)
elif k == 'latitude':
results_i.append(v)
elif k == 'longitude':
results_i.append(v)
elif k == 'photo':
results_i.append(v['images']['small']['url'])
elif k == 'rating':
results_i.append(v)
elif k == 'price':
results_i.append(v)
results.append(results_i)
if "loggedin" in session:
return render_template("hotel_result.html", contents=results, dest=dest, loggedin=True)
else:
return render_template("hotel_result.html", contents=results, dest=dest)
except:
if request.method == "POST":
data = request.form["content"].strip("']['").split("', '")
return profile(type="Hotel", content=data)
else:
if "loggedin" in session:
return render_template("hotel.html", loggedin=True)
else:
return render_template("hotel.html")
@app.route("/restaurante/", methods=["POST", "GET"])
def restaurante():
try:
if request.method == "POST" and request.form["dest"]:
            # Parameters
            dest = request.form["dest"]  # need to strip leading/trailing spaces from the parameters
pr_min = request.form["p_min"]
pr_max = request.form["p_max"]
querylist = [dest, "Restaurantes"]
            # query the API
apicaller.set_localid(querylist)
apijson = apicaller.queryrestaurant(pr_min + '%2C' + pr_max)
results = [] # each restaurante will be a list inside this list
for i in range(0, len(apijson['data'])):
results_i = [] # name, photo, rating, description, phone, website, address
for k, v in apijson['data'][i].items():
if k == 'name':
results_i.append(v)
elif k == 'photo':
results_i.append(v['images']['small']['url'])
elif k == 'rating':
results_i.append(v)
elif k == 'description':
results_i.append(v)
elif k == 'phone':
results_i.append(v)
elif k == 'website':
results_i.append(v)
elif k == 'address':
results_i.append(v)
results.append(results_i)
if "loggedin" in session:
return render_template("restaurante_result.html", contents=results, dest=dest, loggedin=True)
else:
return render_template("restaurante_result.html", contents=results, dest=dest)
except:
if request.method == "POST":
data = request.form["content"].strip("']['").split("', '")
#jsondata = "{'name':'"+data[0]+"', 'photo':'"+data[1]+"', 'phone':'"+data[4]+"', 'website':'"+data[5]+"', 'address':'"+data[6]+"'}"
#print(request.form["content"])
return profile(type="Restaurante", content=data)
else:
if "loggedin" in session:
return render_template("restaurante.html", loggedin=True)
else:
return render_template("restaurante.html")
@app.route("/atracao/", methods=["POST", "GET"])
def atracao():
try:
if request.method == "POST" and request.form["dest"]:
            dest = request.form["dest"]  # need to strip leading/trailing spaces from the parameters
rate = request.form["rate"]
querylist = [dest, "Atracoes"]
apicaller.set_localid(querylist)
apijson = apicaller.queryatractions(rate)
            results = []  # each attraction will be a list inside this list
for i in range(0, len(apijson['data'])):
results_i = [] # name, photo, rating, desciption, address
for k, v in apijson['data'][i].items():
if k == 'name':
results_i.append(v)
elif k == 'photo':
results_i.append(v['images']['small']['url'])
elif k == 'rating':
results_i.append(v)
elif k == 'description':
results_i.append(v)
elif k == 'address':
results_i.append(v)
results.append(results_i)
if "loggedin" in session:
return render_template("atracao_result.html", contents=results, dest=dest, loggedin=True)
else:
return render_template("atracao_result.html", contents=results, dest=dest)
except:
if request.method == "POST":
data = request.form["content"].strip("']['").split("', '")
return profile(type="Atração", content=data)
else:
if "loggedin" in session:
return render_template("atracao.html", loggedin=True)
else:
return render_template("atracao.html")
if __name__ == "__main__":
app.run(debug=True)
|
UTF-8
|
Python
| false
| false
| 10,288
|
py
| 6
|
server.py
| 4
| 0.551342
| 0.549592
| 0
| 255
| 39.333333
| 144
|
cloudshare/cloudshare-py-sdk
| 2,147,483,685,990
|
f882612a42051d160e2bdb16f405c146e3a50085
|
9090e39ba483a8d07b1546afe34e09d7e98c065e
|
/cloudshare/__init__.py
|
96ae6f100fb39055509927f78b8e6760d61dfabd
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/cloudshare/cloudshare-py-sdk
|
6589261a17cf5c1ff6cf9e66aa0020605876af5c
|
43f66143b80c84fab12271002bfb7cfd11e9a812
|
refs/heads/master
| 2023-05-24T22:25:42.595665
| 2023-01-08T09:24:31
| 2023-01-08T09:24:31
| 77,533,868
| 3
| 1
|
NOASSERTION
| false
| 2023-05-22T20:46:48
| 2016-12-28T13:13:01
| 2022-09-16T21:53:23
| 2023-05-22T20:46:44
| 771
| 3
| 1
| 1
|
Python
| false
| false
|
# Copyright 2015 CloudShare Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def req(hostname, method, apiId, apiKey, path="", queryParams=None, content=None):
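    # Thin convenience wrapper: delegates to the requester resolved lazily
    # from the ioc module below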
return _get_requester().request(hostname=hostname,
method=method,
apiId=apiId,
apiKey=apiKey,
path=path,
queryParams=queryParams,
content=content)
def _get_requester():
from .ioc import get_requester
return get_requester()
|
UTF-8
|
Python
| false
| false
| 1,142
|
py
| 20
|
__init__.py
| 16
| 0.595447
| 0.588441
| 0
| 28
| 38.785714
| 82
|
vinee2626/myMadlib_first
| 4,681,514,378,646
|
0cedd62dacb9e943b6dd266485abe6bf1eb26fa7
|
c7448026a0f4e72126855dc5465f9412127a5ae6
|
/Guess.py
|
a196b0b0d3a2b71e85010b76f325b466daef3dcd
|
[] |
no_license
|
https://github.com/vinee2626/myMadlib_first
|
b48b41e0c66965a50ec19536db6244b424cdad4d
|
0cb5dceae2dc9cb02589177f90fb0dbde8910075
|
refs/heads/main
| 2023-04-22T22:39:37.998368
| 2021-05-15T11:52:06
| 2021-05-15T11:52:06
| 365,505,866
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
# create a var to store the random no.
x = random.randint(1, 10)
# set t =5 for 5 trials
t = 5
# initiate a while loop that runs while trials remain (t > 0) and the guess is wrong
while(t > 0):
y = int(input("Guess a no.: "))
t -= 1
if (y == x):
print("YOU WON")
break
else:
print("YOU LOOSE")
if (x > y):
print("TOO LOW")
elif (x < y):
print("TOO HIGH")
# create a var y to store the input from user
# decrement the sentry by 1 for each input
# if they are equal then print a win message, and if not print a loss message
# if the input is too high or too low then tell the user about it to improve their guess
|
UTF-8
|
Python
| false
| false
| 661
|
py
| 8
|
Guess.py
| 8
| 0.582451
| 0.567322
| 0
| 24
| 25.541667
| 88
|
Fizcus/ebmeta
| 1,717,986,933,794
|
074a98c7c2adf78cdd741496055c2e1ca71ed718
|
c73f5e3bf225c1b0463abfc2487005ce3bc0fd56
|
/ebmeta/actions/edit.py
|
38d79adfda0b3ad0e050c50ad0403f2811b17828
|
[
"BSD-3-Clause",
"ISC"
] |
permissive
|
https://github.com/Fizcus/ebmeta
|
8212bc96450a834a0acf4941b8ddfde3adffce3e
|
2279ddd14235ea31b27f0eaa7e9bb26cb43d4133
|
refs/heads/master
| 2021-01-15T21:09:40.972710
| 2012-03-13T06:50:07
| 2012-03-13T06:50:07
| 7,752,675
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Edit metadata using zenity."""
from BeautifulSoup import BeautifulStoneSoup, Tag
import logging
import yaml
from zipfile import ZipFile
import ebmeta
from ebmeta import shell
from ebmeta import template
from ebmeta.actions import backup
from ebmeta.ebook import ebook_factory
from ebmeta.yamlwriter import opf_to_yaml
from ebmeta.zenity import edit_string, ZenityCancelled
log = logging.getLogger('display')
def run(new_yaml_text=None):
"""Run this action."""
path = ebmeta.arguments.filename
ebook = ebook_factory(path)
opf = ebook.opf
template_str = template.get_file_content("{}.yaml".format(ebook.type))
yaml_text = opf_to_yaml(opf, template_str)
if new_yaml_text:
result = new_yaml_text
else:
try:
result = edit_string(yaml_text, "Edit Ebook Metadata")
except ZenityCancelled:
log.debug("Operation was cancelled.")
return
if result.strip() == yaml_text.strip():
log.debug("No change was made.")
elif result:
log.debug("Writing changes to ebook file.")
d1 = yaml.load(yaml_text)
d2 = yaml.load(result)
if (ebook.type == 'epub') and (not d2.get('uuid')):
# ensure the new metadata has a uuid
d2['uuid'] = ebmeta.new_id()
changes = dict()
        for key in d2.keys():
            # elif chain so the generic comparison in the final else doesn't
            # also fire for the specially-handled keys above
            if key == 'description':
                if (d1[key] or "").strip() != (d2[key] or "").strip(): changes[key] = d2[key]
            elif key == 'authors':
                if d1[key] != d2[key]:
                    changes[key] = d2[key]
                    if d2.has_key('author sort'): changes['author sort'] = d2['author sort']
            elif key == 'title':
                if d1[key] != d2[key]:
                    changes[key] = d2[key]
                    if d2.has_key('title sort'): changes['title sort'] = d2['title sort']
            else:
                if d1[key] != d2[key]: changes[key] = d2[key]
log.debug("The following keys changed: %s", ' '.join(changes.keys()))
backup.run() # backup only if backup doesn't exist
if ebook.type == 'pdf':
write_changes_pdf(ebook, changes)
else:
write_changes(ebook, changes)
def write_changes(ebook, changes):
"""Write the metadata in the given dictionary into the ebook file."""
path = ebmeta.arguments.filename
for key in changes.keys():
if changes[key] == None: changes[key] = ""
args = [
u"ebook-meta",
u'"{}"'.format(path)
]
for a, b in (
(u'authors', 'authors'),
(u'book-producer', 'book producer'),
(u'isbn', 'isbn'),
(u'language', 'language'),
(u'date', 'publication date'),
(u'publisher', 'publisher'),
(u'series', 'series'),
(u'title', 'title')
):
if changes.has_key(b): args.append(u"--{}=\"{}\"".format(a, quote(changes[b])))
for a, b in (
('rating', 'rating'), # rating can't be unset once it's set, from ebook-meta CLI
('index', 'series index'), # series index can't be unset either
('author-sort', 'author sort'),
('title-sort', 'title sort')
):
if changes.has_key(b):
if changes[b]:
args.append(u"--{}=\"{}\"".format(a, quote(changes[b])))
if changes.has_key('description'):
description = shell.pipe(["pandoc"], changes['description'])
args.append( u"--comments=\"{}\"".format(quote(description)) )
if changes.has_key('tags'):
args.append( u"--tags=\"{}\"".format(quote(u','.join(changes['tags']))) )
if len(args) > 2:
# Run ebook-meta
# shell.run(" ".join(args), shell=True)
shell.pipe(u" ".join(args), shell=True)
if ebook.type == 'epub':
# set uuid only for Epub files
if(changes.has_key('uuid')):
try:
setUuid(changes['uuid'])
except:
pass
def setUuid(uuid_txt):
"""Write a new uuid to the Epub file."""
path = ebmeta.arguments.filename
metadata = None
metapath = "content.opf"
with ZipFile(path, 'r') as zip:
try:
metadata = zip.read(metapath)
except KeyError:
metapath = "OEBPS/content.opf"
metadata = zip.read(metapath)
log.debug("new uuid: %s", uuid_txt)
soup = BeautifulStoneSoup(metadata)
id = (
soup.find('dc:identifier', attrs={'opf:scheme':'uuid'}) or
soup.find('dc:identifier', attrs={'opf:scheme':'UUID'}) or
soup.find('dc:identifier', attrs={'scheme':'uuid'}) or
soup.find('dc:identifier', attrs={'scheme':'UUID'})
)
if not id:
m = soup.find('metadata')
id = Tag(soup, 'dc:identifier')
id['id'] = 'uuid_id'
id['opf:scheme'] = 'uuid'
id.insert(0, "--")
m.insert(0, id)
id.contents[0].replaceWith(unicode(uuid_txt))
#[node.extract() for node in id.findAll()] # remove contents
#id.insert(0, uuid) # insert uuid
shell.run(["zip", "-d", path, metapath])
with ZipFile(path, 'a') as zip:
zip.writestr(metapath, str(soup))
def quote(text):
"""Change " to \\"."""
try:
return text.replace('"', '\\"')
except TypeError:
return text
def write_changes_pdf(ebook, changes):
"""Write the metadata in the given dictionary into the pdf file."""
path = ebmeta.arguments.filename
for key in changes.keys():
if changes[key] == None: changes[key] = ""
args = [
u"exiftool",
u'"{}"'.format(path)
]
for a, b in (
(u'Author', 'authors'),
(u'Title', 'title')
):
if changes.has_key(b): args.append(u"-{}=\"{}\"".format(a, quote(changes[b])))
if len(args) > 2:
# Run ebook-meta
# shell.run(" ".join(args), shell=True)
shell.pipe(u" ".join(args), shell=True)
|
UTF-8
|
Python
| false
| false
| 6,001
|
py
| 24
|
edit.py
| 18
| 0.542243
| 0.537744
| 0
| 189
| 30.751323
| 95
|
YariKartoshe4ka/Olympic
| 1,967,095,031,421
|
e036b32b1c73a6adaf5e56a845d8ccf7bf86be49
|
5b8ed38b9f840fc3ea2ffa86c1c0519a7ff5e665
|
/Python/[0001] - [0050]/[0012] Дачники.py
|
e1bcf9f3022d54ce2b2d72b26b8b554f3c57c5e0
|
[] |
no_license
|
https://github.com/YariKartoshe4ka/Olympic
|
53f49f79fb124f34ea2cd8bd1f5449ef9aad4e97
|
ea70087286b16622010e997461c02a66b2de754c
|
refs/heads/master
| 2021-01-05T05:48:55.254665
| 2020-09-04T19:16:56
| 2020-09-04T19:16:56
| 240,903,298
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
n = int(input())
d = []
rez = 0
for i in range(n):
d.append(input().split(' '))
for i in range(len(d)):
for j in range(len(d[i])):
d[i][j] = int(d[i][j])
def product(Px, Py, Ax, Ay, Bx, By):
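    # 2D cross product of (B - A) and (P - A); its sign says on which side of
    # the directed segment AB the point P lies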
return (Bx - Ax) * (Py - Ay) - (By - Ay) * (Px - Ax)
for i in d:
if i[2] == i[3] == i[4] == i[5] == i[6] == i[7] == i[8] == i[9]:
if i[2] == i[0] == i[1]:
rez += 1
continue
p1 = product(i[0], i[1], i[2], i[3], i[4], i[5])
p2 = product(i[0], i[1], i[4], i[5], i[6], i[7])
p3 = product(i[0], i[1], i[6], i[7], i[8], i[9])
p4 = product(i[0], i[1], i[8], i[9], i[2], i[3])
if (p1 < 0 and p2 < 0 and p3 < 0 and p4 < 0) or (p1 > 0 and p2 > 0 and p3 > 0 and p4 > 0):
rez += 1
print(rez)
|
UTF-8
|
Python
| false
| false
| 762
|
py
| 55
|
[0012] Дачники.py
| 53
| 0.412073
| 0.335958
| 0
| 29
| 25.275862
| 94
|
jonathantuck/ProjectEuler
| 4,836,133,188,693
|
92c0a0890217da6d8265a692ba03319166f2a969
|
327cad6d50fc1d7d9e7952aba5d224a83e6bf5cd
|
/pe10.py
|
1397b6c22aa50c2547e8459bc13a2ea155541ba4
|
[] |
no_license
|
https://github.com/jonathantuck/ProjectEuler
|
443356749977a5087482396d79af6a029ac28a17
|
14a06ae9db719e4f35c6fd22cf24e33bb59d0bd9
|
refs/heads/master
| 2020-12-24T14:35:45.977315
| 2013-07-16T21:15:17
| 2013-07-16T21:15:23
| 11,404,115
| 0
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
import time
def isPrime(x):
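    # trial division: any composite x has a divisor no larger than sqrt(x)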
for i in range(2,int(math.sqrt(x))+1):
if x%i == 0:
return False
return True
start = time.time()
sum = 0
bound = 2 * (10**6)
for i in range(2,bound):
print i
if isPrime(i):
sum += i
print sum
print time.time()-start
|
UTF-8
|
Python
| false
| false
| 305
|
py
| 28
|
pe10.py
| 27
| 0.560656
| 0.531148
| 0
| 21
| 13.52381
| 42
|
chandanmaruthi/HTMServer_NeuralNetwork
| 16,071,767,636,883
|
a99e888603757ace355e0e358f83b14dc1dad0c7
|
2908e616e6235a0d0370591ae65930b73f40751c
|
/trainModelWithFile.py
|
3465dfe2ac5c2179c55195b3f479fbd4170cdb63
|
[] |
no_license
|
https://github.com/chandanmaruthi/HTMServer_NeuralNetwork
|
6cb4df9350591a71a4c9b83149f4fcc3cd378e02
|
a3c6e5478c006c0ef1b0a731d93a08749a8ea587
|
refs/heads/master
| 2020-12-25T06:03:28.479806
| 2016-07-24T04:46:10
| 2016-07-24T04:46:10
| 64,048,953
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from walnutclient import *
#objWalnutClient= walnutclient(5001,'localhost',selProject.ModelMap.RawXML)
strDataFolder = "/home/chandan/chandan/code/brainscience/"
strRootFolder = "/home/chandan/chandan/code/brainscience/"
strAppFolder = "curious/"
strModelFolder = 'curiousWorkbench/test/models/'
strModelFile = 'model1.txt'
strDataFile ='Test_003_1_WordList.csv'
inputModelFile = strRootFolder + strAppFolder + strModelFolder + strModelFile
strInputDataFile = strDataFolder + strDataFile
fo = open(inputModelFile,'r')
strRawModelXML = fo.read()
objwalnutclient = walnutclient(5001, 'localhost',strRawModelXML )
runValue = objwalnutclient.runNetworkWithFile(strInputDataFile, 'CSV',True,False,False,True)
print runValue['returnValue']
|
UTF-8
|
Python
| false
| false
| 740
|
py
| 4
|
trainModelWithFile.py
| 3
| 0.8
| 0.782432
| 0
| 21
| 34.238095
| 92
|
jackiehope/PythonBasis
| 8,057,358,680,820
|
200183a5acc5191a9bfacefcace2b4782fe13c42
|
74c4cadc040c466857b82101b5ce76c0f05c65e8
|
/hexadecimal_conversion.py
|
3e7eff51fc81ec41b484448ecae2f7f99ef9f2ba
|
[] |
no_license
|
https://github.com/jackiehope/PythonBasis
|
9c941d63b46ae3f23151c7c323a4df4759b9824e
|
2bfae0d6fe17c1c84a90a07d13d0b91dcfbfadb6
|
refs/heads/master
| 2021-01-20T18:52:39.439164
| 2016-08-04T11:34:22
| 2016-08-04T11:34:22
| 61,803,582
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding:utf-8
# decimal to hexadecimal
num1 = int(raw_input("Enter a decimal number: "))
print hex(num1)
# hexadecimal to decimal
num2 = raw_input("Enter a hexadecimal number: ")
print int(num2, 16)
# decimal to string
num3 = int(raw_input("Enter a number: "))
print repr(str(num3))
|
UTF-8
|
Python
| false
| false
| 292
|
py
| 6
|
hexadecimal_conversion.py
| 6
| 0.695
| 0.65
| 0
| 13
| 14.461538
| 34
|
kunweiTAN/techgym_ai
| 566,935,703,638
|
bfc3b7cb205460057f93c0fcc69b088775134d2f
|
90f545733f076747bad979faa3a8cf23867f7a3a
|
/i3KI.py
|
ba7f474993adb3e8dc24a836e6a5998f3d21e0e7
|
[] |
no_license
|
https://github.com/kunweiTAN/techgym_ai
|
f85dc52ce6e75f4c08213d5796171908beb9a69e
|
051274bcc789a563c46ed5661301535e76ae1e18
|
refs/heads/master
| 2023-08-17T05:15:18.758183
| 2021-09-21T11:57:07
| 2021-09-21T11:57:07
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#Techgym-6-1-Q
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# set the display precision for floats
%precision 3
# display the data
df = pd.read_csv('./scores400.csv')
display(df.head())
# array of the first 10 scores
# (the score column name below is an assumption; adjust it to match the CSV)
scores = np.array(df['score'])
print("first 10 scores")
print(scores[:10])
# sampling with replacement: run this a few times
num = 5
num_array = [1, 2, 3, 4, 5]
print("sampling with replacement")
print(np.random.choice(num_array, num))
# sampling without replacement: run this a few times
print("sampling without replacement")
print(np.random.choice(num_array, num, replace=False))
# fix the random seed so the result is the same on every run
np.random.seed(0)
# draw 20 scores
num = 20
sample = np.random.choice(scores, num)
print("sample mean")
print(sample.mean())
# population mean
print("population mean")
print(scores.mean())
# try sampling 10 times
n = 10
UTF-8
|
Python
| false
| false
| 651
|
py
| 517
|
i3KI.py
| 412
| 0.626866
| 0.575693
| 0
| 39
| 9.923077
| 35
|
PattMayne/weird_canada
| 15,058,155,391,275
|
9579234f651f0b8c3ab7c029614b7a95379f7391
|
91e5b3ec707a9e26f24a928ea6bbd2bcb4400555
|
/blog/forms.py
|
fcc8394c5295c43e9657e1e1e5281a2f573085e5
|
[] |
no_license
|
https://github.com/PattMayne/weird_canada
|
473ba1bbcb0372a765d12fe3d319d66c6bee47b8
|
646c58596fc30d95907a74f23b3ea1c02d16a226
|
refs/heads/master
| 2021-01-10T16:03:01.060024
| 2015-12-06T19:34:10
| 2015-12-06T19:34:10
| 47,510,209
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import datetime
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.forms import ModelForm, TextInput, Select, Textarea, FileInput, NumberInput, CheckboxInput, DateField, DateInput, DateTimeInput, SelectMultiple
from indie_db.models import URL, Artist, Work, Contributor, ProductionCompany
from blog.models import Article, Tag, Author
class AddAuthorForm(ModelForm):
class Meta:
model = Author
fields = ('authorname', 'tagline', 'description_en', 'description_fr', 'website')
widgets = {
'authorname': TextInput(attrs={'placeholder': 'Enter Name', 'required': True}),
'tagline': TextInput(attrs={'placeholder': 'From the SOMETHING of...', 'required': True}),
            'description_en': Textarea(attrs={'required': True, 'placeholder': 'Enter Description (English)'}),
            'description_fr': Textarea(attrs={'required': True, 'placeholder': 'Entrez Description (Français)'}),
'website': TextInput(attrs={'required': False, 'placeholder': 'Enter Full URL'})
}
def save(self, commit=True):
author = super(AddAuthorForm, self).save(commit=True)
author.authorname = self.cleaned_data['authorname']
author.tagline = self.cleaned_data['tagline']
author.description_en = self.cleaned_data['description_en']
author.description_fr = self.cleaned_data['description_fr']
author.website = self.cleaned_data['website']
author.save()
return author
class EditAuthorForm(ModelForm):
class Meta:
model = Author
fields = ('authorname', 'tagline', 'description_en', 'description_fr', 'website')
widgets = {
'authorname': TextInput(attrs={'placeholder': 'Enter Name', 'required': True}),
'tagline': TextInput(attrs={'placeholder': 'From the SOMETHING of...', 'required': True}),
            'description_en': Textarea(attrs={'required': True, 'placeholder': 'Enter Description (English)'}),
            'description_fr': Textarea(attrs={'required': True, 'placeholder': 'Entrez Description (Français)'}),
'website': TextInput(attrs={'required': False, 'placeholder': 'Enter Full URL'})
}
def save(self, commit=True):
author = super(EditAuthorForm, self).save(commit=True)
author.authorname = self.cleaned_data['authorname']
author.tagline = self.cleaned_data['tagline']
author.description_en = self.cleaned_data['description_en']
author.description_fr = self.cleaned_data['description_fr']
author.website = self.cleaned_data['website']
author.save()
return author
class AddArticleForm(ModelForm):
class Meta:
model = Article
fields = ('date_created', 'title', 'body_en', 'body_fr', 'article_category', 'how_category', 'publish', 'epoch')
widgets = {
'title': TextInput(attrs={'placeholder': 'Enter Title', 'required': True}),
'date_created': DateTimeInput(attrs={'required': True}),
'epoch': Select(attrs={'required': True}),
'article_category': Select(attrs={'required': True}),
'how_category': SelectMultiple(attrs={'required': False}),
'body_en': Textarea(attrs={'placeholder': 'Write English Article', 'required': False}),
'publish': CheckboxInput(attrs={'required': False}),
'body_fr': Textarea(attrs={'placeholder': 'Donnez votre article Française', 'required': False})
}
labels = {
            'date_created': _('Original Publication Date'),
'title': _('Title'),
'body_en': _('English Text'),
'publish': _('Publish Now?'),
'body_fr': _('Article Français'),
'article_category': _('Choose Main Category'),
'how_category': _('Choose How Category'),
'epoch': _('Epoch ("When" Category)'),
}
def save(self, commit=True):
article = super(AddArticleForm, self).save(commit=True)
article.title = self.cleaned_data['title']
chosen_creation_date = self.cleaned_data['date_created']
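        # keep the user-chosen date but stamp it with the current time of day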
now = datetime.datetime.now()
hour = now.hour
minute = now.minute
chosen_creation_date = chosen_creation_date.replace(hour=hour, minute=minute)
article.date_created = chosen_creation_date
article.date_modified = datetime.datetime.now()
article.body_en = self.cleaned_data['body_en']
article.body_fr = self.cleaned_data['body_fr']
article.publish = self.cleaned_data['publish']
article.article_category = self.cleaned_data['article_category']
article.how_category = self.cleaned_data['how_category']
article.epoch = self.cleaned_data['epoch']
article.save()
return article
class UpdateProfileForm(forms.ModelForm):
username = forms.CharField(required=False)
email = forms.EmailField(required=False)
first_name = forms.CharField(required=False)
last_name = forms.CharField(required=False)
class Meta:
model = User
fields = ('username', 'email', 'first_name', 'last_name')
def clean_email(self):
username = self.cleaned_data.get('username')
email = self.cleaned_data.get('email')
if email and User.objects.filter(email=email).exclude(username=username).count():
raise forms.ValidationError('This email address is already in use. Please supply a different email address.')
return email
def save(self, commit=True):
user = super(UpdateProfileForm, self).save(commit=False)
user.email = self.cleaned_data['email']
if commit:
user.save()
return user
|
UTF-8
|
Python
| false
| false
| 5,854
|
py
| 51
|
forms.py
| 12
| 0.624957
| 0.624957
| 0
| 132
| 43.318182
| 155
|
GRSEB9S/deepSVDD
| 13,228,499,289,827
|
b5139d4b2f139e39d21a82108bccf9b65788e3b8
|
dadbed9984cdc9d9a1e84a3c9929ac4c9a58c370
|
/src/test_OCSVM.py
|
64363077556e8af89b885c402e4567eb2427ca0e
|
[
"MIT"
] |
permissive
|
https://github.com/GRSEB9S/deepSVDD
|
74efac2d3c997aff07c85d30587883ef55fd1030
|
caf44c93914414ca26525fec69b780e920b9d061
|
refs/heads/master
| 2021-07-08T21:25:50.738316
| 2017-10-06T08:30:49
| 2017-10-06T08:30:49
| 109,243,104
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from svm import SVM
from config import Configuration as Cfg
from utils.log import log_exp_config, log_SVM, log_AD_results
from utils.visualization.images_plot import plot_outliers, plot_normals
# dataset
dataset = "mnist" # "mnist", "toyseq", "normal"
Cfg.seed = 0
Cfg.out_frac = 0
Cfg.ad_experiment = True
# "mnist" parameters (-1 stands for all the rest)
Cfg.mnist_normal = 0
Cfg.mnist_outlier = -1
# "cifar10" parameters (-1 stands for all the rest)
Cfg.cifar10_normalize_mode = "fixed value" # "per channel", "per pixel"
Cfg.cifar10_normal = 1
Cfg.cifar10_outlier = -1
# SVM parameters
loss = "OneClassSVM"
kernel = "rbf"
# mnist: gamma = (1.0 / 784)
gamma = (1.0 / 784) * (10 ** 0) # if 'auto', then 1/n is taken
verbose = True
Cfg.svm_nu = 0.01
# Plot parameters
Cfg.xp_path = "../log/ocsvm/" + dataset
# initialize OC-SVM
ocsvm = SVM(loss=loss, dataset=dataset, kernel=kernel, gamma=gamma)
# train or load OC-SVM model
ocsvm.train() # train model
# ocsvm.load_model(filename=Cfg.xp_path + "/model.p") # load model
# predict scores
ocsvm.predict(which_set='train')
ocsvm.predict(which_set='val') # validate model on test set
# plot targets and outliers sorted
n_img = 32
plot_outliers(ocsvm, n_img, Cfg.xp_path)
plot_normals(ocsvm, n_img, Cfg.xp_path)
# pickle/serialize
ocsvm.dump_model(filename=Cfg.xp_path + "/model.p")
ocsvm.log_results(filename=Cfg.xp_path + "/AD_results.p")
# log
log_exp_config(Cfg.xp_path, dataset)
log_SVM(Cfg.xp_path, loss, kernel, gamma, Cfg.svm_nu)
log_AD_results(Cfg.xp_path, ocsvm)
|
UTF-8
|
Python
| false
| false
| 1,537
|
py
| 58
|
test_OCSVM.py
| 56
| 0.705921
| 0.683149
| 0
| 56
| 26.446429
| 72
|
rubenfonseca/titanium_mobile
| 7,825,430,439,872
|
8353cd7d4a201a93e78b298c41cadfbd6d198308
|
a791802f1b8aeb9df4acd45b3e77d8c2ce7079d4
|
/support/android/jspacker.py
|
84215fbcc92adf74f30c1c4045ba2eed61042a84
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/rubenfonseca/titanium_mobile
|
ae762da2516cf05f64321b3578f7332e71cb256b
|
aabda44482a5ce597dcdcd45a8326e4eea14e8e4
|
refs/heads/master
| 2020-12-25T00:03:58.863404
| 2012-02-22T03:00:52
| 2012-02-22T03:00:52
| 2,652,535
| 2
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import sys, string, os
from base64 import b64encode
JAVA_TEMPLATE = """\
package ${package_name};
import java.util.Collections;
import java.util.Map;
import java.util.HashMap;
import android.util.Base64;
import org.appcelerator.kroll.util.KrollAssetHelper;
public class AssetCryptImpl implements KrollAssetHelper.AssetCrypt
{
private static Map<String, String> assets = initAssets();
private static Map<String, String> initAssets()
{
Map<String, String> assets = new HashMap<String, String>();
${init_assets}
return Collections.unmodifiableMap(assets);
}
public String readAsset(String path)
{
String data = assets.get(path);
if (data == null) {
return null;
}
return new String(Base64.decode(data, Base64.DEFAULT));
}
}
"""
def to_java_string(s):
return '"%s"' % s
def to_java_map(map_var, keys_values):
"""Generate code to put a list of key-value pairs into a Java Map instance.
map_var - The variable name of the Java Map instance.
keys_values - A list of 2-tuples containing a key and value pair.
"""
result = []
for k, v in keys_values:
result.append('%s.put(%s, %s);' % (map_var, k, v))
return '\n'.join(result)
def read_file(filename):
file = open(filename, "rt")
try:
lines = file.read()
finally:
file.close()
return lines
class Crypt(object):
"""Helps generate source for an AssetCrypt implementation."""
KEYS_MAP_VAR = 'keys'
def __init__(self):
self.keys = []
self.assets = []
def add_asset(self, key, data):
# Convert Window paths to Unix style.
self.keys.append(key.replace('\\', '/'))
self.assets.append(data)
def generate_code(self, package, target_file):
"""Generate the Java class source and write to target file.
package - The Java package name for this class.
        target_file - Path to the output generated Java source file.
"""
package_dir = os.path.join(*package.split('.'))
target_dir = os.path.join(target_file, package_dir)
try:
os.makedirs(target_dir)
except OSError, e:
pass
output = open(os.path.join(target_dir, 'AssetCryptImpl.java'), 'w')
asset_map = []
for index, key in enumerate(self.keys):
data = b64encode(self.assets[index])
asset_map.append((to_java_string(key), to_java_string(data)))
output.write(string.Template(JAVA_TEMPLATE).substitute(
package_name = package,
init_assets = to_java_map('assets', asset_map)
))
output.close()
"""
Package a set of JavaScript assets into a generated AssetCrypt Java class.
asset_dir - absolute path to assets folder that contains the sources.
sources - list of paths for each JavaScript asset.
package - The Java package name for the generated class.
target - path to where the java class will be written.
"""
def pack(asset_dir, sources, package, target):
asset_dir_len = len(asset_dir)
def rel_asset_path(path):
return path[asset_dir_len+1:]
crypt = Crypt()
# Gather sources together so we can form a crypt to store them.
for source in sources:
filename = str(source)
lines = read_file(filename)
crypt.add_asset(rel_asset_path(filename), lines)
# Generate Java code and output to target file.
crypt.generate_code(package, str(target))
|
UTF-8
|
Python
| false
| false
| 3,287
|
py
| 304
|
jspacker.py
| 169
| 0.677213
| 0.672954
| 0
| 124
| 25.5
| 77
|
thiagocosta-dev/Atividades-Python-CursoEmVideo
| 18,992,345,413,288
|
a835321a24c42be0d85c03a5d02b42ac85e16c82
|
d681e088b554a8697d5d3231173a9c800760780f
|
/ex043.py
|
255c83418724f2bc088f9d6715df537aa5fe59f3
|
[] |
no_license
|
https://github.com/thiagocosta-dev/Atividades-Python-CursoEmVideo
|
0870ef5b0d97dd8d9bc4fe30def66bb8b05c2abf
|
4dfeab066ecbe8eb5789965d878db58487e3fdbe
|
refs/heads/main
| 2023-04-15T00:00:15.225495
| 2021-04-10T00:59:07
| 2021-04-10T00:59:07
| 353,154,646
| 0
| 0
| null | false
| 2021-03-31T13:34:01
| 2021-03-30T22:11:57
| 2021-03-30T22:41:23
| 2021-03-31T13:34:01
| 4
| 1
| 0
| 1
|
Python
| false
| false
|
'''
CHALLENGE 043
Write a program that reads a person's weight and height,
computes their BMI and shows their status according to the table below:
- below 18.5: underweight            - 30 up to 40: obese
- between 18.5 and 25: ideal weight  - above 40: morbidly obese
- 25 up to 30: overweight
'''
altura = float(input('Height: '))
peso = float(input('Weight: '))
imc = peso / (altura ** 2)
print(f'Your BMI is {imc:.1f}. Therefore you are:', end=' ')
# closed upper bounds so boundary values such as 18.5 are classified
if imc < 18.5:
    print('Underweight.')
elif imc <= 25:
    print('Ideal weight.')
elif imc <= 30:
    print('Overweight.')
elif imc <= 40:
    print('Obese.')
else:
    print('Morbidly obese.')
|
UTF-8
|
Python
| false
| false
| 709
|
py
| 41
|
ex043.py
| 40
| 0.641026
| 0.58547
| 0
| 24
| 28.166667
| 69
|
liruijia/learn
| 10,187,662,465,541
|
4f14fb85d33061db07b91870389b76fc95dc9734
|
e15501f5e90e1abbff926593950ade627244ff37
|
/SZ_CODE/hot/顺时针打印矩阵.py
|
73825d598f42bea971dbad2455990a7b6bb03685
|
[] |
no_license
|
https://github.com/liruijia/learn
|
e93edd771b97d2ae2f2f90dfae39755138691ba7
|
666fc85fef2b75e77e1d83397c8b3cb70481ae1b
|
refs/heads/master
| 2022-05-29T05:45:25.093067
| 2022-05-13T09:29:32
| 2022-05-13T09:29:32
| 207,337,291
| 0
| 0
| null | false
| 2019-11-10T07:31:55
| 2019-09-09T15:06:28
| 2019-11-10T07:29:37
| 2019-11-10T07:31:34
| 30
| 0
| 0
| 1
|
Python
| false
| false
|
#!/usr/bin/env python3
# -*- coding = utf-8 -*-
'''
@Author : RuiJia Li
@Time   : 2022/4/9 12:25
@File   : 顺时针打印矩阵 (print a matrix in clockwise spiral order)
@Desc   :
'''
"""
Peel the matrix layer by layer, popping elements clockwise:
top row, right column, bottom row reversed, then left column bottom-up.
"""
def printMatrix(matrix):
res = []
while matrix :
res += matrix.pop(0)
if matrix and matrix[0]:
for row in matrix:
res.append(row.pop())
if matrix :
res += matrix.pop()[::-1]
if matrix and matrix[0]:
for row in matrix[::-1]:
res.append(row.pop(0))
return res
if __name__ == '__main__':
matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
res = printMatrix(matrix)
print(res)
# [1,2,3,4,8,12,11,10,9,5,6,7]
|
UTF-8
|
Python
| false
| false
| 729
|
py
| 123
|
顺时针打印矩阵.py
| 110
| 0.482014
| 0.41295
| 0
| 32
| 20.75
| 47
|
Soyuzbek/Ume
| 395,137,002,032
|
40bd7952e75a3748af6c2180b572a79c2965f268
|
5d87f034edcd71f41a4fdbeff08acd1593dd4870
|
/main/models.py
|
5125e8fe83a60572f013098dd10cffb3752931bb
|
[] |
no_license
|
https://github.com/Soyuzbek/Ume
|
1b88b41343532a4134d81b8338784ff952a4f6b9
|
8d6ea1dda52a277d3de891386764377aa059139e
|
refs/heads/master
| 2022-12-10T02:00:40.937511
| 2021-06-08T23:27:12
| 2021-06-08T23:27:12
| 186,343,776
| 1
| 1
| null | false
| 2022-12-08T05:07:06
| 2019-05-13T04:12:11
| 2021-06-08T23:27:15
| 2022-12-08T05:07:06
| 23,231
| 0
| 1
| 4
|
JavaScript
| false
| false
|
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class Post(models.Model):
name = models.CharField(_('name'), max_length=255)
annotation = models.TextField(_('annotation'))
content = RichTextUploadingField(verbose_name=_('content'))
author = models.ForeignKey('users.User', on_delete=models.SET_NULL, null=True, verbose_name=_('author'))
date = models.DateTimeField(_('date'), auto_now=True)
image = models.ImageField(_('image'), upload_to='image', null=True)
class Meta:
verbose_name = _('Post')
verbose_name_plural = _('Posts')
def __str__(self):
return self.name
class Notification(models.Model):
name = models.TextField(_('name'))
content = RichTextUploadingField(verbose_name=_('content'))
date = models.DateTimeField(_('date'), auto_now=True)
expire = models.DateTimeField(_('date of expire'))
class Meta:
verbose_name = _('Notification')
verbose_name_plural = _('Notifications')
def __str__(self):
return self.name
class Lesson(models.Model):
id = models.CharField('ID', max_length=7, primary_key=True)
name = models.CharField(_('name'), max_length=255)
teacher = models.ForeignKey('users.Teacher', on_delete=models.SET_NULL, null=True, verbose_name=_('teacher'),
related_name='lessons')
STATE_CHOICES = (
('ZD', _('Necessary')),
('SD', _('Optional'))
)
state = models.CharField(_('type'), max_length=5, choices=STATE_CHOICES)
term = models.PositiveSmallIntegerField(_('term'), default=1)
topics = RichTextUploadingField(verbose_name=_('topics'), null=True, blank=True)
class Meta:
verbose_name = _('Lesson')
verbose_name_plural = _('Lessons')
def __str__(self):
return f'{self.id} - {self.name}'
def get_absolute_url(self):
return reverse('lesson', args=[str(self.id)])
class Wallpaper(models.Model):
name = models.CharField(_('name'), max_length=200)
image = models.ImageField(_('image'), upload_to='image')
class Meta:
verbose_name = _('Wallpaper')
verbose_name_plural = _('Wallpapers')
def __str__(self):
return self.name
class Assign(models.Model):
DAY_CHOICE = (
('Monday', _('Monday')),
('Tuesday', _('Tuesday')),
('Wednesday', _('Wednesday')),
('Thursday', _('Thursday')),
('Friday', _('Friday')),
)
TIME_SLOTS = (
('8:00 - 8:45', '8:00 - 8:45'),
('8:55 - 9:40', '8:55 - 9:40'),
('9:50 - 10:35', '9:50 - 10:35'),
('10:45 - 11:30', '10:45 - 11:30'),
('11:40 - 12:25', '11:40 - 12:25'),
('12:35 - 13:20', '12:35 - 13:20'),
('13:30 - 14:15', '13:30 - 14:15'),
('14:25 - 15:10', '14:25 - 15:10'),
('15:20 - 16:05', '15:20 - 16:05'),
('16:15 - 17:00', '16:15 - 17:00'),
('17:10 - 17:55', '17:10 - 17:55')
)
ROOM_CHOICES = (
('101', '101'),
('102', '102'),
('103', '103'),
('104', '104'),
('105', '105'),
('106', '106'),
('107', '107'),
('118', '118'),
('119', '119')
)
lesson = models.ForeignKey(Lesson, models.CASCADE, related_name='assign_set', verbose_name=_('lesson'))
room = models.CharField(_('room'), max_length=10, choices=ROOM_CHOICES)
day = models.CharField(_('day'), max_length=15, choices=DAY_CHOICE)
    time_slots = models.CharField(_('time slot'), max_length=13, choices=TIME_SLOTS, default=TIME_SLOTS[0][0])  # default is the stored value, not the (value, label) pair
class Meta:
verbose_name = _('Assign')
verbose_name_plural = _('Assigns')
|
UTF-8
|
Python
| false
| false
| 3,872
|
py
| 54
|
models.py
| 38
| 0.572314
| 0.510589
| 0
| 114
| 32.964912
| 113
|
rubendegroote/CRISTALCLEAR
| 12,386,685,728,184
|
90aac66c7ab6ae7fa764b3e6b04a2541d34d911b
|
585be7cf89336bc0a0e30a6983b1085f5bf1a7e0
|
/Code/core/scanner.py
|
cc87cf6a5e659d02fc12c37cf8c4030786224744
|
[] |
no_license
|
https://github.com/rubendegroote/CRISTALCLEAR
|
951d76232604da2cab96b94ba98812394e97352c
|
3748694095753b33e42dce4704a542ef28abf549
|
refs/heads/master
| 2020-05-17T22:19:42.856068
| 2014-12-01T12:52:36
| 2014-12-01T12:52:36
| 25,816,808
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from daq.acquire import acquire, fastAcquire, clearcard, acquireRILIS, acquireCW
from daq.acquireDummy import acquireDummy, laserDummy
from multiprocessing import Process, Queue, Event, Value
import numpy as np
import os
import time
import subprocess
import threading
from PyQt4 import QtCore,QtGui
class Scanner(QtCore.QObject):
emitScanProgress = QtCore.Signal(int,str,dict)
captureDone = QtCore.Signal(object)#Emitted when a capture is done
def __init__(self,settings):
super(QtCore.QObject,self).__init__()
self.settings = settings
# array of values to iterat though
self.scanArray = []
# Progress through this array
self.pos = 0
self.currentValue = 0
# When in freerun: have the variables changed?
self.variablesChanged = False
# Acquistion mode (time, triggers, supercycle or proton pulse)
self.mode = 'Time'
# acquisition time for each wavelength when acquiring timed
self.timePerStep = 10
# Number of samples to collect
self.samplesPerStep = 10
# supercycles for each wavelength when acquiring per SS
self.SSPerStep = 10
self.totalCycles = 0
self.noOfCycles = 0
# proton pulse time for each wavelength when acquiring bunched
self.pPerStep = 10
self.startCycle = 0
self.totalPulses = 0
# True if the user can just tweak e.g. voltage or etalons, no
# iteration through the scanArray is running
self.freeScan = False
# True if the scan restarts every time
self.looping = False
# True if the scan zigzags /\, False if it scans /|
self.zigZag = False
# True if the scan is \ in a /\ scan
self.zag = True
# which variable the scanarray will chagne
self.scanVariable = 'volt'
# contains ifno the acquisition process uses to change the wavelength
# for every key there is a value
# This allows to scan by defining e.g. a wavelength, voltage or the etalons setpoints
self.scanVariables = {'wavelength': 0,
'volt': 0,
'thin': 0,
'thick': 0
}
self.continueScanning = True
self.initProcessCommunications()
def initProcessCommunications(self):
# Queues and events for the acquisition processes
self.newFreqEvent = Event()
self.controlEvent = Event()
self.newScanEvent = Event()
self.captureRunningEvent = Event()
self.recordingEvent = Event()
self.currentVolt = Value('d',0.0)
self.currentSamples = Value('i',0)
self.currentFreq = Value('d',0.0)
self.currentThick = Value('d',0.0)
self.currentThin = Value('d',0.0)
self.currentPower = Value('d',0.0)
self.currentLW = Value('d',0.0)
self.currentCycle = Value('i',0)
self.protonsPerCycle = Value('i',0)
self.protonsForHRS = Value('i',0)
self.protonPulse = Value('b',False)
self.iscool = Value('d',0.0)
self.dataQueue = Queue()
self.freqQueue = Queue()
self.errorQueue = Queue()
self.messageQueue = Queue()
self.dataStreamQueue = Queue()
def startProcesses(self):
if self.settings.debugMode:
targetF = acquireDummy
else:
targetF = acquire
self.daqProcess = Process(target=targetF,
args=(self.settings,self.dataQueue,self.controlEvent,
self.captureRunningEvent,self.recordingEvent,self.errorQueue,
self.dataStreamQueue,self.messageQueue,
self.currentVolt,self.currentSamples,
self.currentFreq,self.currentThick,
self.currentThin,self.currentPower,self.currentLW,
self.iscool))
if self.settings.laser == 'CW Laser Voltage Scan Without Wavemeter':
targetF = laserDummy
elif self.settings.laser == 'CW Laser Voltage Scan'\
or self.settings.laser == 'Matisse Manual Scan':
targetF = acquireCW
elif self.settings.laser == 'RILIS':
targetF = acquireRILIS
self.RILISProcess = Process(target=targetF,
args=(self.settings, self.freqQueue,self.controlEvent,
self.captureRunningEvent,self.recordingEvent,self.newFreqEvent,
self.errorQueue,self.messageQueue,self.currentVolt,
self.currentFreq,self.currentThick,self.currentThin,
self.currentPower,self.currentLW,self.currentCycle,
self.protonsPerCycle,self.protonsForHRS,self.protonPulse,
self.iscool))
self.daqProcess.start()
self.RILISProcess.start()
self.continueScanning = True
self.scanThread = threading.Timer(0, self.scan).start()
if not self.settings.laser == 'CW Laser Voltage Scan Without Wavemeter' :
relayPath = self.settings.path + '\\Code\\builds\\CRISValueRelay\\CRISValueRelay4\\CrisValueRelay'
self.relayProcess = subprocess.Popen(relayPath)
self.messageQueue.put((True,'Relay VI started.'))
def stopProcesses(self):
self.messageQueue.put((True,'Terminating processes...'))
self.continueScanning = False
try:
self.daqProcess.terminate()
self.RILISProcess.terminate()
if not self.settings.laser == 'CW Laser Voltage Scan Without Wavemeter' :
self.relayProcess.terminate()
except:
print "Could not stop capture processes"
def restartProcesses(self):
self.stopProcesses()
self.startProcesses()
def setScanMode(self, mode,time):
self.mode = mode
if self.mode == 'Time':
self.timePerStep = time
elif self.mode == 'Triggers':
self.samplesPerStep = time
elif self.mode == 'Supercycle':
self.SSPerStep = time
        elif self.mode == 'Proton Pulse':
            self.pPerStep = time
def setVariable(self, variable):
self.scanVariable = variable
def toggleFreeScan(self):
self.freeScan = not self.freeScan
def toggleLooping(self):
self.looping = not self.looping
def toggleZigZag(self):
self.zigZag = not self.zigZag
def makeFreqArray(self, variable, points, steps):
self.pos = 0
self.scanArray = []
self.scanVariable = variable
start = points[0]
for i, point in enumerate(points[1:]):
newArray = np.linspace(start,point,steps[i])
start = point
self.scanArray = np.append(self.scanArray, newArray)
def resetScan(self):
self.pos = 0
self.currentValue = self.scanArray[self.pos]
self.emptyQueues()
def setCurrentValue(self, scanVariable, scanVariables):
self.scanVariable = scanVariable
self.scanVariables = scanVariables
self.currentValue = scanVariables[scanVariable]
self.variablesChanged = True
def scan(self):
if self.freeScan:
if self.variablesChanged:
self.controlEvent.clear()
self.variablesChanged = False
# Put the new scan info on the Queue and notify the processes by setting
# the newFreqEvent
# Empty the queue before putting anything on it (perhaps needed for RILIS coms
# if I have to click several times for the comms to work)
while not self.freqQueue.empty():
self.freqQueue.get()
self.freqQueue.put((self.scanVariable, self.scanVariables))
self.newFreqEvent.set()
# Wait for Wavelength change, once this is done: start up the
# filling of the dataQueue for the requested times
self.wait()
else:
time.sleep(0.05)
else:
if self.captureRunningEvent.is_set():
self.controlEvent.clear()
self.currentValue = self.scanArray[self.pos]
self.variablesChanged = False
self.scanVariables[self.scanVariable] = self.currentValue
# Put the new scan info on the Queue and notify the processes by setting
# the newFreqEvent
# Empty the queue before putting anything on it (perhaps needed for RILIS coms
# if I have to click several times for the comms to work)
while not self.freqQueue.empty():
self.freqQueue.get()
# Put the new scan info on the Queue and notify the processes by setting
# the newFreqEvent
self.freqQueue.put((self.scanVariable, self.scanVariables))
self.newFreqEvent.set()
# info to update the GUI
progress = 100*(self.currentValue-self.scanArray[0])/ \
(self.scanArray[-1]-self.scanArray[0])
if self.zigZag and self.zag:
progress = 100 - progress
self.emitScanProgress.emit(progress, self.scanVariable, self.scanVariables)
# Wait for Wavelength change, once this is done: start up the
# filling of the dataQueue for the requested times
self.wait()
else:
self.pos = 0
time.sleep(0.05)
if self.continueScanning:
# if self.captureRunningEvent.is_set():
# # empty the queue just in case some stuff was left due to sync issues
# self.emptyQueues()
self.scanThread = threading.Timer(0, self.scan).start()
def emptyQueues(self):
while not self.dataQueue.empty():
self.dataQueue.get()
while not self.freqQueue.empty():
self.freqQueue.get()
def wait(self):
# Wait for the wavelength change
while not self.controlEvent.is_set():
if self.interruptedSleep(0.001):
return
if self.captureRunningEvent.is_set():
#########################
### Timed acquisition ###
#########################
if self.mode == 'Time':
self.recordingEvent.set()
time.sleep(self.timePerStep*0.001)
self.recordingEvent.clear()
###########################
### trigger acquisition ###
###########################
elif self.mode == 'Triggers':
self.recordingEvent.set()
while not self.currentSamples.value == self.samplesPerStep:
if self.interruptedSleep(0.001):
return
self.currentSamples.value = 0
self.recordingEvent.clear()
##############################
### Supercycle acquisition ###
##############################
elif self.mode == 'Supercycle':
# we start in the middle of a proton pulse on HRS, so wait
if self.pos == 0 and self.protonPulse.value:
# Remember the pulse we started on
startCycle = self.currentCycle.value
# Wait for the current ongoing cycle to end
while startCycle == self.currentCycle.value:
if self.interruptedSleep(0.001):
return
# Remember the pulse we started on
self.startCycle = self.currentCycle.value
# Remember the total number of cycles when we started
self.noOfCycles = self.protonsPerCycle.value
while self.totalCycles < self.SSPerStep:
# Start recording
self.recordingEvent.set()
# wait until the current supercycle passes
while self.currentCycle.value == self.startCycle:
if self.interruptedSleep(0.001):
return
while not self.currentCycle.value == self.startCycle:
# Check if the total number of pulses in the cycle has
# changed if we started on the last pulse of the cycle
if not self.noOfCycles == self.protonsPerCycle.value:
self.noOfCycles = self.protonsPerCycle.value
# If we started on a pulse that just fell off the board:
# change the start pulse to the highest pulse number
if self.startCycle > self.protonsPerCycle.value:
self.startCycle = self.noOfCycles
if self.interruptedSleep(0.001):
return
# current pulse passed: stop recording
self.recordingEvent.clear()
self.totalCycles = self.totalCycles + 1
################################
### Proton Pulse acquisition ###
################################
elif self.mode == 'Proton Pulse':
# we start in the middle of a proton pulse on HRS, so wait
if self.pos == 0 and self.protonPulse.value:
while self.protonPulse.value:
if self.interruptedSleep(0.001):
return
while self.totalPulses < self.pPerStep:
# wait for a proton pulse, record what SS this happens on
while not self.protonPulse.value:
if self.interruptedSleep(0.001):
return
# Start recording
self.recordingEvent.set()
self.startCycle = self.currentCycle.value
# wait until the current proton pulse passes
while self.currentCycle.value == self.startCycle:
if self.interruptedSleep(0.001):
return
# current pulse passed: stop recording
self.recordingEvent.clear()
self.totalPulses = self.totalPulses + 1
self.scanToNext()
else:
time.sleep(0.001)
def scanToNext(self):
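        # advance to the next scan point; when looping, restart the array, and
        # in zigzag mode reverse it so the scan sweeps back instead of jumping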
if self.pos < len(self.scanArray)-1:
self.pos = self.pos + 1
elif self.looping:
self.pos = 0
self.newScanEvent.set()
if self.zigZag:
self.scanArray = self.scanArray[::-1]
self.zag = not self.zag
else:
self.captureDone.emit(True)
time.sleep(0.01)
self.pos = 0
def interruptedSleep(self,t):
if self.continueScanning and self.captureRunningEvent.is_set() and not self.variablesChanged:
time.sleep(t)
return False
else:
return True
|
UTF-8
|
Python
| false
| false
| 15,643
|
py
| 92
|
scanner.py
| 67
| 0.539602
| 0.532379
| 0
| 419
| 36.334129
| 173
|
psomani7/CS685-Data_Mining
| 4,861,902,979,971
|
895d3934e04f5af3f1a02abe624881c6117dd658
|
cd5dc48e10b3233334603e5e238ecba072441c71
|
/Assignment 1/src/q2.py
|
010aeac9dd58555403b0d8e432e5ac182fdfc423
|
[] |
no_license
|
https://github.com/psomani7/CS685-Data_Mining
|
9db10b23e9e1bc0d19a9b549a6467b02bf028a04
|
0a9810450d9257ed01b7e953172c4d9628506486
|
refs/heads/main
| 2023-09-05T09:02:00.162376
| 2021-11-26T10:19:14
| 2021-11-26T10:19:14
| 505,808,954
| 1
| 0
| null | true
| 2022-06-21T11:12:53
| 2022-06-21T11:12:53
| 2021-11-26T10:19:17
| 2021-11-26T10:19:14
| 21,403
| 0
| 0
| 0
| null | false
| false
|
import numpy as np
import pandas as pd
import json
with open('./neighbor-districts-modified.json') as f:
    data = json.load(f)
district_list = []
edge_list = []
for dist in data:
district_list.append(dist)
for neighbour in data[dist]:
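        # add each undirected edge only once: a neighbour already seen as an
        # outer-loop district has had this edge recorded (assumes the
        # adjacency json is symmetric)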
if(neighbour not in district_list):
edge_list.append([dist, neighbour])
write_data = pd.DataFrame(edge_list)
write_data.to_csv('./output/edge-graph.csv', index=False, header=False)
|
UTF-8
|
Python
| false
| false
| 437
|
py
| 54
|
q2.py
| 37
| 0.688787
| 0.688787
| 0
| 17
| 24.764706
| 71
|
nkanaev/solutions
| 11,278,584,123,583
|
53869bbdfb61efd92e02076767596c425ef6354b
|
9f81ebd864660711696570d1e4c3d76ba97be02f
|
/adventofcode/2017/15.py
|
66185f616c543b0ee92d34a91de1ef5a65d4368c
|
[] |
no_license
|
https://github.com/nkanaev/solutions
|
93f83a2fce425e42dc525f8398e1164b4ac50d99
|
b7c977a8e743ff932d3babd34d39de3bb60a64df
|
refs/heads/master
| 2021-01-16T22:11:17.548527
| 2020-07-16T09:38:36
| 2020-07-16T09:38:36
| 68,294,791
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
def count(astate, bstate):
c = 0
mask = (1 << 16) - 1
for _ in range(40000000):
astate = astate * 16807 % 2147483647
bstate = bstate * 48271 % 2147483647
if (astate & mask) == (bstate & mask):
c += 1
return c
def test():
assert count(65, 8921) == 588
if __name__ == '__main__':
print(count(873, 583))
|
UTF-8
|
Python
| false
| false
| 365
|
py
| 117
|
15.py
| 105
| 0.509589
| 0.347945
| 0
| 17
| 20.470588
| 46
|
p13kara33/Data-Analysis-on-Census-Data
| 2,525,440,775,715
|
4b29a773e651bca6f4e7e36860892c8d01d02533
|
29f61f4a2b58065e100898498cdc2a955f2b7e68
|
/Descriptive Analytics/Categorical Data/freq_tables.py
|
32df64b131bd3eb70488381fd8be44e46d255d27
|
[] |
no_license
|
https://github.com/p13kara33/Data-Analysis-on-Census-Data
|
bc269cff6aca49e79bdbcf420eceae29e5a9ecb4
|
bfd98facab7c19f06d6545399fb9d722c6403906
|
refs/heads/main
| 2023-02-24T07:09:27.120490
| 2021-01-30T15:12:23
| 2021-01-30T15:12:23
| 332,567,739
| 2
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import docx
import pandas as pd
regions = {'E12000001': 'North East', 'E12000002': 'North West',
'E12000003': 'Yorkshire and the Humber', 'E12000004': 'East Midlands',
'E12000005': 'West Midlands', 'E12000009': 'South West',
'E12000006': 'East of England', 'E12000008': 'South East',
'E12000007': 'London', 'W92000004': 'Wales'}
residence_type = {'H': 'Not resident in a communal establishment',
"C": 'Resident in a communal establishment'}
family_composition = {-9: 'No code required', 1: 'Not in a family',
2: 'Married/same-sex civil partnership couple family',
3: 'Cohabiting couple family', 4: 'Lone parent family (male head)',
5: 'Lone parent family (female head)', 6: 'Other related family'}
population_base = {1: 'Usual resident',
2: 'Student living away from home during term-time',
3: 'Short-term resident'}
sex = {1: 'Male', 2: 'Female'}
age = {1: '0 to 15', 2: '16 to 24', 3: '25 to 34', 4: '35 to 44', 5: '45 to 54',
6: '55 to 64', 7: '65 to 74',
8: '75 and over'}
marital_status = {1: 'Single', 2: 'Married', 3: 'Separated but still legally married',
4: 'Divorced ', 5: 'Widowed'}
student = {1: 'Student', 2: 'No Student'}
country_of_birth = {-9: 'No code required', 1: 'UK', 2: 'Non UK'}
health = {-9: 'No code required', 1: 'Very good health', 2: 'Good health',
3: 'Fair health', 4: 'Bad health', 5: 'Very bad health'}
ethnic_group = {-9: 'No code required', 1: 'White', 2: 'Mixed',
3: 'Asian and Asian British', 4: 'Black or Black British',
5: 'Chinese or Other ethnic group'}
religion = {-9: 'No code required', 1: 'No religion', 2: 'Christian', 3: 'Buddhist',
4: 'Hindu', 5: 'Jewish', 6: 'Muslim', 7: 'Sikh', 8: 'Other religion',
9: 'Not stated'}
economic_activity = {-9: 'No code required', 1: 'Economically active: Employee',
2: 'Economically active: Self-employed',
3: 'Economically active: Unemployed',
4: 'Economically active: Full-time student',
5: 'Economically inactive: Retired',
6: 'Economically inactive: Student',
7: 'Economically inactive: Looking after home or family',
8: 'Economically inactive: Long-term sick or disabled',
9: 'Economically inactive: Other'}
occupation = {-9: 'No code required', 1: 'Managers, Directors and Senior Officials',
2: 'Professional Occupations',
3: 'Associate Professional and Technical Occupations',
4: 'Administrative and Secretarial Occupations',
5: 'Skilled Trades Occupations',
6: 'Caring, Leisure and Other Service Occupations',
7: 'Sales and Customer Service Occupations',
8: 'Process, Plant and Machine Operatives', 9: 'Elementary Occupations'}
industry = {-9: 'No code required', 1: 'Agriculture, forestry and fishing',
2: 'Mining and quarrying; Manufacturing;', 3: 'Construction',
4: 'Wholesale and retail trade; Repair of motor vehicles and motorcycles',
5: 'Accommodation and food service', 6: 'Transport and storage',
7: 'Financial and insurance', 8: 'Real estate',
9: 'Public administration and defence', 10: 'Education',
11: 'Human health and social work activities',
12: 'Other community, social and personal service activities'}
hw_per_week = {-9: 'No code required', 1: 'Part-time: 15 or less h.w.',
2: 'Part-time: 16 to 30 hw', 3: 'Full-time: 31 to 48 hw',
4: 'Full-time: 49 or more hw'}
approx_social_grade = {-9: 'No code required', 1: 'AB', 2: 'C1', 3: 'C2', 4: 'DE'}
colors = ["black", "#ff692e", "#2e5cff", "#ffd12e", "#ff2e5c", "#5cff2e", "#ff0000",
"#8b4513", "#ff7f50", "#ffff80", '#9370db', '#ff581a', '#c75757']
def freq_dict_creator(my_data, col_name, dictionary, doc_name):
"""
:param my_data: the data frame
:param col_name: the attribute name
:param dictionary: the name of the dictionary of the attribute
:return: the frequency table as a dictionary
:param doc_name: name of the docx file
"""
freq_arr = my_data[col_name].value_counts(sort=True)
perc_arr = my_data[col_name].value_counts(normalize=True, sort=True)
percentages = []
the_list = []
for i in perc_arr.index:
val = perc_arr[i] * 100
val = round(val, 1)
percentages.append(str(val) + '%')
cnt = 0
for i in freq_arr.index:
col_descr = dictionary[i]
the_list.append([[col_descr], [freq_arr[i]], [percentages[cnt]]])
cnt += 1
df = pd.DataFrame(the_list, columns=[col_name, 'Frequency', 'Percentages'])
doc = docx.Document()
t = doc.add_table(df.shape[0] + 1, df.shape[1])
# add the header rows.
for j in range(df.shape[-1]):
t.cell(0, j).text = df.columns[j]
# add the rest of the data frame
for i in range(df.shape[0]):
for j in range(df.shape[-1]):
t.cell(i + 1, j).text = str(df.values[i, j])
# save the doc
doc_name = doc_name + ".docx"
# doc.save(doc_name)
return df
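# Hedged usage sketch (added for illustration): the input file name and the
# exact 'Sex' column label are assumptions, not part of the original script.
if __name__ == '__main__':
    census = pd.read_csv('census.csv')  # hypothetical census extract
    sex_table = freq_dict_creator(census, 'Sex', sex, 'sex_frequencies')
    print(sex_table)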
|
UTF-8
|
Python
| false
| false
| 5,515
|
py
| 26
|
freq_tables.py
| 4
| 0.560109
| 0.510789
| 0
| 128
| 41.085938
| 89
|
yanzay/pagankolo
| 16,011,638,122,071
|
1e563125c219e22c58b84ac13a42571228f2e1c4
|
91f1183df4ec77f8cccc0535b427f7fc91676788
|
/models.py
|
5bade2f1ca0da7f3298cd47424d6fdef69f2ee56
|
[] |
no_license
|
https://github.com/yanzay/pagankolo
|
3da2221e972e09ba16ff6806d69a00fc353d8792
|
c9becaadb551c6a1d7114ac9344e9c526facca5e
|
refs/heads/master
| 2020-05-03T12:56:36.596558
| 2012-04-07T23:54:35
| 2012-04-07T23:54:35
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
from google.appengine.ext import db
class Profile(db.Model):
user = db.UserProperty(required=True, auto_current_user_add=True)
name = db.StringProperty(required=True)
avatar = db.BlobProperty()
karma = db.IntegerProperty(default=0)
favorites = db.ListProperty(db.Key)
class Category(db.Model):
link = db.StringProperty(required=True)
name = db.StringProperty(required=True)
class BlogPost(db.Model):
title = db.StringProperty(required=True)
body = db.TextProperty(required=True)
teaser = db.TextProperty()
category = db.ReferenceProperty(Category, collection_name='posts')
pub_date = db.DateTimeProperty(auto_now_add=True)
author = db.UserProperty(auto_current_user_add=True)
author_name = db.StringProperty()
published = db.BooleanProperty(default=True)
rating = db.IntegerProperty()
#comments_count = db.IntegerProperty(default=0)
class Comment(db.Model):
body = db.StringProperty(multiline=True)
post = db.ReferenceProperty(BlogPost, collection_name='comments')
date = db.DateTimeProperty(auto_now_add=True)
author = db.UserProperty(auto_current_user_add=True)
author_name = db.StringProperty()
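# Hedged usage sketch (added for illustration; running it requires the App
# Engine datastore environment, which this module does not set up):
#
#   post = BlogPost(title='Hello', body='First post')
#   post.put()
#   Comment(body='Nice post!', post=post).put()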
|
UTF-8
|
Python
| false
| false
| 1,235
|
py
| 14
|
models.py
| 3
| 0.709312
| 0.706883
| 0
| 32
| 36.65625
| 70
|
by-student-2017/DFTBaby-0.1.0-31Jul2019
| 5,652,176,964,270
|
9bd5bc40c283942cee6fac8d023f3b9249d5eb5f
|
aac5982c8dcf26221419086fb90c399b9f4324ef
|
/DFTB/DensityFitting.py
|
14f45c93f633d9f9d9a7761c2b6096cfbe2bc581
|
[] |
no_license
|
https://github.com/by-student-2017/DFTBaby-0.1.0-31Jul2019
|
99184d3fa2976d4e02f7f1bddee97e56526d9365
|
92cb73f1a6472f88588986561349d7f2ad1b1c15
|
refs/heads/master
| 2022-12-12T00:12:50.449505
| 2020-09-01T21:05:59
| 2020-09-01T21:05:59
| 290,116,049
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
"""
import numpy as np
import numpy.linalg as la
from DFTB.BasisSets import AtomicBasisSet, AuxiliaryBasisSet
from DFTB import AtomicData
from DFTB import XYZ
class DensityFitting:
def __init__(self, atomlist, hubbard_U):
"""
Parameters:
===========
bfs: list of numeric basis functions
bfs_aux: list of auxiliary basis functions
"""
self.atomlist = atomlist
self.bfs = AtomicBasisSet(atomlist).bfs
self.bfs_aux = AuxiliaryBasisSet(atomlist, hubbard_U).bfs
# create a list of points that should have the symmetry
# of the molecule on which the true density should agree
# with the model density
Xs,Ys,Zs = [], [], []
# atomic centers
for Zi,posi in atomlist:
x,y,z = posi
Xs.append(x)
Ys.append(y)
Zs.append(z)
# points between atoms
for i,(Zi,posi) in enumerate(atomlist):
for j,(Zj,posj) in enumerate(atomlist):
if i == j:
continue
Rij = np.array(posj)-np.array(posi)
x,y,z = np.array(posi) + 0.33*Rij
Xs.append(x)
Ys.append(y)
Zs.append(z)
x,y,z = np.array(posi) - 0.33*Rij
Xs.append(x)
Ys.append(y)
Zs.append(z)
# points perpendicular to plane between 3 atoms
for i,(Zi,posi) in enumerate(atomlist):
for j,(Zj,posj) in enumerate(atomlist):
if i == j:
continue
for k,(Zk,posk) in enumerate(atomlist):
if i == k:
continue
if i == j:
continue
Rij = np.array(posj) - np.array(posi)
Rik = np.array(posk) - np.array(posi)
x,y,z = np.array(posi) + 0.25*np.cross(Rij, Rik)
Xs.append(x)
Ys.append(y)
Zs.append(z)
# NOTE: the regular grid below overwrites the symmetry-adapted points
# constructed above, so the fit is sampled on a uniform 30x30x30 grid.
Xs,Ys,Zs = np.mgrid[-6.0:6.0:30j,-6.0:6.0:30j,-6.0:6.0:30j]
Xs = Xs.ravel()
Ys = Ys.ravel()
Zs = Zs.ravel()
grid = np.array(Xs), np.array(Ys), np.array(Zs)
# number of fit parameters
self.Nfit = len(self.bfs_aux)
assert self.Nfit % 4 == 0
# number of basis functions
self.Nbfs = len(self.bfs)
# number of sample points
self.Npts = len(Xs)
assert self.Npts > self.Nfit
# save grid to xyz file
gridlist = atomlist[:]
for i in range(0, self.Npts):
gridlist.append( (1, (Xs[i], Ys[i], Zs[i])) )
XYZ.write_xyz("/tmp/fitgrid.xyz", [gridlist])
# evaluate all basis functions on the grid
self.bf_grid = np.zeros((self.Npts, self.Nbfs))
for m,bfm in enumerate(self.bfs):
self.bf_grid[:,m] = bfm.amp(*grid)
# evaluate fit functions on the grid
self.bf_aux_grid = np.zeros((self.Npts, self.Nfit))
for m,bfm_aux in enumerate(self.bfs_aux):
self.bf_aux_grid[:,m] = bfm_aux.amp(*grid)
#
M = self.bf_aux_grid
self.M = M
# constrained C.x - Q = 0
Id4 = np.eye(4)
C = np.hstack([Id4 for i in range(0, self.Nfit // 4)])  # integer division for Python 3
Ct = C.transpose()
#
Mt = M.transpose()
MtM = np.dot(Mt, M)
MtMinv = la.inv(MtM)
#
self.A1 = np.dot(C,np.dot(MtMinv, Mt))
self.A2 = np.dot(C,np.dot(MtMinv, Ct))
self.A3 = np.dot(MtMinv, Ct)
self.A4 = np.dot(MtMinv,Mt)
def fit(self, P, Qtot, Dtot):
"""
Parameters:
===========
P: (partial) density matrix
Qtot: total charge
Dtot: total dipole moment
"""
# compute true density
y = np.zeros(self.Npts)
for m in range(0, self.Nbfs):
for n in range(0, self.Nbfs):
y += self.bf_grid[:,m] * P[m,n] * self.bf_grid[:,n]
# linear least square fit
Q = np.array([Qtot, Dtot[0], Dtot[1], Dtot[2]])
print "Q = %s" % Q
# solve for Lagrange multiplier
c = Q - np.dot(self.A1,y)
lagr = la.solve(self.A2,c)
print "Lagrange multiplier = %s" % lagr
#
x = np.dot(self.A4,y) + np.dot(self.A3,lagr)
print " Density Fitting"
print " ==============="
print " Atom dQ partial dipole"
for i,(Zi,posi) in enumerate(self.atomlist):
print " %s%d %+5.3f [%+5.3f %+5.3f %+5.3f]" \
% (AtomicData.atom_names[Zi-1], i, x[4*i+0], x[4*i+1], x[4*i+2], x[4*i+3])
# compare fit density with true density at the fit points
yfit = np.dot(self.M,x)
err = np.sum(abs(y-yfit))
"""
print "RHO"
print y
print "RHO fit"
print yfit
"""
print "|y-yfit| = %s" % err
print "|y-0 | = %s" % np.sum(abs(y))
return x
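# Hedged standalone sketch (added for illustration, not part of DFTBaby): the
# fit above is an equality-constrained linear least squares,
#     min ||M x - y||^2   subject to   C x = Q,
# solved via a Lagrange multiplier. The same algebra on random data:
if __name__ == '__main__':
    np.random.seed(0)
    M = np.random.rand(50, 8)                  # 50 sample points, 8 coefficients
    y = np.random.rand(50)                     # sampled "true" density
    C = np.hstack([np.eye(4), np.eye(4)])      # 4 constraints over 8 unknowns
    Q = np.array([1.0, 0.0, 0.0, 0.0])         # constrained totals
    MtMinv = la.inv(np.dot(M.T, M))
    lagr = la.solve(np.dot(np.dot(C, MtMinv), C.T),
                    Q - np.dot(np.dot(np.dot(C, MtMinv), M.T), y))
    x = np.dot(MtMinv, np.dot(M.T, y) + np.dot(C.T, lagr))
    assert np.allclose(np.dot(C, x), Q)        # constraint is satisfied exactly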
|
UTF-8
|
Python
| false
| false
| 5,088
|
py
| 348
|
DensityFitting.py
| 342
| 0.482115
| 0.468553
| 0
| 150
| 32.886667
| 90
|
dukebw/rl-study-group-supreme-octo-memory
| 1,030,792,161,861
|
7bb37ebad558e056038b3ca061d7888135881fc9
|
7cb99a8393e641f70f9a70a5f33ac85c1198cee0
|
/week2/hw1/behaviour_cloning.py
|
386c50dde181317b465aeeb94464103fde6a7d4e
|
[] |
no_license
|
https://github.com/dukebw/rl-study-group-supreme-octo-memory
|
299a23095ba3ca502e2fc812474c6dd06efab54a
|
0f0dbb5280b744fafb13d913fc4834b5ca1a194b
|
refs/heads/master
| 2021-09-06T04:56:11.240081
| 2017-12-20T16:18:23
| 2017-12-20T16:18:23
| 103,286,800
| 0
| 1
| null | false
| 2017-12-20T16:18:24
| 2017-09-12T15:15:21
| 2017-10-19T12:23:30
| 2017-12-20T16:18:24
| 38
| 0
| 1
| 0
|
C
| false
| null |
"""Deep RL Fall 2017 HW1 Section 2."""
import argparse
import math
import pickle
import random
import sys
import gym
import load_policy
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import torch
import torch.nn.utils.rnn as rnn_utils
class GRULinear(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers):
super(GRULinear, self).__init__()
self.gru = torch.nn.GRU(input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=True,
batch_first=True,
dropout=0.5)
# NOTE: the output dimension is hard-coded to 3 here rather than derived
# from the expert data's action size.
self.linear = torch.nn.Linear(in_features=32,
out_features=3,
bias=True)
def forward(self, inputs, hidden_prev=None):
outputs, hidden_curr = self.gru(inputs, hidden_prev)
if isinstance(inputs, rnn_utils.PackedSequence):
unpacked, seq_lengths = rnn_utils.pad_packed_sequence(
outputs, batch_first=True)
# NOTE(brendan): The unpacked sequence is unpadded before being
# input to the linear layer, so that any values not in the original
# sequences are thrown away.
outputs = _unpad(unpacked, seq_lengths)
pred = self.linear(outputs)
return pred, hidden_curr
def _unpad(padded, seq_lengths):
"""Removes all the padding from a tensor padded along dimension 1, and
returns the result.
"""
return torch.cat(
[padded[i, :seq_len, :] for i, seq_len in enumerate(seq_lengths)],
dim=0)
def _get_batch(batch):
"""Gets next batch, returning a padded sequence."""
seq_lengths = [b.shape[0] for b in batch]
# NOTE(brendan): Each sequence is padded out to the maximum sequence
# length.
for i, seq in enumerate(batch):
batch[i] = np.pad(
seq,
pad_width=((0, seq_lengths[0] - seq.shape[0]), (0, 0)),
mode='edge')
batch = np.array(batch)
batch = batch.astype(np.float32)
batch = torch.from_numpy(batch)
return batch, seq_lengths
def _get_rollout(obs, actions, batch_i, batch_indices, batch_size):
r"""Get rollout {(x_i, \pi(x_i)}.
Returns a (observations, actions) tuple. `obs` is a packed sequence, where
all sequences have been padded to the length of the longest sequence in the
batch.
"""
def _seq_len(a):
return a[1].shape[0]
start_i = batch_i*batch_size
end_i = (batch_i + 1)*batch_size
obs = [obs[i] for i in batch_indices[start_i:end_i]]
obs = sorted(enumerate(obs), key=_seq_len, reverse=True)
sort_indices = [batch_indices[start_i + o[0]] for o in obs]
obs = [o[1] for o in obs]
actions = [actions[i] for i in sort_indices]
obs, seq_lengths = _get_batch(obs)
actions, _ = _get_batch(actions)
obs = torch.autograd.Variable(obs, requires_grad=True).cuda()
actions = _unpad(actions, seq_lengths)
actions = torch.autograd.Variable(actions).cuda(non_blocking=True)  # 'async' is reserved in Python 3.7+
obs = rnn_utils.pack_padded_sequence(obs,
lengths=seq_lengths,
batch_first=True)
return obs, actions
def _evaluate(plt_mean_returns, env, model, num_rollouts):
"""Evaluate mean return of model and store in plt_mean_returns."""
returns = []
visited_obs = [[] for _ in range(num_rollouts)]
model.eval()
for i in range(num_rollouts):  # was range(10); must match len(visited_obs)
done = False
hidden_curr = None
obs = env.reset()
steps = 0
total_r = 0.0
while not done:
obs = obs[np.newaxis, np.newaxis, :]
obs = torch.from_numpy(obs.astype(np.float32))
obs = torch.autograd.Variable(obs, volatile=True)
action, hidden_curr = model(obs.cuda(), hidden_curr)
obs, r, done, _ = env.step(action.cpu().data.numpy())
visited_obs[i].append(obs)
total_r += r
steps += 1
if steps >= env.spec.timestep_limit:
break
print('return: {}'.format(total_r))
returns.append(total_r)
plt_mean_returns.append(np.mean(returns))
return visited_obs
def _get_subseqs(subseqs, seq, start_i, subseq_len):
"""Retrieves a set of subsequences from the middle of seq, and appends the
stacked subsequences to subseqs.
"""
for i in range((len(seq) - start_i)//subseq_len):
start = subseq_len*i + start_i
end = subseq_len*(i + 1) + start_i
subseqs.append(seq[start:end])
def _form_subsequence_minibatches(subseq_obs, subseq_actions, boxs_loop):
"""Turns lists of observation and action sequences into lists of
subsequences, with maximum length `subseq_len`.
"""
subseq_len = 100
for obs_i, obs_seq in enumerate(boxs_loop['data']['obs']):
actions_seq = boxs_loop['data']['action'][obs_i]
if obs_seq.shape[0] < subseq_len:
subseq_obs.append(obs_seq)
subseq_actions.append(actions_seq)
continue
start_i = math.floor(np.random.uniform(0, subseq_len))
start_i = min(start_i, obs_seq.shape[0] - subseq_len)
_get_subseqs(subseq_obs, obs_seq, start_i, subseq_len)
_get_subseqs(subseq_actions, actions_seq, start_i, subseq_len)
def _train_single_epoch(plt_train_mean_losses, boxs_loop, batch_size):
"""Train model for one epoch."""
boxs_loop['model'].train()
subseq_obs = []
subseq_actions = []
_form_subsequence_minibatches(subseq_obs, subseq_actions, boxs_loop)
batch_indices = list(range(len(subseq_obs)))
random.shuffle(batch_indices)
losses = []
for batch_i in range(len(subseq_obs)//batch_size):
obs, actions = _get_rollout(subseq_obs,
subseq_actions,
batch_i,
batch_indices,
batch_size)
pred, _ = boxs_loop['model'](obs)
mse_loss = boxs_loop['criticism'](pred, actions)
print(mse_loss.cpu().data.numpy()[0])
losses.append(mse_loss.data[0])
boxs_loop['optimizer'].zero_grad()
mse_loss.backward()
boxs_loop['optimizer'].step()
plt_train_mean_losses.append(np.mean(losses))
def _get_model(expert_data, model_name):
"""MLP with four layers of 32 hidden units each."""
models = {
'feedforward': torch.nn.Sequential(
torch.nn.Linear(
in_features=expert_data['observations'][0].shape[-1],
out_features=32,
bias=True),
torch.nn.ReLU(),
torch.nn.Linear(
in_features=32,
out_features=32,
bias=True),
torch.nn.ReLU(),
torch.nn.Linear(
in_features=32,
out_features=32,
bias=True),
torch.nn.ReLU(),
torch.nn.Linear(
in_features=32,
out_features=expert_data['actions'][0].shape[-1],
bias=True)),
'gru': GRULinear(
input_size=expert_data['observations'][0].shape[-1],
hidden_size=32,
num_layers=3),
}
return models[model_name]
def _init_boxs_loop(expert_data, model_name):
"""Initializes Box's loop:
(model(data) -> inference -> criticism -> updated model(data))*.
"""
model = _get_model(expert_data, model_name).cuda()
criticism = torch.nn.MSELoss().cuda()
biases = [p for name, p in model.named_parameters() if 'bias' in name]
weights = [p for name, p in model.named_parameters() if 'bias' not in name]
optimizer = torch.optim.Adam([{'params': biases, 'weight_decay': 0},
{'params': weights, 'weight_decay': 1e-4}],
lr=1e-3)
return {'data': {'obs': expert_data['observations'],
'action': expert_data['actions']},
'model': model,
'criticism': criticism,
'optimizer': optimizer}
def _subplot(nrows_ncols_plotnum, x, y, xlab, ylab):
"""Create subplot using the nrows_ncols_plotnum shorthand."""
ax = plt.subplot(nrows_ncols_plotnum)
ax.set_xlabel(xlab)
ax.set_ylabel(ylab)
plt.plot(x, y)
def behaviour_cloning(flags):
"""Imitation learning from saved (obs, actions) pairs from an expert
policy.
"""
torch.backends.cudnn.benchmark = True
env = gym.make(flags.envname)
# NOTE(brendan): expert_data is expected to be a list of numpy arrays of
# observations/actions. Each outer list represents a sequence constituting
# one episode observed by the expert.
with open(flags.bhvr_clone_gt_file, 'rb') as f:
expert_data = pickle.load(f)
policy_fn = load_policy.load_policy(flags.expert_policy_file)
boxs_loop = _init_boxs_loop(expert_data, flags.model_name)
plt_train_mean_losses = []
plt_mean_returns = []
for _ in range(flags.num_epochs):
_train_single_epoch(plt_train_mean_losses, boxs_loop, flags.batch_size)
visited_obs = _evaluate(plt_mean_returns,
env,
boxs_loop['model'],
flags.num_rollouts)
expert_actions = [[] for _ in range(flags.num_rollouts)]
for i, rollout in enumerate(visited_obs):
for obs in rollout:
expert_actions[i].append(policy_fn(obs[None, :]))
expert_actions = [np.squeeze(np.array(a, dtype=np.float32))
for a in expert_actions]
visited_obs = [np.squeeze(np.array(o, dtype=np.float64))
for o in visited_obs]
boxs_loop['data']['obs'] += visited_obs
boxs_loop['data']['action'] += expert_actions
sys.stdout.flush()
epochs = list(range(len(plt_train_mean_losses)))
plt.figure(1)
_subplot(211, epochs, plt_train_mean_losses, 'Epoch', 'Training loss')
_subplot(212, epochs, plt_mean_returns, 'Epoch', 'Return')
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bhvr-clone-gt-file', type=str, default=None)
parser.add_argument('--envname', type=str, default=None)
parser.add_argument('--expert-policy-file', type=str, default=None)
parser.add_argument('--model-name', type=str, default=None)
parser.add_argument('--batch-size', type=int, default=None)
parser.add_argument('--num-epochs', type=int, default=None)
parser.add_argument('--num-rollouts', type=int, default=None)
args = parser.parse_args()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)):
behaviour_cloning(args)
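def _pack_unpack_demo():
    """Hedged sketch (added for illustration; never called by the solution):
    round-trips variable-length sequences through pack/pad, which is what
    _get_rollout relies on. Sequences must be sorted by decreasing length."""
    seqs = [torch.ones(5, 3), torch.ones(3, 3)]               # lengths 5 and 3
    padded = rnn_utils.pad_sequence(seqs, batch_first=True)   # shape (2, 5, 3)
    packed = rnn_utils.pack_padded_sequence(padded, lengths=[5, 3],
                                            batch_first=True)
    unpacked, lens = rnn_utils.pad_packed_sequence(packed, batch_first=True)
    assert unpacked.shape == padded.shape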
|
UTF-8
|
Python
| false
| false
| 11,020
|
py
| 10
|
behaviour_cloning.py
| 2
| 0.57677
| 0.569147
| 0
| 329
| 32.495441
| 79
|
BigAngryDinosaur/kattispy
| 4,604,204,968,495
|
59478432472423f46f1b741b116281d420da945b
|
6aafd92070272371cec89345391517c25acf6441
|
/dominoes2.py
|
e395af8eed6ad83cff00c3af0b06169ce44d8ec1
|
[] |
no_license
|
https://github.com/BigAngryDinosaur/kattispy
|
747054bc4d7bcaf6c31b2a3964307fe732eeee99
|
130fe52061f3a738f669eb0bc90738a8da81344c
|
refs/heads/master
| 2023-02-23T00:12:23.028851
| 2021-01-14T15:23:44
| 2021-01-14T15:23:44
| 329,654,286
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
read = lambda x: list(map(x, sys.stdin.readline().split()))
def main():
num_test_cases = read(int)[0]
for _ in range(num_test_cases):
n, m, l = read(int)
graph = { s:[] for s in range(1, n+1) }
for _ in range(m):
s, d = read(int)
graph[s].append(d)
visited = [False] * (n+1)
dominoes = 0
def dfs(node):
nonlocal dominoes
dominoes += 1
visited[node] = True
for des in graph[node]:
if not visited[des]:
dfs(des)
for _ in range(l):
source = read(int)[0]
if not visited[source]:
dfs(source)
print(dominoes)
if __name__ == "__main__":
main()
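# Hedged alternative (added for illustration, not part of the submission):
# the recursive dfs above can hit Python's default recursion limit on long
# domino chains; an explicit stack computes the same count iteratively.
def dfs_iterative(graph, visited, source):
    count = 0
    stack = [source]
    while stack:
        node = stack.pop()
        if visited[node]:
            continue
        visited[node] = True
        count += 1
        stack.extend(graph[node])
    return count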
|
UTF-8
|
Python
| false
| false
| 815
|
py
| 18
|
dominoes2.py
| 18
| 0.451534
| 0.442945
| 0
| 35
| 22.285714
| 59
|
orange-eng/internship
| 13,099,650,253,022
|
9e49ec31b57e2b4efe29b75f5b401fbfb2b9bfd5
|
f0adf5afb93b7f0a67802e876a02e898cd92a172
|
/Baidu/learning_to_simulate/learning_to_simulate/connectivity_utils.py
|
5bd781355d3d3dcaf9df50dc34f4b9d9668e0d78
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/orange-eng/internship
|
9a2f746b3d50673038481392100d375f6eec82d3
|
c8c566df453d3a4bdf692338f74916ae15792fa1
|
refs/heads/main
| 2023-07-18T11:46:36.659858
| 2021-08-31T09:39:10
| 2021-08-31T09:39:10
| 358,230,295
| 2
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools to compute the connectivity of the graph."""
import functools
import numpy as np
from sklearn import neighbors
import tensorflow.compat.v1 as tf
def _compute_connectivity(positions, radius, add_self_edges):
"""Get the indices of connected edges with radius connectivity.
Args:
positions: Positions of nodes in the graph. Shape:
[num_nodes_in_graph, num_dims].
radius: Radius of connectivity.
add_self_edges: Whether to include self edges or not.
Returns:
senders indices [num_edges_in_graph]
receiver indices [num_edges_in_graph]
"""
# Use a KD-tree to find, for each node, the neighbouring nodes within the connectivity radius
tree = neighbors.KDTree(positions)
receivers_list = tree.query_radius(positions, r=radius)
num_nodes = len(positions)
senders = np.repeat(range(num_nodes), [len(a) for a in receivers_list])
receivers = np.concatenate(receivers_list, axis=0)
if not add_self_edges:
# Remove self edges.
mask = senders != receivers
senders = senders[mask]
receivers = receivers[mask]
return senders, receivers
def _compute_connectivity_for_batch(
positions, n_node, radius, add_self_edges):
"""`compute_connectivity` for a batch of graphs.
Args:
positions: Positions of nodes in the batch of graphs. Shape:
[num_nodes_in_batch, num_dims].
n_node: Number of nodes for each graph in the batch. Shape:
[num_graphs in batch].
radius: Radius of connectivity.
add_self_edges: Whether to include self edges or not.
Returns:
senders indices [num_edges_in_batch]
receiver indices [num_edges_in_batch]
number of edges per graph [num_graphs_in_batch]
"""
# TODO(alvarosg): Consider if we want to support batches here or not.
# Separate the positions corresponding to particles in different graphs.
positions_per_graph_list = np.split(positions, np.cumsum(n_node[:-1]), axis=0)
# np.cumsum computes the cumulative sum along the axis, giving the split offsets
receivers_list = []
senders_list = []
n_edge_list = []
num_nodes_in_previous_graphs = 0
# Compute connectivity for each graph in the batch.
for positions_graph_i in positions_per_graph_list:
senders_graph_i, receivers_graph_i = _compute_connectivity(
positions_graph_i, radius, add_self_edges)
num_edges_graph_i = len(senders_graph_i)
n_edge_list.append(num_edges_graph_i)
# Because the inputs will be concatenated, we need to add offsets to the
# sender and receiver indices according to the number of nodes in previous
# graphs in the same batch.
receivers_list.append(receivers_graph_i + num_nodes_in_previous_graphs)
senders_list.append(senders_graph_i + num_nodes_in_previous_graphs)
num_nodes_graph_i = len(positions_graph_i)
num_nodes_in_previous_graphs += num_nodes_graph_i
# Concatenate all of the results.
senders = np.concatenate(senders_list, axis=0).astype(np.int32)
receivers = np.concatenate(receivers_list, axis=0).astype(np.int32)
n_edge = np.stack(n_edge_list).astype(np.int32)
return senders, receivers, n_edge
def compute_connectivity_for_batch_pyfunc(
positions, n_node, radius, add_self_edges=True):
"""`_compute_connectivity_for_batch` wrapped in a pyfunc."""
partial_fn = functools.partial(
_compute_connectivity_for_batch, add_self_edges=add_self_edges)
senders, receivers, n_edge = tf.py_function(
partial_fn,
[positions, n_node, radius],
[tf.int32, tf.int32, tf.int32])
senders.set_shape([None])
receivers.set_shape([None])
n_edge.set_shape(n_node.get_shape())
return senders, receivers, n_edge
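def _demo_connectivity():
    """Hedged usage sketch (added for illustration, not part of the library):
    three points on a line with radius 1.5 connect only adjacent neighbours."""
    positions = np.array([[0.0], [1.0], [2.0]])
    senders, receivers = _compute_connectivity(
        positions, radius=1.5, add_self_edges=False)
    # Expected edges: 0<->1 and 1<->2, i.e. senders [0, 1, 1, 2]
    print(senders, receivers)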
|
UTF-8
|
Python
| false
| false
| 4,351
|
py
| 172
|
connectivity_utils.py
| 135
| 0.70334
| 0.6968
| 0
| 121
| 34.371901
| 80
|
g4lk/VisionArtificial
| 15,144,054,725,134
|
acb4fe84b82f192e38f9752731279015729d28fb
|
7c34c786e3e52f17972fd729b97f84fd732af5ab
|
/prueba.py
|
de20d20405f8669b5567b62cd78236a7d057f258
|
[] |
no_license
|
https://github.com/g4lk/VisionArtificial
|
8b141630eb8178e2414b33bee56fa8aebc6324be
|
7dad7bf1449c1dc39d4c1dd51cd5dffffc134ee6
|
refs/heads/master
| 2020-04-28T22:59:13.609560
| 2019-04-09T21:11:03
| 2019-04-09T21:11:03
| 175,637,566
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Write Python code here
# import the necessary packages
import cv2 as cv
import os,sys
import numpy as np
def mejorContraste(img):
#-----Converting image to LAB Color model-----------------------------------
lab= cv.cvtColor(img, cv.COLOR_BGR2LAB)
#-----Splitting the LAB image to different channels-------------------------
l, a, b = cv.split(lab)
#-----Applying CLAHE to L-channel-------------------------------------------
clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(4,4))
cl = clahe.apply(l)
#-----Merge the CLAHE enhanced L-channel with the a and b channel-----------
limg = cv.merge((cl,a,b))
#-----Converting image from LAB Color model to RGB model--------------------
final = cv.cvtColor(limg, cv.COLOR_LAB2BGR)
return final
#https://stackoverflow.com/questions/32522989/opencv-better-detection-of-red-color/32523532?noredirect=1#comment74897695_32523532
def generarMascaraRojos(img):
inversa = ~img
hsv_inversa = cv.cvtColor(inversa,cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_inversa,np.array([90-10, 10, 70]),np.array([90+10, 255, 255]))
return mask
# Masks: circular, triangular, octagonal
count_c, count_t, count_o = 0, 0, 0
contador_c, contador_t, contador_o = np.zeros(shape=(25, 25, 3)), np.zeros(shape=(25, 25, 3)), np.zeros(shape=(25, 25, 3))
for dir in os.listdir('./train_recortadas/'):
for dir2 in os.listdir(f'./train_recortadas/{dir}/'):
for imagen in os.listdir(f'./train_recortadas/{dir}/{dir2}/'):
img = cv.imread(f'./train_recortadas/{dir}/{dir2}/{imagen}')
image = mejorContraste(img)
mask = generarMascaraRojos(image)
res = cv.bitwise_and(image, image, mask=mask)
if dir == 'obligacion':
count_c += 1
contador_c = cv.add(contador_c, np.float64(cv.resize(res, (25, 25))))
elif dir == 'peligro':
count_t += 1
contador_t = cv.add(contador_t, np.float64(cv.resize(res, (25, 25))))
elif dir == 'stop':
count_o += 1
contador_o = cv.add(contador_o, np.float64(cv.resize(res, (25, 25))))
np.set_printoptions(threshold=sys.maxsize)
mc = np.uint8(np.divide(contador_c,count_c))
mt = np.uint8(np.divide(contador_t,count_t))
mo = np.uint8(np.divide(contador_o,count_o))
cv.imshow('mc',cv.resize(mc, (250,250)))
cv.imshow('mt',cv.resize(mt, (250,250)))
cv.imshow('mo',cv.resize(mo, (250,250)))
cv.waitKey(0)
print('MC:' + str(mc))
print('MT:' + str(mt))
print('MO:' + str(mo))
# close all open windows
cv.destroyAllWindows()
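# Hedged usage sketch (added for illustration; the file name is an assumption):
# the contrast boost and red mask above can be tried on a single image with
#   img = cv.imread('sign.jpg')
#   mask = generarMascaraRojos(mejorContraste(img))
#   res = cv.bitwise_and(img, img, mask=mask)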
|
UTF-8
|
Python
| false
| false
| 2,617
|
py
| 6
|
prueba.py
| 3
| 0.596485
| 0.549484
| 0
| 68
| 37.485294
| 129
|
Glyochi/Python_FacialReg
| 6,055,903,917,946
|
17e1089faa432495e1cfdea5d0832aedd66b5696
|
304377380f29ee6ee423445dd77e8d57075cf6b0
|
/FacialDetection/ImageManager.py
|
354eb670b8b7886961a5ee9d4bcd7fdcd0a61a75
|
[] |
no_license
|
https://github.com/Glyochi/Python_FacialReg
|
5de80d52e6fe1f38f323607ea88a9bfcadff2cf4
|
ad151ec69d03e37851b49b8f33828ccad5c9700b
|
refs/heads/main
| 2023-06-29T01:46:59.848651
| 2021-08-07T23:39:33
| 2021-08-07T23:39:33
| 382,724,537
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2 as cv
from Point import Point
from DetectedArea import DetectedArea
from DetectedArea import DetectedFace
from helperFunctions import *
import numpy as np
"""
At this point in development, the ImageManager needs to store the image and perform eye detection on it at 0, 90, 180, and 270 degrees.
ImageManager then collects all "detected eyes" (DetectedArea objects), whose x, y coordinates still correspond to the rotated angle of the image in which
they were found. It then converts those x, y values to coordinates in the original, non-rotated image.
After that, it checks for overlapping DetectedAreas and merges them into one big DetectedArea object.
Once that's done, we get an array of all the eye locations and their sizes. We can then connect every possible pair of approximately same-size eyes to see
if the distance between them is "reasonable". If a pair of eyes has a reasonable distance in between and the sizes of the eyes don't differ too much,
we can call it a potential face.
Once we get an array of potential faces, we rotate the image by an angle decided by the positions of the two eyes (to increase precision), crop out the face (to increase performance),
and run facial detection on that.
At the end, ImageManager should have an array of all detected faces.
"""
class ImageManager:
"""
A manager that stores the actual image and can run image-processing functions on it. This object takes care of facial detection.
"""
haarcascade_eye = cv.CascadeClassifier("classifier/haarcascade_eye.xml")
haarcascade_face = cv.CascadeClassifier("classifier/haarcascade_frontalface_default.xml")
# haarcascade_face = cv.CascadeClassifier("classifier/lbpcascaade_frontalface_improved.xml")
haarcascade_nose = cv.CascadeClassifier("classifier/haarcascade_nose.xml")
HARDCODED_similarSizeScale = 0.5
HARDCODED_pairOfEyesDistanceRange = (1.5, 3.5)
# eye and face min and max dimensions, assuming a 500-pixel-wide image
HARDCODED_eyeMinDimensions = (10, 10)
HARDCODED_eyeMaxDimensions = (200, 200)
HARDCODED_faceMinDimensions = (80, 80)
HARDCODED_faceMaxDimensions = (500, 500)
def __init__(self, img):
"""
Constructing an ImageManger object
:param img: the image/frame that we will run facial detection on
"""
# Blank canvas that we are going to use to store the rotated image
self.image = img
self.grayImage = cv.cvtColor(self.image, cv.COLOR_BGR2GRAY)
self.imageCenter = Point(img.shape[1]/2, img.shape[0]/2)
def HELPER_runHaarDetectionCounterClockwiseAngle(self, detector, minDimensions, maxDimensions, angle, scaleFactor, minNeighbors):
"""
Run the given haar detection on the image rotated by the given angle and generate one array
containing the detected objects found.
:param detector: the haarcascade object that is going to scan the image
:param angle: the angle by which the image is rotated
:param scaleFactor: scaleFactor parameter for detectMultiScale function
:param minNeighbors: minNeighbors parameter for detectMultiScale function
:return a 2-tuple with the first element being an array containing all the raw coordinates of the detected objects in the rotated image,
and the second element the Point object containing the coordinates of the center of the rotated image.
"""
angle = angle % 360
# find objects in the image rotated by the given angle
rotatedGrayImage = rotateCounterClockwise(self.grayImage, angle)
# Collecting raw detected objects received from detectMultiScale
rawObjs = detector.detectMultiScale(rotatedGrayImage, scaleFactor = scaleFactor, minNeighbors = minNeighbors, minSize = minDimensions, maxSize = maxDimensions)
detectedAreas = []
rotatedCenter = Point(rotatedGrayImage.shape[1]/2, rotatedGrayImage.shape[0]/2)
for obj in rawObjs:
# Convert raw information into DetectedArea obj
area = DetectedArea((obj[0], obj[1]), (obj[2], obj[3]))
# Put the translated DetectedArea into translatedObj array
detectedAreas.append(area)
return (detectedAreas, rotatedCenter)
def HELPER_rotateDetectedAreaClockwise(self, rawPositions, origin, angle):
"""
Rotate detectedAreas in rawPositions around the given origin
:param rawPositions: the array that contains detectedAreas with raw values taken from detectMultiScale
:param origin: the origin which the detectedAreas are rotating around
:param angle: the angle by which the image was rotated when detectMultiScale ran
:return an array containing detectedAreas with translated coordinates in the non-rotated image
"""
angle = angle % 360
if angle == 0:
return rawPositions
# Translate the coordinates to the coordinates in the non-rotated image
for area in rawPositions:
area.rotateAreaClockwise(origin, angle)
return rawPositions
def HELPER_projectDetectedArea(self, rawPositions, rotatedCenter):
"""
Project the raw positions such that the new positions relative to self.imageCenter are the same
as the old positions relative to rotatedCenter.
:param rawPositions: the array that contains detectedAreas
:return an array containing detectedAreas with projected coordinates
"""
# Translate the coordinates to the coordinates in the non-rotated image
for area in rawPositions:
area.projectArea(rotatedCenter, self.imageCenter)
return rawPositions
def HELPER_standardizeCounterClockwiseDetectedArea(self, detectedAreas, rotatedCenter, angle):
"""
Translate the coordinates of counter clockwise rotated detectedAreas to the original image coordinates
:param detectedAreas: a list of detectedArea objects
:param rotatedCenter: the rotated image's center's point object
:param angle: the angle by which the image was rotated counter clockwise by
:return a list of translated detectedAreas for the nonrotated image
"""
AllObjects = []
self.HELPER_rotateDetectedAreaClockwise(detectedAreas, rotatedCenter, angle)
self.HELPER_projectDetectedArea(detectedAreas, rotatedCenter)
AllObjects.append(detectedAreas)
return AllObjects
def HELPER_mergeDetectedObjs(self, list):
"""
Given a list of any number of arrays of detected objects, scan through all of them and, on finding two duplicates (detected objects with similar
sizes and positions), merge them; put all the unique detected objects in an array.
:param list: a list of arrays
:return an array that contains all the unique detected objects.
"""
array0 = list[0]
others = []
for i in range(1,len(list)):
others.append(list[i])
# checking duplicates in the first array
i = 0
while i < len(array0) - 1:
j = i + 1
while j < len(array0):
if array0[i].similarSize(array0[j], self.HARDCODED_similarSizeScale) and array0[i].overlap(array0[j]):
array0[i].merge(array0[j])
array0.pop(j)
j = j - 1
j = j + 1
i = i + 1
# check for duplicates in the other arrays; merge areas that are duplicates, append those that aren't
for array in others:
for area in array:
matched = False
# Scan through the array to find matches
for area0 in array0:
if area0.similarSize(area, self.HARDCODED_similarSizeScale) and area0.overlap(area):
area0.merge(area)
matched = True
break
# if it didn't find any matches, append it onto array0
if matched == False:
array0.append(area)
return array0
def HELPER_findEyesCounterClockwiseAngle(self, angle, scaleFactor, minNeighbors):
"""
Find the (non-paired) eyes in the counter clockwise rotated image. Merge the duplicates and return them in an array
:param angle: the angle by which the image is rotated by counter clockwise
:param scaleFactor: detectMultiscale parameter
:param minNeighbors: detectMultiscale parameter
:return an array of detected eyes in the counter clockwise rotated image
"""
(detectedEyes, rotatedCenter) = self.HELPER_runHaarDetectionCounterClockwiseAngle(self.haarcascade_eye, self.HARDCODED_eyeMinDimensions, self.HARDCODED_eyeMaxDimensions, angle, scaleFactor, minNeighbors)
eyes = self.HELPER_mergeDetectedObjs(self.HELPER_standardizeCounterClockwiseDetectedArea(detectedEyes, rotatedCenter, angle))
return eyes
def findPairsOfEyesCounterClockwiseAngle(self, angle, scaleFactor, minNeighbors):
"""
Find the pairs of eyes in the counter clockwise rotated image. Return them in an array containing 2-tuple with the
first element being the left-most eye and the second element the right-most eye.
:param angle: the angle by which the image is rotated by counter clockwise
:param scaleFactor: detectMultiscale parameter
:param minNeighbors: detectMultiscale parameter
:return an array of pairs of eyes
"""
eyes = self.HELPER_findEyesCounterClockwiseAngle(angle, scaleFactor, minNeighbors)
pairOfEyes = []
for i in range(len(eyes) - 1):
for j in range(i + 1, len(eyes)):
if eyes[i].similarSize(eyes[j], self.HARDCODED_similarSizeScale):
dist = eyes[i].center.distTo(eyes[j].center)
averageRadius = (eyes[i].radius + eyes[j].radius)/2  # fixed: was eyes[1].radius
if dist < self.HARDCODED_pairOfEyesDistanceRange[1] * averageRadius and dist > self.HARDCODED_pairOfEyesDistanceRange[0] * averageRadius:
# Let the left most eye be the first eye. This is for calculating relative angle of the face in findFacesUsingPairOfEyes method
if eyes[i].center.x < eyes[j].center.x:
pairOfEyes.append((eyes[i], eyes[j]))
else:
pairOfEyes.append((eyes[j], eyes[i]))
return pairOfEyes
def findPairsOfEyesCounterClockwiseMultipleAngles(self, angles, scaleFactor, minNeighbors):
"""
Find the pairs of eyes in the counter clockwise rotated images. Return them in an array containing 2-tuple with the
first element being the left-most eye and the second element the right-most eye.
:param angles: the angles by which the image is rotated by counter clockwise
:param scaleFactor: detectMultiscale parameter
:param minNeighbors: detectMultiscale parameter
:return an array of pairs of eyes
"""
eyes = []
for angle in angles:
eyes.append(self.HELPER_findEyesCounterClockwiseAngle(angle, scaleFactor, minNeighbors))
eyes = self.HELPER_mergeDetectedObjs(eyes)
pairOfEyes = []
for i in range(len(eyes) - 1):
for j in range(i + 1, len(eyes)):
if eyes[i].similarSize(eyes[j], self.HARDCODED_similarSizeScale):
if eyes[i].appropriateDistanceTo(eyes[j], self.HARDCODED_pairOfEyesDistanceRange[0], self.HARDCODED_pairOfEyesDistanceRange[1]):
# Let the left most eye be the first eye. This is for calculating relative angle of the face in findFacesUsingPairOfEyes method
if eyes[i].center.x < eyes[j].center.x:
pairOfEyes.append((eyes[i], eyes[j]))
else:
pairOfEyes.append((eyes[j], eyes[i]))
return pairOfEyes
def DEBUG_findFacesUsingPairOfEyes(self, pairOfEyes, scaleFactor, minNeighbors):
"""
Using the given pairs of eyes, for each pair find the angle the face is leaning at, crop out the area the face could be in, and run haar detection on that area.
Return an array of all detectedAreas encapsulating faces.
:param pairOfEyes: 2-tuple (left eye, right eye)
:param scaleFactor: parameter for detectMultiScale
:param minNeighbors: parameter for detectedMultiScale
:return array of all detected faces
"""
# For right now, let face width be 6 average radius, and height be 10 average radius with 5 average radiuses from 2 eyes center to the top
# 5 average radiuses from 2 eyes center to the chin
debugArrayFaces = []
debugPotentialFaceNumber = 1
for pair in pairOfEyes:
leftEye = pair[0]
rightEye = pair[1]
eyeAverageRadius = (leftEye.radius + rightEye.radius)/2
# halfFaceDimensions store the distance from faceOrigin to the left border, to the upper border, and to the lower border
# faceMinRadius is the min radius of the rectangle encapsulating the detected Face
halfFaceDimensions = (eyeAverageRadius * 4, eyeAverageRadius * 5, eyeAverageRadius * 5)
faceMinRadius = eyeAverageRadius * 3
# relative angle is the relative angle of the right eye to the left eye, but I limit the ranges from 270 -> 0 -> 90 degree because faces usually aren't up side down,
# still, this function handles that case too (it scans upside down when no face is found)
relativeCounterClockwiseAngle = (leftEye.center.relativeCounterClockwiseAngle(rightEye.center) + 90) % 180 - 90
originalImageCenter = Point(self.grayImage.shape[1]/2, self.grayImage.shape[0]/2)
faceOrigin = Point((leftEye.center.x + rightEye.center.x)/2,(leftEye.center.y + rightEye.center.y)/2)
# Rotate the face origin point and the image such that the face is straightened up
rotatedGrayImage = rotateClockwise(self.grayImage, relativeCounterClockwiseAngle)
rotatedImageCenter = Point(rotatedGrayImage.shape[1]/2, rotatedGrayImage.shape[0]/2)
rotatedFaceOrigin = faceOrigin.rotatePointClockwise(originalImageCenter, relativeCounterClockwiseAngle)
rotatedFaceOrigin = rotatedFaceOrigin.projectPoint(originalImageCenter, rotatedImageCenter)
# DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step ---
debugOriginalImage = rotateClockwise(self.image, 0)
leftEye.draw(debugOriginalImage, (255,0,255), 2)
rightEye.draw(debugOriginalImage, (0,255,255), 2)
cv.circle(debugOriginalImage, faceOrigin.exportCoordinates(), 0, (0, 255, 0), 20)
debugRotatedOriginalImage = rotateClockwise(debugOriginalImage, relativeCounterClockwiseAngle)
cv.circle(debugRotatedOriginalImage, rotatedFaceOrigin.exportCoordinates(), 0, (255, 255, 255), 12)
# DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step --- DEBUG step ---
# Crop the potential face out
cropRangeMinY = int(max(0, rotatedFaceOrigin.y - halfFaceDimensions[1]))
cropRangeMaxY = int(min(rotatedGrayImage.shape[0], rotatedFaceOrigin.y + halfFaceDimensions[2]))
cropRangeMinX = int(max(0, rotatedFaceOrigin.x - halfFaceDimensions[0]))
cropRangeMaxX = int(min(rotatedGrayImage.shape[1], rotatedFaceOrigin.x + halfFaceDimensions[0]))
try:
rotatedCrop = np.zeros((cropRangeMaxY - cropRangeMinY, cropRangeMaxX - cropRangeMinX), dtype='uint8')
rotatedCrop[0: rotatedCrop.shape[0], 0: rotatedCrop.shape[1]] = \
rotatedGrayImage[cropRangeMinY:cropRangeMaxY, cropRangeMinX:cropRangeMaxX]
except:
rotatedCrop = np.zeros((cropRangeMaxY - cropRangeMinY, cropRangeMaxX - cropRangeMinX, 3), dtype ='uint8')
rotatedCrop[0: rotatedCrop.shape[0], 0: rotatedCrop.shape[1]] = \
rotatedGrayImage[cropRangeMinY:cropRangeMaxY, cropRangeMinX:cropRangeMaxX]
try:
debugRotatedCrop = np.zeros((cropRangeMaxY - cropRangeMinY, cropRangeMaxX - cropRangeMinX), dtype ='uint8')
debugRotatedCrop[0: debugRotatedCrop.shape[0], 0: debugRotatedCrop.shape[1]] = debugRotatedOriginalImage[cropRangeMinY:cropRangeMaxY, cropRangeMinX:cropRangeMaxX]
except:
debugRotatedCrop = np.zeros((cropRangeMaxY - cropRangeMinY, cropRangeMaxX - cropRangeMinX, 3), dtype ='uint8')
debugRotatedCrop[0: debugRotatedCrop.shape[0], 0: debugRotatedCrop.shape[1]] = debugRotatedOriginalImage[cropRangeMinY:cropRangeMaxY, cropRangeMinX:cropRangeMaxX]
rotatedFaceCenter = Point((cropRangeMinX + cropRangeMaxX)/2, (cropRangeMinY + cropRangeMaxY)/2)
croppedCenter = Point(rotatedCrop.shape[1]/2, rotatedCrop.shape[0]/2)
# find the face in the cropped Area
detectedFaces = self.haarcascade_face.detectMultiScale(rotatedCrop, scaleFactor, minNeighbors, minSize = self.HARDCODED_faceMinDimensions, maxSize = self.HARDCODED_faceMaxDimensions)
# DEBUGING AREA
debugRotatedCrop = cv.circle(debugRotatedCrop, croppedCenter.exportCoordinates(), 0, (255, 255, 255), 6)
# DEBUGING AREA
boolUpSideDown = False
if len(detectedFaces) == 0:
# Scan the image upside down in case of upside down faces
rotatedCrop = rotateClockwise(rotatedCrop, 180)
detectedFaces = self.haarcascade_face.detectMultiScale(rotatedCrop, scaleFactor, minNeighbors, minSize = self.HARDCODED_faceMinDimensions, maxSize = self.HARDCODED_faceMaxDimensions)
if len(detectedFaces) == 0:
continue
boolUpSideDown = True
# merge smaller faces to the biggest face
biggestFace = detectedFaces[0]
for i in range(1, len(detectedFaces)):
# Doesn't have to be too complicated, if one dimension is larger the other is 99% of the time larger as well
if detectedFaces[i][2] > biggestFace[2]:
biggestFace = detectedFaces[i]
# if the face's radius is too small then it's not a face (face right now is a 4-tuple (x, y, w, h))
if biggestFace[2] ** 2 + biggestFace[3] ** 2 < (faceMinRadius * 2) ** 2:
if boolUpSideDown:
continue
# Scan the image upside down in case of upside down faces
rotatedCrop = rotateClockwise(rotatedCrop, 180)
detectedFaces = self.haarcascade_face.detectMultiScale(rotatedCrop, scaleFactor, minNeighbors, minSize = self.HARDCODED_faceMinDimensions, maxSize = self.HARDCODED_faceMaxDimensions)
if len(detectedFaces) == 0:
continue
boolUpSideDown = True
# merge smaller faces to the biggest face
biggestFace = detectedFaces[0]
for i in range(1, len(detectedFaces)):
# if one dimension is larger the other is 99% of the time larger as well
if detectedFaces[i][2] > biggestFace[2]:
biggestFace = detectedFaces[i]
if biggestFace[2] ** 2 + biggestFace[3] ** 2 < (faceMinRadius * 2) ** 2:
continue
# if the face found was upside down, then face angle = eye's relative angle + 180
if boolUpSideDown:
counterClockwiseFaceAngle = relativeCounterClockwiseAngle + 180
# if not then = eye's relative angle
else:
counterClockwiseFaceAngle = relativeCounterClockwiseAngle
biggestFace = DetectedFace((biggestFace[0],biggestFace[1]), (biggestFace[2], biggestFace[3]), counterClockwiseFaceAngle)
if boolUpSideDown:
biggestFace.rotateAreaClockwise(croppedCenter, 180)
# Convert biggestFace coordinates from being in the cropped image to the original image
biggestFace.draw(debugRotatedCrop, (0, 0, 255), 2)
cv.imshow("Potential face " + str(debugPotentialFaceNumber), resizeMinTo(debugRotatedCrop, 250))
debugArrayFaces.append((biggestFace.copy(), rotatedFaceCenter, croppedCenter, rotatedImageCenter, relativeCounterClockwiseAngle, originalImageCenter))
debugPotentialFaceNumber = debugPotentialFaceNumber + 1
return debugArrayFaces
def findFacesUsingPairOfEyes(self, pairOfEyes, scaleFactor, minNeighbors):
"""
Using the given pairs of eyes, for each pair of similar-size eyes, find the angle the face is leaning at, crop out the area the face could be in, and run haar detection on that area.
Return an array of all detectedAreas encapsulating faces.
:param pairOfEyes: 2-tuple (left eye, right eye)
:param scaleFactor: parameter for detectMultiScale
:param minNeighbors: parameter for detectedMultiScale
:return array of all detected faces
"""
faces = []
# For right now, let face width be 6 average radius, and height be 10 average radius with 5 average radiuses from 2 eyes center to the top
# 5 average radiuses from 2 eyes center to the chin. I'm going to make the height ratio 3 top:5 bottom once i added mouth/nose detection for orientation
for pair in pairOfEyes:
leftEye = pair[0]
rightEye = pair[1]
eyeAverageRadius = (leftEye.radius + rightEye.radius)/2
# halfFaceDimensions store the distance from faceOrigin to the left border, to the upper border, and to the lower border
# faceMinRadius is the min radius of the rectangle encapsulating the detected Face
halfFaceDimensions = (eyeAverageRadius * 4, eyeAverageRadius * 5, eyeAverageRadius * 5)
faceMinRadius = eyeAverageRadius * 3
# relative angle is the relative angle of the right eye to the left eye, but I limit the ranges from 270 -> 0 -> 90 degree because faces usually aren't up side down,
# still, this function handles that case too (it scans upside down when no face is found)
relativeCounterClockwiseAngle = (leftEye.center.relativeCounterClockwiseAngle(rightEye.center) + 90) % 180 - 90
originalImageCenter = Point(self.grayImage.shape[1]/2, self.grayImage.shape[0]/2)
faceOrigin = Point((leftEye.center.x + rightEye.center.x)/2,(leftEye.center.y + rightEye.center.y)/2)
# Rotate the face origin point and the image such that the face is straightened up
rotatedGrayImage = rotateClockwise(self.grayImage, relativeCounterClockwiseAngle)
rotatedImageCenter = Point(rotatedGrayImage.shape[1]/2, rotatedGrayImage.shape[0]/2)
rotatedFaceOrigin = faceOrigin.rotatePointClockwise(originalImageCenter, relativeCounterClockwiseAngle).projectPoint(originalImageCenter, rotatedImageCenter)
# Crop the potential face out
cropRangeMinY = int(max(0, rotatedFaceOrigin.y - halfFaceDimensions[1]))
cropRangeMaxY = int(min(rotatedGrayImage.shape[0], rotatedFaceOrigin.y + halfFaceDimensions[2]))
cropRangeMinX = int(max(0, rotatedFaceOrigin.x - halfFaceDimensions[0]))
cropRangeMaxX = int(min(rotatedGrayImage.shape[1], rotatedFaceOrigin.x + halfFaceDimensions[0]))
try:
rotatedCrop = np.zeros((cropRangeMaxY - cropRangeMinY, cropRangeMaxX - cropRangeMinX), dtype='uint8')
rotatedCrop[0: rotatedCrop.shape[0], 0: rotatedCrop.shape[1]] = \
rotatedGrayImage[cropRangeMinY:cropRangeMaxY, cropRangeMinX:cropRangeMaxX]
except:
rotatedCrop = np.zeros((cropRangeMaxY - cropRangeMinY, cropRangeMaxX - cropRangeMinX, 3), dtype ='uint8')
rotatedCrop[0: rotatedCrop.shape[0], 0: rotatedCrop.shape[1]] = \
rotatedGrayImage[cropRangeMinY:cropRangeMaxY, cropRangeMinX:cropRangeMaxX]
rotatedFaceCenter = Point((cropRangeMinX + cropRangeMaxX)/2, (cropRangeMinY + cropRangeMaxY)/2)
croppedCenter = Point(rotatedCrop.shape[1]/2, rotatedCrop.shape[0]/2)
# find the face in the cropped Area
detectedFaces = self.haarcascade_face.detectMultiScale(rotatedCrop, scaleFactor, minNeighbors, minSize = self.HARDCODED_faceMinDimensions, maxSize = self.HARDCODED_faceMaxDimensions)
boolUpSideDown = False
# if found no faces right side up
if len(detectedFaces) == 0:
# Scan the image upside down
rotatedCrop = rotateClockwise(rotatedCrop, 180)
detectedFaces = self.haarcascade_face.detectMultiScale(rotatedCrop, scaleFactor, minNeighbors, minSize = self.HARDCODED_faceMinDimensions, maxSize = self.HARDCODED_faceMaxDimensions)
if len(detectedFaces) == 0:
continue
boolUpSideDown = True
# merge smaller faces to the biggest face
biggestFace = detectedFaces[0]
for i in range(1, len(detectedFaces)):
# Doesn't have to be too complicated, if one dimension is larger the other is 99% of the time larger as well
if detectedFaces[i][2] > biggestFace[2]:
biggestFace = detectedFaces[i]
# if the face's radius is too small then it's not a face (face right now is a 4-tuple (x, y, w, h))
if biggestFace[2] ** 2 + biggestFace[3] ** 2 < (faceMinRadius * 2) ** 2:
if boolUpSideDown:
continue
# Scan the image upside down in case of upside down faces
rotatedCrop = rotateClockwise(rotatedCrop, 180)
detectedFaces = self.haarcascade_face.detectMultiScale(rotatedCrop, scaleFactor, minNeighbors, minSize = self.HARDCODED_faceMinDimensions, maxSize = self.HARDCODED_faceMaxDimensions)
if len(detectedFaces) == 0:
continue
boolUpSideDown = True
# merge smaller faces to the biggest face
biggestFace = detectedFaces[0]
for i in range(1, len(detectedFaces)):
# if one dimension is larger the other is 99% of the time larger as well
if detectedFaces[i][2] > biggestFace[2]:
biggestFace = detectedFaces[i]
# if the face's radius is too small then it's not a face (face right now is a 4-tuple (x, y, w, h))
if biggestFace[2] ** 2 + biggestFace[3] ** 2 < (faceMinRadius * 2) ** 2:
continue
biggestFace = DetectedFace((biggestFace[0],biggestFace[1]), (biggestFace[2], biggestFace[3]))
if boolUpSideDown:
biggestFace.rotateAreaClockwise(croppedCenter, 180)
# Convert biggestFace coordinates from being in the cropped image to the original image
biggestFace.projectArea(croppedCenter, rotatedFaceCenter)
biggestFace.rotateAreaCounterClockwise(rotatedImageCenter, relativeCounterClockwiseAngle)
biggestFace.projectArea(rotatedImageCenter, originalImageCenter)
# if the face found was upside down, then face angle = eye's relative angle + 180, the eyes are swapped
if boolUpSideDown:
counterClockwiseFaceAngle = (relativeCounterClockwiseAngle + 180) % 360
biggestFace.counterClockwiseAngle = counterClockwiseFaceAngle
biggestFace.leftEye = rightEye
biggestFace.rightEye = leftEye
# if not then = eye's relative angle, the eyes are in the same order
else:
counterClockwiseFaceAngle = relativeCounterClockwiseAngle
biggestFace.counterClockwiseAngle = counterClockwiseFaceAngle
biggestFace.leftEye = leftEye
biggestFace.rightEye = rightEye
faces.append(biggestFace)
return faces
def findFacesCounterClockwiseAngle(self, angle, scaleFactor, minNeighbors):
"""
Find faces in the image and return them as detectedArea objects in an array
:param angle: counter clockwise angle by which the image is rotated
:param scaleFactor: parameter for detectMultiScale
:param minNeighbors: parameter for detectMultiScale
:return an array of faces as detectedFace objects
"""
return self.findFacesUsingPairOfEyes(self.findPairsOfEyesCounterClockwiseAngle(angle, scaleFactor, minNeighbors), scaleFactor, minNeighbors)
def findFacesCounterClockwiseMultipleAngles(self, angles, scaleFactor, minNeighbors):
"""
Find faces in the image and return them as detectedArea objects in an array
:param angles: counter clockwise angles by which the image is rotated
:param scaleFactor: parameter for detectMultiScale
:param minNeighbors: parameter for detectMultiScale
:return an array of faces as detectedFace objects
"""
return self.findFacesUsingPairOfEyes(self.findPairsOfEyesCounterClockwiseMultipleAngles(angles, scaleFactor, minNeighbors), scaleFactor, minNeighbors)
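# Hedged usage sketch (added for illustration; the image path and the
# detectMultiScale parameters below are assumptions, not module defaults):
if __name__ == '__main__':
    img = cv.imread('group_photo.jpg')  # hypothetical input image
    manager = ImageManager(img)
    faces = manager.findFacesCounterClockwiseMultipleAngles(
        [0, 90, 180, 270], scaleFactor=1.1, minNeighbors=5)
    for face in faces:
        face.draw(img, (0, 255, 0), 2)
    cv.imshow('faces', img)
    cv.waitKey(0)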
|
UTF-8
|
Python
| false
| false
| 30,527
|
py
| 29
|
ImageManager.py
| 28
| 0.649916
| 0.637043
| 0
| 579
| 51.671848
| 211
|
Hironobu-Kawaguchi/atcoder
| 12,000,138,672,810
|
34da6b9ecbae5385119036eccae183f540e605b7
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/atcoder/abc078_b.py
|
2b5cae2475d6126894e9953816c4f454b1adb8cb
|
[] |
no_license
|
https://github.com/Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604
| 2023-08-12T14:53:03
| 2023-08-12T14:53:03
| 197,216,790
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# https://atcoder.jp/contests/abc078/tasks/abc078_b
X, Y, Z = map(int, input().split())
ans = (X - Z) // (Y + Z)
print(ans)
|
UTF-8
|
Python
| false
| false
| 128
|
py
| 2,068
|
abc078_b.py
| 1,658
| 0.578125
| 0.53125
| 0
| 5
| 24
| 51
|
terrence85561/leetcode
| 11,433,202,979,844
|
509b1c5732198808c2c13ccfa6d3bc274a8af026
|
0a6b735ad72612a4f98993ed6ac9c4ac24f2ef11
|
/python/Search/LC332_findItineryBacktracking.py
|
28727801bba22d69b75a45ef9e6c17eaf7a7beba
|
[] |
no_license
|
https://github.com/terrence85561/leetcode
|
774950b76894b80a6df542ed7a7f82b927f0a5fe
|
68f3ea79b41df1fc5195f07af1b98092f8dea44a
|
refs/heads/master
| 2022-02-11T16:14:08.158961
| 2022-02-02T10:14:01
| 2022-02-02T10:27:15
| 138,051,491
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from typing import List


class Solution:
    def findItineraryHierholzerAlgorithm(self, tickets: List[List[str]]) -> List[str]:
        graph = {}
        for ticket in tickets:
            # O(n): build the adjacency list
            src = ticket[0]
            des = ticket[1]
            if src in graph:
                graph[src].append(des)
            else:
                graph[src] = [des]
        for key in graph:
            # O(n log n): sort in reverse so pop() yields the lexicographically smallest
            graph[key] = sorted(graph[key], reverse=True)
        path = []

        def dfs(graph, path, src):
            # O(n) time: traverses each edge exactly once
            if src not in graph:
                des_list = None
            else:
                des_list = graph[src]
            while des_list:
                nextDes = des_list.pop()
                dfs(graph, path, nextDes)
            path.append(src)

        dfs(graph, path, 'JFK')
        return path[::-1]

    def findItinerary(self, tickets: List[List[str]]) -> List[str]:
        graph = {}
        for ticket in tickets:
            # O(n): build the adjacency list
            src = ticket[0]
            des = ticket[1]
            if src in graph:
                graph[src].append(des)
            else:
                graph[src] = [des]
        visit = {}
        for key in graph:
            # O(n log n): sort destinations lexicographically
            visit[key] = [False] * len(graph[key])
            graph[key] = sorted(graph[key])
        path = ['JFK']
        l = len(tickets) + 1

        def dfs(graph, visit, l, path, cur):
            # O(n!) worst case for the backtracking search
            if len(path) == l:
                return True
            if cur not in graph:
                return False
            for i in range(len(graph[cur])):
                des = graph[cur][i]
                if visit[cur][i]:
                    continue
                visit[cur][i] = True
                path.append(des)
                if dfs(graph, visit, l, path, des):
                    return path
                path.pop()
                visit[cur][i] = False
            return False

        if dfs(graph, visit, l, path, 'JFK'):
            return path
        return None
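A minimal, hedged driver for the class above, using the well-known LeetCode 332 sample (the driver is an illustration, not part of the original file):

if __name__ == '__main__':
    tickets = [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]
    s = Solution()
    print(s.findItineraryHierholzerAlgorithm(tickets))  # ['JFK', 'MUC', 'LHR', 'SFO', 'SJC']
    print(s.findItinerary(tickets))                     # same itinerary via backtracking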
|
UTF-8
|
Python
| false
| false
| 2,083
|
py
| 193
|
LC332_findItineryBacktracking.py
| 192
| 0.422468
| 0.419587
| 0
| 77
| 26.051948
| 86
|
ahmedsaalah/Mailer_rep
| 9,740,985,833,100
|
02d529611eb93c88a82c7e39c0c50d58312f0247
|
23c3e62a4f44c8257eaaaa176502f450cd08a5f5
|
/mail.py
|
1678f0d3104720715436d6979448cf5c459196f5
|
[] |
no_license
|
https://github.com/ahmedsaalah/Mailer_rep
|
a9180fbf585476e40555b7bb29f0c54ceb920a2b
|
2879454f40dc2860ee9f01ed654aeafbc064bc4b
|
refs/heads/master
| 2021-03-27T06:23:33.934085
| 2017-12-09T16:09:31
| 2017-12-09T16:09:31
| 105,224,376
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, render_template, request, redirect, jsonify, url_for, flash
from flask import session as login_session
from flask_mail import Mail, Message
import hashlib
import base64
import requests
import json

from usermodel import usermodel

app = Flask(__name__)
app.secret_key = 'some_secret'

app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'eng.ahmad.abobakr@gmail.com'
app.config['MAIL_PASSWORD'] = 'kramara1234'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)  # initialize once, after the MAIL_* settings are in place

''' Views '''

@app.route('/')
def HomePage():
    return render_template('imo.html', message="")

@app.route('/send')
def routeToMain():
    if checkToRedirect():
        active = "SendMail"
        return render_template('SendMail.html', active=active, message="")
    else:
        return redirect(url_for('HomePage'))

@app.route('/addToList')
def addToList(message=''):
    if checkToRedirect():
        active = "AddtoList"
        return render_template('addToList.html', active=active, message="")
    else:
        return redirect(url_for('HomePage'))

''' end Views '''

''' post request functions '''

@app.route("/login", methods=['POST', 'GET'])
def login():
    email = request.form["email"]
    password = request.form["pass"]
    user = usermodel()
    response = user.getifExist(email, haaash(password))
    if response:
        login_session['state'] = email
        return redirect(url_for('routeToMain'))
    else:
        message = alertFunc("the email address and password you entered do not match")
        return render_template('signin.html', message=message)

@app.route("/logout", methods=['POST', 'GET'])
def logout():
    login_session.pop('state', None)  # avoid a KeyError when no one is logged in
    return redirect(url_for('HomePage'))

@app.route("/SendMAIL", methods=['POST', 'GET'])
def sendmail():
    msg = request.form["msgpost"]
    print(msg)
    return render_template('imo.html', message=msg, msg=msg)
    # return redirect(url_for('routeToMain'))

@app.route("/add", methods=['POST', 'GET'])
def add_list_member():
    email = request.form["email"]
    requests.post(
        "https://api.mailgun.net/v3/lists/best@bestofferz.top/members",
        auth=('api', 'key-9b0f9477e53b446adf19f1371d809aef'),
        data={'subscribed': True,
              'address': email})
    login_session['message'] = "Email added successfully to list"
    return redirect(url_for('addToList'))

''' end post request functions '''

''' main function '''

# Helper, not a route: it requires positional arguments, so the original
# @app.route("/complex_Send") decorator would raise a TypeError when the URL is hit.
def send_email(mailContent, subject):
    MAILGUN_API_KEY = "key-8cafc2ea8c8b2b14a329173afe11c325"
    to = 'ahmode2003@gmail.com'
    f = "mr.Younis <postmaster@bestofferz.top>"
    url = "https://api.mailgun.net/v3/sandbox727fec112f48401bacea21224c2846cd.mailgun.org/messages"
    auth = ('api', MAILGUN_API_KEY)
    data = {
        'from': f,
        'to': to,
        'subject': subject,
        'text': 'Plaintext content',
        'html': mailContent
    }
    response = requests.post(url, auth=auth, data=data)
    return response.text

''' end main function '''

def alertFunc(Message):
    return "<script>alert('" + Message + "')</script>"

def haaash(w):
    # Python 3 port of the original digest().encode('base64'), a Python 2-only idiom
    h = hashlib.sha256(w.encode())
    return base64.b64encode(h.digest()).decode()[:6]

def checkToRedirect():
    try:
        return login_session['state'] == "younesidbs@gmail.com"
    except KeyError:
        return False

@app.template_filter('ses')
def _ses(string=""):
    try:
        if login_session['message'] == "":
            pass
        elif login_session['message']:
            string = alertFunc(login_session['message'])
            del login_session['message']
    except KeyError:
        pass
    return string

''' end '''

if __name__ == '__main__':
    app.run(debug=True)
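Note that Mail and Message are configured above but never used; delivery actually goes through the Mailgun HTTP API. A hedged sketch of what sending through the configured Flask-Mail extension would look like (the helper and its recipient argument are illustrations, not part of the app):

def send_via_flask_mail(subject, body, recipient):
    # Hedged sketch: reuses the `mail` extension configured above;
    # `recipient` is a placeholder argument, not a value from the app.
    msg = Message(subject, sender=app.config['MAIL_USERNAME'], recipients=[recipient])
    msg.body = body
    mail.send(msg)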
|
UTF-8
|
Python
| false
| false
| 4,160
|
py
| 7
|
mail.py
| 2
| 0.598317
| 0.579808
| 0
| 205
| 19.297561
| 99
|
ddnimara/KTH_Projects
| 17,308,718,222,320
|
f135969f841bff0aab511d2fbf7c7b3dc1653eca
|
d99c5413d2b8aed6aaa06bd95cc6e568b6ea19c2
|
/Artificial Neural Networks/DD2437_Artificial_Neural_Network_Lab_3/3_1.py
|
02c867968766758476301967dd782a26478586fb
|
[] |
no_license
|
https://github.com/ddnimara/KTH_Projects
|
661002c1ed579abb0d48b22f98aa1f6ccd637811
|
79d6448729edce3e7a8b830459a6e5d22730f51c
|
refs/heads/master
| 2020-09-15T15:56:26.362672
| 2020-07-18T11:26:03
| 2020-07-18T11:26:03
| 223,495,272
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import load_data as ld
import functions as fun

training = ld.get_data('Training')
testing = ld.get_data('Testing')
biz_test = ld.get_data('Dissimilar')
weights = fun.fit(training)

for i in range(testing.shape[0]):
    print('initial', testing[i])
    test_output = fun.predict(weights, testing[i], 100)
    print('target', training[i])
    print('output', test_output)
    print('-------------')

results = fun.number_of_attractors(weights)
print('attractors', np.unique(results, axis=0))
print('training', training)
print("number of attractors ", np.unique(results, axis=0).shape[0])
print('initial bizarre', biz_test)
biz_out = fun.predict(weights, biz_test, 100)
print('final bizarre', biz_out)
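The load_data and functions modules are lab code not shown here. As a hedged sketch of what fit and predict conventionally compute for a Hopfield network (assuming bipolar ±1 patterns, one per row; an assumption about the lab's implementation, not a copy of it):

import numpy as np

def fit(patterns):
    # Hebbian outer-product rule; patterns has shape (n_patterns, n_units), entries ±1
    n_units = patterns.shape[1]
    weights = patterns.T @ patterns / n_units
    np.fill_diagonal(weights, 0)  # no self-connections
    return weights

def predict(weights, state, max_iter):
    # synchronous sign updates until a fixed point or max_iter is reached
    x = np.sign(np.asarray(state, dtype=float))
    for _ in range(max_iter):
        new_x = np.sign(x @ weights.T)
        new_x[new_x == 0] = 1  # break ties toward +1
        if np.array_equal(new_x, x):
            break
        x = new_x
    return x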
|
UTF-8
|
Python
| false
| false
| 746
|
py
| 36
|
3_1.py
| 24
| 0.66622
| 0.652815
| 0
| 32
| 21.375
| 66
|
zhyordanova/Python-Basics
| 128,849,057,249
|
ad5004e65404286d3ed55c5803ae756ed161fe96
|
cd329424035732bf9041c9dfde1f277f8f4b09d6
|
/01-First-Steps-in-Programming/Exercise/08_fish_tank.py
|
4627fafcb6be107989a2945bbbfc10716e56b9d0
|
[] |
no_license
|
https://github.com/zhyordanova/Python-Basics
|
3e90e5bd8d78cdd7b36bfdb348c6b346813293b1
|
d6b5e4737e67b26383a6b67606f9dc671f50c355
|
refs/heads/main
| 2023-03-20T06:15:55.358694
| 2021-03-15T07:52:31
| 2021-03-15T07:52:31
| 339,508,784
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# One litre of water equals one cubic decimetre (1 L = 1 dm³).
length = int(input())
width = int(input())
height = int(input())
percent_occupied_volume = float(input())

volume_aquarium = length * width * height
total_liters = volume_aquarium * 0.001
percent_occupied = percent_occupied_volume * 0.01
liters = total_liters * (1 - percent_occupied)
print(liters)
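As a hedged worked example with made-up inputs (not the judge's samples): a 100 × 50 × 40 cm tank that is 25% occupied holds 100 · 50 · 40 = 200,000 cm³ = 200 L in total, so the water poured in is 200 · (1 - 0.25) = 150 L.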
|
UTF-8
|
Python
| false
| false
| 420
|
py
| 116
|
08_fish_tank.py
| 115
| 0.72118
| 0.691689
| 0
| 13
| 27.692308
| 66
|
ver0nika4ka/PythonCrashCourse
| 11,639,361,413,756
|
9097298c19fa69606a713f339a0b845a7c72d621
|
304926837d94f37ef33c46b8f3c71ecfac4690e8
|
/name_cases.py
|
c0bea0fe87515cea21cdbcf88aa060164c5dcb90
|
[] |
no_license
|
https://github.com/ver0nika4ka/PythonCrashCourse
|
1015d207d9da1b0f9efaee3acc502d2757880f33
|
6bde3b716deb86d022da5cb478c0a95505fe5acc
|
refs/heads/master
| 2021-07-12T17:24:16.478133
| 2021-06-17T03:27:24
| 2021-06-17T03:27:24
| 246,993,773
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
variable = 'Victor'
print(f"Hello {variable}, would you like to learn some Python today?")
|
UTF-8
|
Python
| false
| false
| 90
|
py
| 126
|
name_cases.py
| 112
| 0.744444
| 0.744444
| 0
| 2
| 44.5
| 70
|