Dataset schema. For string columns the range is the observed min/max length; for numeric columns it is the observed min/max value; ⌀ marks columns that contain nulls.

| column | dtype | min | max | nulls |
|---|---|---|---|---|
| repo_name | string (length) | 7 | 111 | |
| __id__ | int64 | 16.6k | 19,705B | |
| blob_id | string (length) | 40 | 40 | |
| directory_id | string (length) | 40 | 40 | |
| path | string (length) | 5 | 151 | |
| content_id | string (length) | 40 | 40 | |
| detected_licenses | list | | | |
| license_type | string (2 classes) | | | |
| repo_url | string (length) | 26 | 130 | |
| snapshot_id | string (length) | 40 | 40 | |
| revision_id | string (length) | 40 | 40 | |
| branch_name | string (length) | 4 | 42 | |
| visit_date | timestamp[ns] | | | |
| revision_date | timestamp[ns] | | | |
| committer_date | timestamp[ns] | | | |
| github_id | int64 | 14.6k | 687M | ⌀ |
| star_events_count | int64 | 0 | 209k | |
| fork_events_count | int64 | 0 | 110k | |
| gha_license_id | string (12 classes) | | | |
| gha_fork | bool (2 classes) | | | |
| gha_event_created_at | timestamp[ns] | | | |
| gha_created_at | timestamp[ns] | | | |
| gha_updated_at | timestamp[ns] | | | |
| gha_pushed_at | timestamp[ns] | | | |
| gha_size | int64 | 0 | 10.2M | ⌀ |
| gha_stargazers_count | int32 | 0 | 178k | ⌀ |
| gha_forks_count | int32 | 0 | 88.9k | ⌀ |
| gha_open_issues_count | int32 | 0 | 2.72k | ⌀ |
| gha_language | string (length) | 1 | 16 | ⌀ |
| gha_archived | bool (1 class) | | | |
| gha_disabled | bool (1 class) | | | |
| content | string (length) | 10 | 2.95M | |
| src_encoding | string (5 classes) | | | |
| language | string (1 class) | | | |
| is_vendor | bool (2 classes) | | | |
| is_generated | bool (2 classes) | | | |
| length_bytes | int64 | 10 | 2.95M | |
| extension | string (19 classes) | | | |
| num_repo_files | int64 | 1 | 202k | |
| filename | string (length) | 4 | 112 | |
| num_lang_files | int64 | 1 | 202k | |
| alphanum_fraction | float64 | 0.26 | 0.89 | |
| alpha_fraction | float64 | 0.2 | 0.89 | |
| hex_fraction | float64 | 0 | 0.09 | |
| num_lines | int32 | 1 | 93.6k | |
| avg_line_length | float64 | 4.57 | 103 | |
| max_line_length | int64 | 7 | 931 | |
modoboa/modoboa
| 1,340,029,830,674
|
5e06f0bd7d7002eabb07fc5d2b364eed0e0fe935
|
bbf0c3294b8aac4cbabf255c2904556144c85610
|
/modoboa/lib/imap_utf7.py
|
c40348ab8827643f9ab1e89d4724411f535c4540
|
[
"ISC"
] |
permissive
|
https://github.com/modoboa/modoboa
|
4a170fabcb15b892fe627795b02a02d4c16783d6
|
df699aab0799ec1725b6b89be38e56285821c889
|
refs/heads/master
| 2023-08-30T12:58:51.313642
| 2023-08-29T13:22:14
| 2023-08-29T13:22:14
| 9,469,271
| 2,201
| 370
|
ISC
| false
| 2023-09-13T12:47:28
| 2013-04-16T09:43:55
| 2023-09-13T04:53:03
| 2023-09-13T12:47:28
| 28,656
| 2,665
| 361
| 79
|
Python
| false
| false
|
# -*- coding: iso-8859-1 -*-
"""
Imap folder names are encoded using a special version of utf-7 as
defined in RFC 2060 section 5.1.3.
5.1.3. Mailbox International Naming Convention
By convention, international mailbox names are specified using a
modified version of the UTF-7 encoding described in [UTF-7]. The
purpose of these modifications is to correct the following problems
with UTF-7:
1) UTF-7 uses the "+" character for shifting; this conflicts with
the common use of "+" in mailbox names, in particular USENET
newsgroup names.
2) UTF-7's encoding is BASE64 which uses the "/" character; this
conflicts with the use of "/" as a popular hierarchy delimiter.
3) UTF-7 prohibits the unencoded usage of "\"; this conflicts with
the use of "\" as a popular hierarchy delimiter.
4) UTF-7 prohibits the unencoded usage of "~"; this conflicts with
the use of "~" in some servers as a home directory indicator.
5) UTF-7 permits multiple alternate forms to represent the same
string; in particular, printable US-ASCII characters can be
represented in encoded form.
In modified UTF-7, printable US-ASCII characters except for "&"
represent themselves; that is, characters with octet values 0x20-0x25
and 0x27-0x7e. The character "&" (0x26) is represented by the two-
octet sequence "&-".
All other characters (octet values 0x00-0x1f, 0x7f-0xff, and all
Unicode 16-bit octets) are represented in modified BASE64, with a
further modification from [UTF-7] that "," is used instead of "/".
Modified BASE64 MUST NOT be used to represent any printing US-ASCII
character which can represent itself.
"&" is used to shift to modified BASE64 and "-" to shift back to US-
ASCII. All names start in US-ASCII, and MUST end in US-ASCII (that
is, a name that ends with a Unicode 16-bit octet MUST end with a "-
").
For example, here is a mailbox name which mixes English, Japanese,
and Chinese text: ~peter/mail/&ZeVnLIqe-/&U,BTFw-
Found here:
http://svn.plone.org/svn/collective/mxmImapClient/trunk/imapUTF7.py
"""
import codecs
# encoding
PRINTABLE = set(range(0x20, 0x26)) | set(range(0x27, 0x7f))
def modified_utf7(s):
s_utf7 = s.encode("utf-7")
return s_utf7[1:-1].replace(b"/", b",")
def doB64(_in, r): # NOQA:N802
if _in:
r.extend([b"&", modified_utf7("".join(_in)), b"-"])
del _in[:]
def encoder(s, *args, **kwargs):
r = []
_in = []
for c in s:
if ord(c) in PRINTABLE:
doB64(_in, r)
r.append(c.encode())
elif c == "&":
doB64(_in, r)
r.append(b"&-")
else:
_in.append(c)
doB64(_in, r)
return (b"".join(r), len(s))
# decoding
def modified_unutf7(s):
s_utf7 = b"+" + s.replace(b",", b"/") + b"-"
return s_utf7.decode("utf-7")
def decoder(s, *args, **kwargs):
r = []
decoded = bytearray()
for c in s:
if c == ord("&") and not decoded:
decoded.append(ord("&"))
elif c == ord("-") and decoded:
if len(decoded) == 1:
r.append("&")
else:
r.append(modified_unutf7(decoded[1:]))
decoded = bytearray()
elif decoded:
decoded.append(c)
else:
r.append(chr(c))
if decoded:
r.append(modified_unutf7(decoded[1:]))
bin_str = "".join(r)
return (bin_str, len(s))
class StreamReader(codecs.StreamReader):
def decode(self, s, errors="strict"):
return decoder(s)
class StreamWriter(codecs.StreamWriter):
    def encode(self, s, errors="strict"):
        return encoder(s)
def imap4_utf_7(name):
if name == "imap4-utf-7" or name == "imap4_utf_7":
return (encoder, decoder, StreamReader, StreamWriter)
codecs.register(imap4_utf_7)
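# Round-trip example through the codec registered above (a sketch; the byte
# strings follow the "&-" and "&AOU-" expansions noted in the tests below):
#   "Ting & Såger".encode("imap4-utf-7")        -> b"Ting &- S&AOU-ger"
#   b"Ting &- S&AOU-ger".decode("imap4-utf-7")  -> "Ting & Såger"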
# testing methods
def imapUTF7Encode(ust): # NOQA:N802
"Returns imap utf-7 encoded version of string"
return ust.encode("imap4-utf-7")
def imapUTF7EncodeSequence(seq): # NOQA:N802
"Returns imap utf-7 encoded version of strings in sequence"
return [imapUTF7Encode(itm) for itm in seq]
def imapUTF7Decode(st):  # NOQA:N802
    "Returns the decoded str version of an imap utf-7 encoded byte string"
    return st.decode("imap4-utf-7")
def imapUTF7DecodeSequence(seq):  # NOQA:N802
    "Returns decoded str versions of the imap utf-7 byte strings in sequence"
    return [imapUTF7Decode(itm) for itm in seq]
def utf8Decode(st):  # NOQA:N802
    "Returns the decoded str version of a utf-8 encoded byte string"
    return st.decode("utf-8")
def utf7SequenceToUTF8(seq):  # NOQA:N802
    "Returns utf-8 encoded versions of the imap utf-7 byte strings in sequence"
    return [itm.decode("imap4-utf-7").encode("utf-8") for itm in seq]
__all__ = ["imapUTF7Encode", "imapUTF7Decode", ]
if __name__ == "__main__":
# print 'bøx'.encode('imap4-utf-7')
# print 'expected b&APg-x'
# print 'båx'.encode('imap4-utf-7')
# print 'expected b&AOU-x'
print("#######")
print("bøx")
e = imapUTF7Encode("bøx")
print(e)
print(imapUTF7Decode(e).encode("latin-1"))
print("#######")
print("båx")
e = imapUTF7Encode("båx")
print(e)
print(imapUTF7Decode(e).encode("latin-1"))
print("#######")
print("~/bågø")
e = imapUTF7Encode("~/bågø")
print(e)
print(imapUTF7Decode(e).encode("latin-1"))
print("#######")
print("Ting & Såger")
e = imapUTF7Encode("Ting & Såger")
print(e)
print(imapUTF7Decode(e).encode("latin-1"))
# e = imapUTF7Decode('b&AOU-x')
# print e.encode('latin-1')
# e = imapUTF7Decode('b&APg-x')
# print e.encode('latin-1')
print("#######")
print("~/Følder/mailbåx & stuff + more")
n = "~/Følder/mailbåx & stuff + more"
e = imapUTF7Encode(n)
print(e)
print(imapUTF7Decode(e).encode("latin-1"))
print("#######")
print("~peter/mail/&ZeVnLIqe-/&U,BTFw-")
print(imapUTF7Decode("~peter/mail/&ZeVnLIqe-/&U,BTFw-").encode("utf-8"))
|
UTF-8
|
Python
| false
| false
| 6,088
|
py
| 807
|
imap_utf7.py
| 478
| 0.61693
| 0.586627
| 0
| 217
| 26.981567
| 76
|
iamsureshtumu/CRUD-CBV-school-students
| 2,705,829,412,641
|
686c3bc567dedc916d7c9e3459869437e34f95f2
|
542b26197f7a5abe46bd2df594aadc6cafb62a82
|
/CBV_DEMO/urls.py
|
2cbaba4b961a9102de59b2287ecea403d1cffbcf
|
[] |
no_license
|
https://github.com/iamsureshtumu/CRUD-CBV-school-students
|
c57f83e5366cc7f32491ea069a01070bb6be3cb5
|
6fdbdd1888e1317ec8abcc94b8dd0cf7d9761bd4
|
refs/heads/main
| 2023-05-28T23:33:56.603855
| 2021-06-08T18:00:38
| 2021-06-08T18:00:38
| 324,770,664
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""CBV_DEMO URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from myapp import views
urlpatterns = [
path('admin/', admin.site.urls),
    path('', views.base.as_view(), name="homepage"),
    path('schools/', views.School_ListView.as_view(), name="school_list"),  # TODO: check whether capitalization matters here
    path('schools/<int:pk>', views.School_DetailView.as_view(), name="detail_view"),
    path('schools/create/', views.Create_School.as_view(), name="create_school"),
    path('schools/update/<int:pk>', views.Update_School.as_view(), name="update_school"),
    path('schools/delete/<int:pk>', views.Delete_School.as_view(), name="delete_school"),
    path('students/', views.Student_ListView.as_view(), name="student_list"),  # TODO: check whether capitalization matters here
    # Careful: don't give two URLs the same name, because a later entry overrides
    # an earlier one; keep names distinct (e.g. "detail_view" vs "studentdetail_view").
    path('students/<int:pk>', views.Student_DetailView.as_view(), name="studentdetail_view"),
    path('students/create/', views.Create_Student.as_view(), name="create_student"),
    path('students/update/<int:pk>', views.Update_Student.as_view(), name="update_student"),
    path('students/delete/<int:pk>', views.Delete_Student.as_view(), name="delete_student"),
]
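# The route names above can be reversed elsewhere in the project, e.g. (sketch):
#   from django.urls import reverse
#   reverse("detail_view", args=[5])  # -> "/schools/5"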
|
UTF-8
|
Python
| false
| false
| 1,810
|
py
| 5
|
urls.py
| 2
| 0.701657
| 0.697238
| 0
| 36
| 49.277778
| 183
|
Jimmy-INL/google-research
| 1,778,116,460,777
|
218a90a306b0b1c5be4d39718e05d556dd976164
|
e10a6d844a286db26ef56469e31dc8488a8c6f0e
|
/disarm/categorical/experiment_launcher.py
|
bb16f3cdc23244dc7b84954340768b710cd43fa8
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
https://github.com/Jimmy-INL/google-research
|
54ad5551f97977f01297abddbfc8a99a7900b791
|
5573d9c5822f4e866b6692769963ae819cb3f10d
|
refs/heads/master
| 2023-04-07T19:43:54.483068
| 2023-03-24T16:27:28
| 2023-03-24T16:32:17
| 282,682,170
| 1
| 0
|
Apache-2.0
| true
| 2020-07-26T15:50:32
| 2020-07-26T15:50:31
| 2020-07-26T13:45:43
| 2020-07-24T10:45:03
| 239,465
| 0
| 0
| 0
| null | false
| false
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python binary for running the coupled estimator experiments."""
import os
from absl import app
from absl import flags
from absl import logging
import dataset
import networks as categorical_networks
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
layers = tf.keras.layers
flags.DEFINE_enum('dataset', 'static_mnist',
['static_mnist', 'dynamic_mnist',
'fashion_mnist', 'omniglot',
'binarized_mnist', 'celeba'],
'Dataset to use.')
flags.DEFINE_float('genmo_lr', 1e-4,
'Learning rate for decoder, Generation network.')
flags.DEFINE_float('infnet_lr', 1e-4,
'Learning rate for encoder, Inference network.')
flags.DEFINE_float('prior_lr', 1e-2,
'Learning rate for prior variables.')
flags.DEFINE_integer('batch_size', 200, 'Training batch size.')
flags.DEFINE_integer('num_steps', int(1e6), 'Number of training steps.')
flags.DEFINE_string('encoder_type', 'nonlinear',
                    'Choices supported: nonlinear, linear.')
flags.DEFINE_enum('grad_type', 'reinforce_loo',
['reinforce_loo', 'arsm', 'ars', 'arsp', 'arsmp', 'disarm'],
'Gradient estimator type.')
flags.DEFINE_string('logdir', '/tmp/logdir',
'Directory for storing logs.')
flags.DEFINE_bool('verbose', False,
'Whether to turn on training result logging.')
flags.DEFINE_integer('repeat_idx', 0,
'Dummy flag to label the experiments in repeats.')
flags.DEFINE_bool('eager', False, 'Enable eager execution.')
flags.DEFINE_bool('bias_check', False,
'Carry out bias check for RELAX and baseline')
flags.DEFINE_bool('demean_input', False,
'Demean for encoder and decoder inputs.')
flags.DEFINE_bool('initialize_with_bias', False,
'Initialize the final layer bias of decoder '
'with dataset mean.')
flags.DEFINE_integer('seed', 1, 'Global random seed.')
flags.DEFINE_list('estimate_grad_basket', [],
'List of gradient estimators to compute in addition '
'for variance.')
flags.DEFINE_integer('num_eval_samples', 100,
                     'Number of samples per example used for evaluation.')
flags.DEFINE_integer('num_train_samples', 1,
                     'Number of samples per example used when evaluating '
                     'on the training set.')
flags.DEFINE_integer('num_categories', 10,
'Number of categories for categorical variables.')
flags.DEFINE_integer('num_variables', 20,
                     'Number of hidden categorical variables.')
flags.DEFINE_integer('num_samples', None,
                     'Number of samples for the REINFORCE baseline. '
                     'Defaults to None, in which case num_categories is used.')
flags.DEFINE_bool('stick_breaking', False,
'Use stick breaking augmentation for categorical variables.')
flags.DEFINE_bool('tree_structure', False,
'Use tree structure stick breaking.')
flags.DEFINE_bool('importance_weight', False,
'Use importance weight stick breaking.')
flags.DEFINE_bool('one_hot', False,
'Use one-hot categorical representation.')
flags.DEFINE_bool('debug', False, 'Turn on debugging mode.')
flags.DEFINE_string('logits_order', None,
'The order to sort the logits: [None, abs, ascending, '
'descending].')
flags.DEFINE_float('weight_scale', None,
'Scale of initializer.')
FLAGS = flags.FLAGS
def process_batch_input(input_batch):
if FLAGS.dataset == 'celeba':
return input_batch
else:
input_batch = tf.reshape(input_batch, [tf.shape(input_batch)[0], -1])
input_batch = tf.cast(input_batch, tf.float32)
return input_batch
def initialize_grad_variables(target_variable_list):
return [tf.Variable(tf.zeros(shape=i.shape)) for i in target_variable_list]
def estimate_gradients(input_batch, bvae_model, gradient_type,
stick_breaking=False,
tree_structure=False,
importance_weight=False,
logits_sorting_order=None,
num_samples=None):
"""Estimate gradient for inference and generation networks."""
if num_samples is None:
num_samples = FLAGS.num_samples
if gradient_type == 'reinforce_loo' and stick_breaking:
with tf.GradientTape(persistent=True) as tape:
elbo, _, encoder_logits, _ = bvae_model(
input_batch, num_samples=1,
stick_breaking=True,
tree_structure=tree_structure)
genmo_loss = -1. * tf.reduce_mean(elbo)
learning_signal, encoder_llk = bvae_model.get_layer_grad_estimation(
input_batch,
grad_type=gradient_type,
num_samples=num_samples,
stick_breaking=True,
tree_structure=tree_structure,
logits_sorting_order=logits_sorting_order)
infnet_objective = tf.reduce_sum(
tf.reduce_mean(tf.stop_gradient(-1. * learning_signal) * encoder_llk,
axis=0), # reduce num_samples
axis=0) # reduce batch dims
genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
infnet_grads = tape.gradient(
infnet_objective,
bvae_model.encoder_vars)
elif gradient_type == 'disarm' and importance_weight:
with tf.GradientTape(persistent=True) as tape:
elbo, _, encoder_logits, _ = bvae_model(
input_batch, num_samples=1,
stick_breaking=True,
tree_structure=tree_structure)
genmo_loss = -1. * tf.reduce_mean(elbo)
learning_signal, encoder_llk_diff = bvae_model.get_layer_grad_estimation(
input_batch,
grad_type=gradient_type,
num_samples=1,
stick_breaking=True,
tree_structure=False,
logits_sorting_order=None,
importance_weighting=True)
infnet_objective = tf.reduce_sum(
tf.reduce_mean(
tf.stop_gradient(-1. * learning_signal) * encoder_llk_diff,
axis=0), # reduce num_samples
axis=0) # reduce batch dims
genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
infnet_grads = tape.gradient(
infnet_objective,
bvae_model.encoder_vars)
elif gradient_type == 'disarm' and stick_breaking:
with tf.GradientTape(persistent=True) as tape:
elbo = bvae_model(input_batch, num_samples=1,
stick_breaking=True,
tree_structure=tree_structure,
logits_sorting_order=logits_sorting_order)[0]
genmo_loss = -1. * tf.reduce_mean(elbo)
learning_signal, encoder_logits = bvae_model.get_layer_grad_estimation(
input_batch,
grad_type=gradient_type,
num_samples=1, # num_samples,
stick_breaking=True,
tree_structure=tree_structure,
logits_sorting_order=logits_sorting_order)
infnet_objective = tf.reduce_sum(
tf.reduce_mean(
tf.stop_gradient(-1. * learning_signal) * encoder_logits,
axis=0), # reduce num_samples
axis=0) # reduce batch dims
genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
infnet_grads = tape.gradient(
infnet_objective,
bvae_model.encoder_vars)
elif gradient_type in ['reinforce_loo', 'arsm', 'ars', 'arsp', 'arsmp']:
with tf.GradientTape(persistent=True) as tape:
elbo, _, encoder_logits, _ = bvae_model(input_batch, stick_breaking=False)
genmo_loss = -1. * tf.reduce_mean(elbo)
genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
infnet_grad_multiplier = -1. * bvae_model.get_layer_grad_estimation(
input_batch,
grad_type=gradient_type,
num_samples=num_samples)
infnet_grads = tape.gradient(
encoder_logits,
bvae_model.encoder_vars,
output_gradients=infnet_grad_multiplier)
del tape
return (genmo_grads, prior_grads, infnet_grads, genmo_loss)
@tf.function
def train_one_step(
train_batch_i,
bvae_model,
genmo_optimizer,
infnet_optimizer,
prior_optimizer,
grad_variable_dict,
grad_sq_variable_dict):
"""Train Discrete VAE for 1 step."""
metrics = {}
input_batch = process_batch_input(train_batch_i)
(genmo_grads, prior_grads, infnet_grads, genmo_loss) = estimate_gradients(
input_batch, bvae_model, FLAGS.grad_type,
stick_breaking=FLAGS.stick_breaking,
tree_structure=FLAGS.tree_structure,
importance_weight=FLAGS.importance_weight,
logits_sorting_order=FLAGS.logits_order)
genmo_vars = bvae_model.decoder_vars
genmo_optimizer.apply_gradients(list(zip(genmo_grads, genmo_vars)))
prior_vars = bvae_model.prior_vars
prior_optimizer.apply_gradients(list(zip(prior_grads, prior_vars)))
infnet_vars = bvae_model.encoder_vars
infnet_optimizer.apply_gradients(list(zip(infnet_grads, infnet_vars)))
batch_size_sq = tf.cast(FLAGS.batch_size * FLAGS.batch_size, tf.float32)
encoder_grad_var = bvae_model.compute_grad_variance(
grad_variable_dict[FLAGS.grad_type],
grad_sq_variable_dict[FLAGS.grad_type],
infnet_grads) / batch_size_sq
variance_dict = {}
if (FLAGS.grad_type == 'reinforce_loo') and FLAGS.estimate_grad_basket:
for method in FLAGS.estimate_grad_basket:
if method == FLAGS.grad_type:
continue # Already computed
if ('disarm' in method) and ('tree' not in method):
main_method, logits_order = method.split('-')
logits_order = None if logits_order == 'null' else logits_order
(_, _, infnet_grads, _) = estimate_gradients(
input_batch, bvae_model, main_method,
stick_breaking=True,
tree_structure=False,
logits_sorting_order=logits_order)
variance_dict[method] = bvae_model.compute_grad_variance(
grad_variable_dict[method],
grad_sq_variable_dict[method],
infnet_grads) / batch_size_sq
return (encoder_grad_var, variance_dict, genmo_loss, metrics)
# @tf.function
def evaluate(model, tf_dataset, max_step=1000, num_eval_samples=None,
stick_breaking=False,
tree_structure=False):
"""Evaluate the model."""
if num_eval_samples:
num_samples = num_eval_samples
elif FLAGS.num_eval_samples:
num_samples = FLAGS.num_eval_samples
else:
num_samples = FLAGS.num_samples
# tf.print('Evaluate with samples: %d.', num_samples)
loss = 0.
n = 0.
for batch in tf_dataset.map(process_batch_input):
if n >= max_step: # used for train_ds, which is a `repeat` dataset.
break
if num_samples > 1:
batch_size = tf.shape(batch)[0]
input_batch = tf.tile(
batch, [num_samples] + [1] * (len(batch.shape)-1))
elbo = tf.reshape(model(input_batch,
stick_breaking=stick_breaking,
tree_structure=tree_structure)[0],
[num_samples, batch_size])
objectives = (tf.reduce_logsumexp(elbo, axis=0, keepdims=False) -
tf.math.log(tf.cast(tf.shape(elbo)[0], tf.float32)))
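      # Multi-sample (importance-weighted) bound:
      # log((1/K) * sum_k exp(elbo_k)), computed stably via logsumexp.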
else:
objectives = model(batch,
stick_breaking=stick_breaking,
tree_structure=tree_structure)[0]
loss -= tf.reduce_mean(objectives)
n += 1.
return loss / n
# @tf.function
def maxprob_histogram(
model, tf_dataset, max_step=1000,
stick_breaking=False,
tree_structure=False):
"""Evaluate the model."""
results = []
n = 0
for batch in tf_dataset.map(process_batch_input):
if n >= max_step: # used for train_ds, which is a `repeat` dataset.
break
encoder_logits = model(
batch,
stick_breaking=stick_breaking,
tree_structure=tree_structure)[2]
max_prob = tf.reshape(
tf.math.reduce_max(
tf.nn.softmax(encoder_logits, axis=-1),
axis=-1),
[-1])
results.append(max_prob)
n += 1
return tf.concat(results, axis=-1)
def run_bias_check(model, batch, target_type, baseline_type):
"""Run bias check."""
tf.print(f'Running a bias check comparing {target_type} and {baseline_type}.')
mu = 0.
s = 0.
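  # Welford-style running statistics: mu is the running mean of the per-batch
  # gradient difference and s accumulates squared deviations, so sigma below is
  # its sample standard deviation; an unbiased pair of estimators should keep
  # the z-score near zero.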
for step in range(1, int(1e6) + 1):
diff = run_bias_check_step(
batch,
model,
target_type=target_type,
baseline_type=baseline_type)
prev_mu = mu
mu = mu + (diff - mu) / step
s = s + (diff - mu) * (diff - prev_mu)
if step % 100 == 0:
sigma = tf.math.sqrt(s / step)
z_score = mu / (sigma / tf.math.sqrt(float(step)))
tf.print(step, 'z_score: ', z_score, 'sigma: ', sigma)
@tf.function
def run_bias_check_step(
train_batch_i,
bvae_model,
target_type='local-armpp',
baseline_type='vimco'):
"""Run bias check for 1 batch."""
input_batch = process_batch_input(train_batch_i)
if target_type == 'disarm':
infnet_grads = estimate_gradients(
input_batch, bvae_model, 'disarm',
stick_breaking=True,
tree_structure=FLAGS.tree_structure,
importance_weight=FLAGS.importance_weight,
num_samples=1)[2]
baseline_infnet_grads = estimate_gradients(
input_batch, bvae_model, 'reinforce_loo',
stick_breaking=False,
tree_structure=False,
num_samples=2)[2]
else:
infnet_grads = estimate_gradients(
input_batch, bvae_model, target_type,
stick_breaking=FLAGS.stick_breaking)[2]
baseline_infnet_grads = estimate_gradients(
input_batch, bvae_model, baseline_type,
stick_breaking=False)[2]
diff = tf.concat([tf.reshape(x - y, [-1])
for x, y in zip(infnet_grads, baseline_infnet_grads)],
axis=0)
return tf.reduce_mean(diff)
def main(_):
tf.random.set_seed(FLAGS.seed)
logdir = FLAGS.logdir
if not os.path.exists(logdir):
os.makedirs(logdir)
if FLAGS.eager:
tf.config.experimental_run_functions_eagerly(FLAGS.eager)
genmo_lr = tf.constant(FLAGS.genmo_lr)
infnet_lr = tf.constant(FLAGS.infnet_lr)
prior_lr = tf.constant(FLAGS.prior_lr)
genmo_optimizer = tf.keras.optimizers.Adam(learning_rate=genmo_lr)
infnet_optimizer = tf.keras.optimizers.Adam(learning_rate=infnet_lr)
prior_optimizer = tf.keras.optimizers.SGD(learning_rate=prior_lr)
theta_optimizer = tf.keras.optimizers.Adam(learning_rate=infnet_lr,
beta_1=0.999)
batch_size = FLAGS.batch_size
if FLAGS.dataset == 'celeba':
train_ds, valid_ds, test_ds, train_ds_mean, train_size = (
dataset.get_celeba_batch(batch_size))
num_steps_per_epoch = int(train_size / batch_size)
encoder = categorical_networks.CnnEncoderNetwork(
hidden_size=FLAGS.num_variables,
num_categories=FLAGS.num_categories,
train_mean=train_ds_mean)
decoder = categorical_networks.CnnDecoderNetwork(
train_mean=train_ds_mean)
else:
if FLAGS.dataset == 'static_mnist':
train_ds, valid_ds, test_ds = dataset.get_static_mnist_batch(batch_size)
train_size = 50000
elif FLAGS.dataset == 'dynamic_mnist':
train_ds, valid_ds, test_ds = dataset.get_dynamic_mnist_batch(batch_size)
train_size = 50000
elif FLAGS.dataset == 'fashion_mnist':
train_ds, valid_ds, test_ds = dataset.get_dynamic_mnist_batch(
batch_size, fashion_mnist=True)
train_size = 50000
elif FLAGS.dataset == 'omniglot':
train_ds, valid_ds, test_ds = dataset.get_omniglot_batch(batch_size)
train_size = 23000
elif FLAGS.dataset == 'binarized_mnist':
train_ds, valid_ds, test_ds = dataset.get_binarized_mnist_batch(
batch_size)
train_size = 50000
num_steps_per_epoch = int(train_size / batch_size)
train_ds_mean = dataset.get_mean_from_iterator(
train_ds, dataset_size=train_size, batch_size=batch_size)
if FLAGS.initialize_with_bias:
bias_value = -tf.math.log(
1./tf.clip_by_value(train_ds_mean, 0.001, 0.999) - 1.)
bias_initializer = tf.keras.initializers.Constant(bias_value)
else:
bias_initializer = 'zeros'
if FLAGS.encoder_type == 'nonlinear':
encoder_hidden_sizes = [512, 256, FLAGS.num_variables]
encoder_activations = [layers.LeakyReLU(0.2),
layers.LeakyReLU(0.2),
None]
decoder_hidden_sizes = [256, 512, 784]
decoder_activations = [layers.LeakyReLU(0.2),
layers.LeakyReLU(0.2),
None]
elif FLAGS.encoder_type == 'linear':
encoder_hidden_sizes = [FLAGS.num_variables]
encoder_activations = [None]
decoder_hidden_sizes = [784]
decoder_activations = [None]
else:
raise NotImplementedError
if FLAGS.weight_scale is not None:
kernel_initializer = tf.keras.initializers.VarianceScaling(
scale=FLAGS.weight_scale, seed=FLAGS.seed)
else:
kernel_initializer = 'glorot_uniform'
encoder = categorical_networks.CategoricalNetwork(
encoder_hidden_sizes,
encoder_activations,
num_categories=FLAGS.num_categories,
mean_xs=train_ds_mean,
demean_input=FLAGS.demean_input,
name='bvae_encoder',
kernel_initializer=kernel_initializer)
decoder = categorical_networks.BinaryNetwork(
decoder_hidden_sizes,
decoder_activations,
demean_input=FLAGS.demean_input,
final_layer_bias_initializer=bias_initializer,
name='bvae_decoder',
kernel_initializer=kernel_initializer)
prior_logit = tf.Variable(
tf.zeros([FLAGS.num_variables, FLAGS.num_categories], tf.float32))
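  # Note: 'relax' is not among the --grad_type enum choices defined above, so
  # this control-network branch appears unreachable as the flags stand.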
if FLAGS.grad_type == 'relax':
control_network = tf.keras.Sequential()
control_network.add(
layers.Dense(137, activation=layers.LeakyReLU(alpha=0.3)))
control_network.add(
layers.Dense(1))
else:
control_network = None
bvae_model = categorical_networks.CategoricalVAE(
encoder,
decoder,
prior_logit,
FLAGS.num_categories,
one_hot_sample=FLAGS.one_hot,
grad_type=FLAGS.grad_type)
if FLAGS.dataset == 'celeba':
bvae_model.build(input_shape=(FLAGS.batch_size, 64, 64, 3))
else:
bvae_model.build(input_shape=(FLAGS.batch_size, 784))
tensorboard_file_writer = tf.summary.create_file_writer(logdir)
# In order to use `tf.train.ExponentialMovingAverage`, one has to
# use `tf.Variable`.
grad_variable_dict = {}
grad_sq_variable_dict = {}
for method in set([FLAGS.grad_type] + FLAGS.estimate_grad_basket):
grad_variable_dict[method] = initialize_grad_variables(
bvae_model.encoder_vars)
grad_sq_variable_dict[method] = initialize_grad_variables(
bvae_model.encoder_vars)
ckpt = tf.train.Checkpoint(
genmo_optimizer=genmo_optimizer,
infnet_optimizer=infnet_optimizer,
theta_optimizer=theta_optimizer,
grad_variable_dict=grad_variable_dict,
grad_sq_variable_dict=grad_sq_variable_dict,
bvae_model=bvae_model)
ckpt_manager = tf.train.CheckpointManager(
ckpt, logdir, max_to_keep=5)
if not FLAGS.debug and ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
logging.info(
'Last checkpoint was restored: %s.', ckpt_manager.latest_checkpoint)
else:
tf.print('No checkpoint to load.')
logging.info('No checkpoint to load.')
start_step = infnet_optimizer.iterations.numpy()
logging.info('Training start from step: %s', start_step)
  train_iter = iter(train_ds)
for step_i in range(start_step, FLAGS.num_steps):
(encoder_grad_var, variance_dict, genmo_loss, metrics) = train_one_step(
train_iter.next(),
bvae_model,
genmo_optimizer,
infnet_optimizer,
prior_optimizer,
grad_variable_dict,
grad_sq_variable_dict)
train_loss = tf.reduce_mean(genmo_loss)
# Summarize
if step_i % 1000 == 0:
metrics.update({
'train_objective': train_loss,
'eval_metric/train': evaluate(
bvae_model, train_ds,
max_step=num_steps_per_epoch,
num_eval_samples=FLAGS.num_train_samples,
stick_breaking=FLAGS.stick_breaking,
tree_structure=FLAGS.tree_structure),
'eval_metric/valid': evaluate(
bvae_model, valid_ds,
num_eval_samples=FLAGS.num_eval_samples,
stick_breaking=FLAGS.stick_breaking,
tree_structure=FLAGS.tree_structure),
'eval_metric/test': evaluate(
bvae_model, test_ds,
num_eval_samples=FLAGS.num_eval_samples,
stick_breaking=FLAGS.stick_breaking,
tree_structure=FLAGS.tree_structure),
f'var/{FLAGS.grad_type}': encoder_grad_var,
})
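      # Note: FLAGS.temperature and FLAGS.scaling_factor are not defined in this
      # file, and 'relax' is not a valid --grad_type choice, so the branch below
      # looks like leftover dead code.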
if FLAGS.grad_type == 'relax':
if FLAGS.temperature is None:
metrics['relax/temperature'] = tf.math.exp(
bvae_model.log_temperature_variable)
if FLAGS.scaling_factor is None:
metrics['relax/scaling'] = bvae_model.scaling_variable
tf.print(step_i, metrics)
max_prob = maxprob_histogram(
bvae_model, train_ds,
stick_breaking=FLAGS.stick_breaking,
tree_structure=FLAGS.tree_structure)
with tensorboard_file_writer.as_default():
for k, v in metrics.items():
tf.summary.scalar(k, v, step=step_i)
tf.summary.histogram('max_prob', max_prob, step=step_i)
      if variance_dict:  # skip when no extra estimators were computed
tf.print(variance_dict)
for k, v in variance_dict.items():
tf.summary.scalar(f'var/{k}_minus_{FLAGS.grad_type}',
v - encoder_grad_var, step=step_i)
tf.summary.scalar(f'var/{k}', v, step=step_i)
# Checkpoint
if step_i % 10000 == 0:
ckpt_save_path = ckpt_manager.save()
logging.info('Saving checkpoint for step %d at %s.',
step_i, ckpt_save_path)
if FLAGS.bias_check:
if FLAGS.grad_type == 'reinforce_loo':
baseline_type = 'ars'
else:
baseline_type = 'reinforce_loo'
run_bias_check(bvae_model,
train_iter.next(),
FLAGS.grad_type,
baseline_type)
if __name__ == '__main__':
app.run(main)
|
UTF-8
|
Python
| false
| false
| 23,526
|
py
| 6,251
|
experiment_launcher.py
| 4,585
| 0.627136
| 0.61872
| 0
| 642
| 35.64486
| 80
|
uzdun/CodeableModels
| 13,116,830,169,227
|
d5d229ff26108d77fe90013fc2726ee0f1725de9
|
3ce6edccac106cdc656d50db5010909b0c88b235
|
/samples/shopping_model1.py
|
fee61ba7bfbdc642efbc582e747805a7c6d84db2
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/uzdun/CodeableModels
|
48ef6c84b8aae78ff005e257e23abb5cd8d4fec8
|
b60b1aaa7ffc407b14b7610f43ca8a7dbfee5591
|
refs/heads/master
| 2022-04-26T01:45:49.092396
| 2022-03-24T08:06:46
| 2022-03-24T08:06:46
| 126,474,037
| 13
| 3
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
*File Name:* samples/shopping_model1.py
This is a Codeable Models example realizing a simple sample shopping domain model.
It is inspired by the model at:
`<https://www.uml-diagrams.org/examples/online-shopping-domain-uml-diagram-example.html>`_
The example is explained in :ref:`first_class_model`.
"""
from codeable_models import CClass, CBundle
from plant_uml_renderer import PlantUMLGenerator
from metamodels.domain_metamodel import domain_metaclass
cart = CClass(domain_metaclass, "Cart")
item = CClass(domain_metaclass, "Item", attributes={
"quantity": int,
"price": float
})
product = CClass(domain_metaclass, "Product", attributes={
"id": str,
"name": str,
"price": float
})
cart_item_relation = cart.association(item, "in cart: [cart] 0..1 -> [item in cart] *")
#
# Alternative way to write the cart-item association
# cart.association(item, name="in cart", role_name="item in cart", multiplicity="*",
# source_role_name="cart", source_multiplicity="0..1")
item_product_relation = item.association(product, "product definition: [cart item] * -> [product] 1")
shopping_model = CBundle("shopping_model1", elements=cart.get_connected_elements())
def run():
print("***************** Shopping Model Example 1 *****************")
print('*** Plant UML Generation')
generator = PlantUMLGenerator()
generator.generate_class_models(shopping_model.name, [shopping_model, {}])
print(f"... Generated models in {generator.directory!s}/{shopping_model.name!s}")
if __name__ == "__main__":
run()
|
UTF-8
|
Python
| false
| false
| 1,567
|
py
| 135
|
shopping_model1.py
| 78
| 0.683472
| 0.678366
| 0
| 48
| 31.645833
| 101
|
POA-WHU/POA-spiders
| 17,033,840,306,732
|
d982b81ea569575cb0e828fa015bb8335ef4f091
|
e6e3871f8e0b781e5ae3cbd779a6f124311e0d28
|
/src/config.py
|
3654a98b189348616b8df092e1e16abc0b7bd419
|
[
"MIT"
] |
permissive
|
https://github.com/POA-WHU/POA-spiders
|
afd578fc0ec602ad80be919cfa6dc61f57e0ec3b
|
e61a1620cc0e84e10abfbdc85f22cf8ea85481c2
|
refs/heads/main
| 2023-04-15T02:27:20.464224
| 2021-04-11T11:02:42
| 2021-04-11T11:02:42
| 343,425,763
| 0
| 0
|
MIT
| false
| 2021-03-21T14:54:58
| 2021-03-01T13:27:41
| 2021-03-21T14:41:10
| 2021-03-21T14:54:57
| 92
| 0
| 1
| 0
|
Python
| false
| false
|
from pathlib import Path
project_path = Path(__file__).absolute().parent.parent
class Database:
user = 'root'
password = ''
host = 'localhost'
port = '3306'
database = 'test_db'
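    # SQLAlchemy-style DSN; it stops at the port, so the database name above is
    # presumably appended by the caller (an assumption, e.g. f"{server}/{database}").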
server = f'mysql+pymysql://{user}:{password}@{host}:{port}'
class Log:
format = '[%(name)-10s] %(levelname)-8s: %(message)s'
level = 'DEBUG'
user_agents_path = project_path / 'src/base/utilities/user_agents.json'
temp_pdf_path = project_path / 'src/temp.pdf'
|
UTF-8
|
Python
| false
| false
| 478
|
py
| 23
|
config.py
| 21
| 0.627615
| 0.612971
| 0
| 23
| 19.782609
| 71
|
LGamez/Mobike
| 9,070,970,935,108
|
7c00e6f9b4b27d9754a18c8094880575529c739d
|
4aeb8f0cd9bb2ee4d1909b9c856c5b37bdea38db
|
/misperris/migrations/0009_auto_20181028_1506.py
|
6d36544c639ea4eb3c3dfea04099f59b8f0990d0
|
[] |
no_license
|
https://github.com/LGamez/Mobike
|
fb21c845f69b3313d3dc47c429733f8525979014
|
85ea4030a61b8f7490212b212b101d74037f10e9
|
refs/heads/master
| 2020-07-22T01:06:09.546421
| 2019-12-06T01:21:56
| 2019-12-06T01:21:56
| 207,024,708
| 0
| 0
| null | false
| 2019-12-06T01:21:57
| 2019-09-07T20:53:43
| 2019-12-06T01:16:26
| 2019-12-06T01:21:56
| 8,736
| 0
| 0
| 0
|
Python
| false
| false
|
# Generated by Django 2.0.9 on 2018-10-28 18:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('misperris', '0008_auto_20181028_1449'),
]
operations = [
migrations.AlterField(
model_name='mascota',
name='foto',
field=models.FileField(blank=True, null=True, upload_to=''),
),
]
|
UTF-8
|
Python
| false
| false
| 409
|
py
| 39
|
0009_auto_20181028_1506.py
| 26
| 0.589242
| 0.513447
| 0
| 18
| 21.722222
| 72
|
Harish752/harish
| 3,229,815,425,674
|
5202bff75f78a60c740bf0ade5abf573f3f1c9f7
|
bd8e33b3d7cb24d69a75f05b3b1848d8fa290864
|
/fac.py
|
a9f8ff2ca6b2f7c6f6d4fe3c1b046fddd6deb38c
|
[] |
no_license
|
https://github.com/Harish752/harish
|
f590b606dc3ee6a34d288e40aa0018a6b67b2094
|
4db687ad72cd69b59a46ffba70ae1f307ad28f0a
|
refs/heads/master
| 2020-05-23T00:56:02.619517
| 2019-08-30T17:02:19
| 2019-08-30T17:02:19
| 186,579,708
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
ab=int(input())
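# Print every divisor of ab on one line, separated by spaces.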
for i in range(1,ab+1):
if(ab%i==0):
print(i,end=" ")
|
UTF-8
|
Python
| false
| false
| 73
|
py
| 79
|
fac.py
| 78
| 0.547945
| 0.506849
| 0
| 4
| 17.25
| 23
|
sagiraju19/django-sorcery
| 10,599,979,335,205
|
9da4f04ad767fb10708a208110ca6f29701d0f52
|
e32f6e0804e9ed9d5b868523c7762d21d39732eb
|
/tests/db/test_models.py
|
c38cbbd2b45784027ddb84def6f96084ac469a9a
|
[
"MIT"
] |
permissive
|
https://github.com/sagiraju19/django-sorcery
|
736a999d0da34e466216a36a91235bf3cc8c4a10
|
9b9cbecad6bde88943db56d3f63a07d0562eb860
|
refs/heads/master
| 2020-03-25T12:18:44.647427
| 2018-08-06T17:41:50
| 2018-08-06T17:41:50
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django_sorcery.db import models
from django_sorcery.utils import make_args
from ..base import TestCase
from ..models import (
Address,
Business,
CompositePkModel,
ModelOne,
ModelTwo,
Option,
Owner,
Part,
Vehicle,
VehicleType,
db,
)
class TestModels(TestCase):
def test_model_repr(self):
owner = Owner(id=1, first_name="Meaty", last_name="McManPipes")
self.assertEqual(repr(owner), "Owner(id=1, first_name='Meaty', last_name='McManPipes')")
def test_simple_repr(self):
vehicle = Vehicle()
self.assertEqual(models.simple_repr(vehicle), "Vehicle(id=None)")
vehicle.name = "Test"
self.assertEqual(models.simple_repr(vehicle), "Vehicle(id=None, name='Test')")
vehicle.id = 1234
self.assertTrue(models.simple_repr(vehicle), "Vehicle(id=1234, name='Test')")
vehicle.id = "abc"
self.assertTrue(models.simple_repr(vehicle), "Vehicle(id='abc', name='Test')")
vehicle.id = b"abc"
self.assertTrue(models.simple_repr(vehicle), "Vehicle(id='abc', name='Test')")
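        # NB: the three assertTrue calls above only check truthiness (the second
        # argument is the assertion message), so the expected-repr strings are
        # never actually compared; assertEqual was probably intended.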
def test_primary_keys(self):
pks = models.get_primary_keys(Vehicle, {"pk": 1234})
self.assertIsNone(pks)
def test_primary_keys_composite(self):
pks = models.get_primary_keys(CompositePkModel, {"id": 4321, "pk": 1234})
self.assertEqual(pks, (4321, 1234))
def test_primary_keys_composite_missing(self):
pks = models.get_primary_keys(CompositePkModel, {"pk": 1234})
self.assertIsNone(pks)
def test_primary_keys_from_instance(self):
vehicle = Vehicle(id=1234)
pks = models.get_primary_keys_from_instance(vehicle)
self.assertEqual(pks, 1234)
def test_primary_keys_from_instance_composite(self):
vehicle = CompositePkModel(id=1234, pk=4321)
pks = models.get_primary_keys_from_instance(vehicle)
self.assertEqual(pks, {"id": 1234, "pk": 4321})
def test_primary_keys_from_instance_with_none(self):
self.assertIsNone(models.get_primary_keys_from_instance(None))
def test_model_to_dict(self):
vehicle = Vehicle(
id=1,
name="vehicle",
owner=Owner(id=2, first_name="first_name", last_name="last_name"),
is_used=True,
paint="red",
type=VehicleType.car,
options=[Option(id=3, name="option 1"), Option(id=4, name="option 2")],
parts=[Part(id=5, name="part 1"), Part(id=6, name="part 2")],
)
self.assertEqual(
{
"created_at": None,
"is_used": True,
"name": "vehicle",
"options": [3, 4],
"owner": 2,
"paint": "red",
"parts": [5, 6],
"type": VehicleType.car,
},
models.model_to_dict(vehicle),
)
def test_model_to_dict_exclude(self):
vehicle = Vehicle(
id=1,
name="vehicle",
owner=Owner(id=2, first_name="first_name", last_name="last_name"),
is_used=True,
paint="red",
type=VehicleType.car,
options=[Option(id=3, name="option 1"), Option(id=4, name="option 2")],
parts=[Part(id=5, name="part 1"), Part(id=6, name="part 2")],
)
self.assertEqual(
{
"created_at": None,
"is_used": True,
"name": "vehicle",
"options": [3, 4],
"paint": "red",
"parts": [5, 6],
},
models.model_to_dict(vehicle, exclude=["type", "owner"]),
)
def test_model_to_dict_fields(self):
vehicle = Vehicle(
name="vehicle",
owner=Owner(first_name="first_name", last_name="last_name"),
is_used=True,
paint="red",
type=VehicleType.car,
options=[Option(name="option 1"), Option(name="option 2")],
parts=[Part(name="part 1"), Part(name="part 2")],
)
self.assertEqual(
{"is_used": True, "name": "vehicle", "paint": "red"},
models.model_to_dict(vehicle, fields=["name", "is_used", "paint"]),
)
def test_model_to_dict_private_relation(self):
obj = ModelTwo(pk=2, name="two", _model_one=ModelOne(pk=1, name="one"))
self.assertEqual({"name": "two"}, models.model_to_dict(obj))
def test_serialize_none(self):
self.assertIsNone(models.serialize(None))
def test_shallow_serialize(self):
vehicle = Vehicle(owner=Owner(first_name="first_name", last_name="last_name"), type=VehicleType.car)
self.assertDictEqual(
{
"_owner_id": None,
"created_at": None,
"id": None,
"is_used": None,
"name": None,
"paint": None,
"type": VehicleType.car,
},
models.serialize(vehicle),
)
def test_serialize_with_composites(self):
business = Business(
name="test",
location=Address(street="street 1", state="state 1", zip="zip 1"),
other_location=Address(street="street 2", state="state 2", zip="zip 2"),
)
self.assertDictEqual(
{
"id": None,
"name": "test",
"location": {"state": "state 1", "street": "street 1", "zip": "zip 1"},
"other_location": {"state": "state 2", "street": "street 2", "zip": "zip 2"},
},
models.serialize(business),
)
def test_serialize_with_relations(self):
vehicle = Vehicle(
name="vehicle",
owner=Owner(first_name="first_name", last_name="last_name"),
is_used=True,
paint="red",
type=VehicleType.car,
options=[Option(name="option 1"), Option(name="option 2")],
parts=[Part(name="part 1"), Part(name="part 2")],
)
self.assertDictEqual(
{
"_owner_id": None,
"created_at": None,
"id": None,
"is_used": True,
"paint": "red",
"type": VehicleType.car,
"name": "vehicle",
"owner": {"id": None, "first_name": "first_name", "last_name": "last_name"},
"options": [{"id": None, "name": "option 1"}, {"id": None, "name": "option 2"}],
"parts": [{"id": None, "name": "part 1"}, {"id": None, "name": "part 2"}],
},
models.serialize(vehicle, Vehicle.owner, Vehicle.options, Vehicle.parts),
)
class TestClone(TestCase):
def setUp(self):
super(TestClone, self).setUp()
self.vehicle = Vehicle(
name="vehicle",
owner=Owner(first_name="first_name", last_name="last_name"),
is_used=True,
paint="red",
type=VehicleType.car,
options=[Option(name="option 1"), Option(name="option 2")],
parts=[Part(name="part 1"), Part(name="part 2")],
)
db.add(self.vehicle)
db.flush()
db.expire_all()
def test_clone_none(self):
self.assertIsNone(models.clone(None))
def test_shallow_clone(self):
clone = models.clone(self.vehicle)
db.add(clone)
db.flush()
self.assertNotEqual(clone, self.vehicle)
self.assertNotEqual(clone.as_dict(), self.vehicle.as_dict())
self.assertNotEqual(clone.id, self.vehicle.id)
self.assertEqual(clone.name, self.vehicle.name)
# self.assertEqual(models.model_to_dict(clone), models.model_to_dict(self.vehicle))
self.assertIsNone(clone.owner)
self.assertEqual(clone.options, [])
self.assertEqual(clone.parts, [])
def test_clone_with_relation(self):
clone = models.clone(
self.vehicle, Vehicle.owner, paint="blue", options=self.vehicle.options, parts=self.vehicle.parts
)
db.add(clone)
db.flush()
self.assertNotEqual(clone, self.vehicle)
self.assertNotEqual(clone.as_dict(), self.vehicle.as_dict())
self.assertNotEqual(clone.id, self.vehicle.id)
self.assertEqual(clone.paint, "blue")
self.assertEqual(clone.name, self.vehicle.name)
# self.assertNotEqual(models.model_to_dict(clone), models.model_to_dict(self.vehicle))
clone.paint = "red"
# self.assertEqual(models.model_to_dict(clone), models.model_to_dict(self.vehicle))
self.assertNotEqual(clone.owner, self.vehicle.owner)
self.assertNotEqual(clone.owner.as_dict(), self.vehicle.owner.as_dict())
self.assertNotEqual(clone.owner.id, self.vehicle.owner.id)
# self.assertEqual(models.model_to_dict(clone.owner), models.model_to_dict(self.vehicle.owner))
self.assertEqual(clone.options, self.vehicle.options)
self.assertEqual(clone.parts, self.vehicle.parts)
def test_clone_with_composite(self):
business = Business(
name="test",
location=Address(street="street 1", state="state 1", zip="zip 1"),
other_location=Address(street="street 2", state="state 2", zip="zip 2"),
)
clone = models.clone(business)
self.assertNotEqual(clone, business)
self.assertDictEqual(models.model_to_dict(clone), models.model_to_dict(business))
self.assertNotEqual(id(clone.location), id(business.location))
self.assertNotEqual(id(clone.other_location), id(business.other_location))
self.assertEqual(clone.location, business.location)
self.assertEqual(clone.other_location, business.other_location)
def test_clone_with_relation_options(self):
clone = models.clone(self.vehicle, make_args(Vehicle.owner, first_name="test"))
db.add(clone)
db.flush()
self.assertNotEqual(clone, self.vehicle)
self.assertNotEqual(clone.as_dict(), self.vehicle.as_dict())
self.assertNotEqual(clone.id, self.vehicle.id)
# self.assertEqual(models.model_to_dict(clone), models.model_to_dict(self.vehicle))
self.assertNotEqual(clone.owner, self.vehicle.owner)
self.assertNotEqual(clone.owner.as_dict(), self.vehicle.owner.as_dict())
self.assertNotEqual(clone.owner.id, self.vehicle.owner.id)
# self.assertNotEqual(models.model_to_dict(clone.owner), models.model_to_dict(self.vehicle.owner))
self.assertEqual(clone.owner.first_name, "test")
def test_clone_list_relation(self):
clone = models.clone(self.vehicle, Vehicle.options)
db.add(clone)
db.flush()
self.assertNotEqual(clone, self.vehicle)
self.assertNotEqual(clone.as_dict(), self.vehicle.as_dict())
self.assertNotEqual(clone.id, self.vehicle.id)
# self.assertEqual(models.model_to_dict(clone), models.model_to_dict(self.vehicle))
for cloned, orig in zip(clone.options, self.vehicle.options):
self.assertNotEqual(cloned, orig)
self.assertNotEqual(cloned.as_dict(), orig.as_dict())
self.assertNotEqual(cloned.id, orig.id)
# self.assertEqual(models.model_to_dict(cloned), models.model_to_dict(orig))
|
UTF-8
|
Python
| false
| false
| 11,481
|
py
| 12
|
test_models.py
| 10
| 0.571901
| 0.561101
| 0
| 322
| 34.65528
| 109
|
rspeer/dominionstats
| 3,650,722,249,852
|
7646fdc7533384492ec81d02ea24d1fe45716118
|
ea836aaf2e563ae1dd4bbddf1ba57f05dc8b2396
|
/queries.py
|
08599fc884b2617df456214c716a8757cf0499b1
|
[] |
no_license
|
https://github.com/rspeer/dominionstats
|
8a4a2827d6093bcaac8ccdbfc75ae4b4ba1eb713
|
bfc0132aed6827559173511b83e1726e04b92e42
|
refs/heads/master
| 2016-09-06T05:04:25.680582
| 2011-11-26T06:51:47
| 2011-11-26T06:51:47
| 1,540,471
| 9
| 5
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import pymongo
if __name__ == '__main__':
    c = pymongo.MongoClient()  # pymongo.Connection was removed in pymongo 3.x
    db = c.test
    games = db.games
    ct = 0
    print(games.find({'players': 'rrenaud'}))
    # Cursor.min() takes a list of (field, limit) pairs; recent pymongo/MongoDB
    # versions also expect a hint() alongside min().
    for g in games.find({'players': 'rrenaud'}).min([('_id', 'game-2011')]):
        print(g['_id'])
    # print(games.find({'turns.plays': ["Fishing Village"]}).count())
    # print(games.find({'turns': {
    #     '$elemMatch': {'plays': 'Fishing Village'}
    # }}).count())
|
UTF-8
|
Python
| false
| false
| 491
|
py
| 75
|
queries.py
| 49
| 0.480652
| 0.470468
| 0
| 15
| 30.266667
| 74
|
zhiwei-Feng/Coursera-Machine-Learning-Assignment-Solutions
| 34,359,740,321
|
12a8864bc03de830a4b64737e04f04f5e1542a76
|
f05b5a08981b13dc68fd4429b6b2f819c302a00d
|
/machine-learning-ex3/predictOneVsAll.py
|
1b663c5e25c15b003b34556a513270988b94c4dc
|
[] |
no_license
|
https://github.com/zhiwei-Feng/Coursera-Machine-Learning-Assignment-Solutions
|
a7bbdfe863607c075913f3a6c2910d0e649cd44f
|
11716efcc512c2b9473f585a91c60fc6c88eb393
|
refs/heads/master
| 2020-06-03T12:15:24.805203
| 2019-07-21T03:18:15
| 2019-07-21T03:18:15
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from sigmoid import sigmoid
def predict_one_vs_all(all_theta, X):
m = X.shape[0]
X = np.c_[np.ones(m), X]
h_theta = sigmoid(X @ all_theta.T)
    p = np.argmax(h_theta, axis=1) + 1  # argmax is 0-based but labels are 1-based, so add one
return p
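# Shape sketch (inferred from the matrix product above): all_theta is
# (num_labels, n + 1); with the bias column prepended X is (m, n + 1), h_theta
# is (m, num_labels), and p is an (m,) vector of labels in 1..num_labels.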
|
UTF-8
|
Python
| false
| false
| 251
|
py
| 73
|
predictOneVsAll.py
| 49
| 0.609562
| 0.593625
| 0
| 11
| 21.818182
| 62
|
chiaminchuang/Leetcode
| 8,452,495,658,187
|
7f465ec1af9edf980e2a077447559ea4602dacc5
|
f17c46839a6309f8c9618ae3406d71a771840816
|
/Easy/217-Contains Duplicate/217.py
|
2bcf8a25dde1f6b3f2a6204beec25bc78792ef17
|
[] |
no_license
|
https://github.com/chiaminchuang/Leetcode
|
a3c9332dadc17961cfaa5ccaa6363b47305787b5
|
8b10d93d804ba19922c04e4b511b9ad71fc24f5b
|
refs/heads/master
| 2021-09-26T08:46:02.583348
| 2018-10-28T09:42:28
| 2018-10-28T09:42:28
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
nums = self.quickSort(nums)
for i in range(0, len(nums)-1):
if nums[i] == nums[i+1]:
return True
return False
def quickSort(self, nums):
if len(nums) <= 1:
return nums
index = random.randint(0, len(nums)-1)
pivot = nums[index]
less, greater = [], []
for i in range(0, len(nums)):
if nums[i] <= pivot and i != index:
less.append(nums[i])
elif nums[i] > pivot:
greater.append(nums[i])
return self.quickSort(less) + [pivot] + self.quickSort(greater)
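        # Note: sorting makes this O(n log n); a hash-based one-liner would
        # also work here, e.g. return len(nums) != len(set(nums)).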
|
UTF-8
|
Python
| false
| false
| 828
|
py
| 154
|
217.py
| 154
| 0.450483
| 0.442029
| 0
| 28
| 27.642857
| 71
|
mfraezz/osf.io
| 12,343,736,045,538
|
d1f4a6896b268c812353056509342908962624c0
|
608cf796ec871f2cba9edb5b7b22475a9a42f515
|
/osf/models/registration_bulk_upload_row.py
|
65e5a7c77c3eac55bce873180240964a45de05c9
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
https://github.com/mfraezz/osf.io
|
2a8f694d871437b581528096c5793e9fdcd6f286
|
04810812a89e4d2a990a44898969576e4d174e13
|
refs/heads/develop
| 2023-08-25T20:31:22.287806
| 2021-12-01T02:17:39
| 2021-12-01T02:17:39
| 26,929,858
| 0
| 0
|
Apache-2.0
| true
| 2021-08-03T05:23:21
| 2014-11-20T20:12:10
| 2019-11-14T17:39:54
| 2021-08-03T05:23:16
| 192,581
| 0
| 0
| 88
|
Python
| false
| false
|
import hashlib
from django.db import models
from osf.models.base import BaseModel
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import ensure_bytes
class RegistrationBulkUploadRow(BaseModel):
"""Defines a database table that stores information about to-be-created (draft) registrations.
"""
# The bulk upload to which this registration belongs
upload = models.ForeignKey('RegistrationBulkUploadJob', blank=False, null=True, on_delete=models.CASCADE)
    # The draft registration that has been successfully created
draft_registration = models.ForeignKey('DraftRegistration', blank=True, null=True, on_delete=models.CASCADE)
# A flag that indicates whether the draft registration has been created
is_completed = models.BooleanField(default=False)
# A flag that indicates whether the draft registration creation is in progress
is_picked_up = models.BooleanField(default=False)
# The raw text string of a row in the CSV template
csv_raw = models.TextField(default='', blank=False, null=False)
# The parsed version of the above raw text string.
# TODO: add a comment here for the expected format of the value
csv_parsed = DateTimeAwareJSONField(default=dict, blank=False, null=False)
row_hash = models.CharField(default='', blank=False, null=False, unique=True, max_length=255)
@classmethod
def create(cls, upload, csv_raw, csv_parsed):
registration_row = cls(upload=upload, draft_registration=None, is_completed=False,
is_picked_up=False, csv_raw=csv_raw, csv_parsed=csv_parsed,
row_hash=hashlib.md5(ensure_bytes(csv_raw)).hexdigest(),)
return registration_row
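# Usage sketch: RegistrationBulkUploadRow.create(upload, csv_raw, csv_parsed)
# returns an unsaved instance; because row_hash (an MD5 of the raw CSV line) is
# unique, saving a duplicate of an already-uploaded row fails at the database.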
|
UTF-8
|
Python
| false
| false
| 1,761
|
py
| 8
|
registration_bulk_upload_row.py
| 3
| 0.725724
| 0.723453
| 0
| 39
| 44.153846
| 112
|
amanvats/latestNewsReader
| 12,017,318,508,878
|
b27a2e2a08e1a1de4e61a90e325685761be8124e
|
6caf47029db69694ca23da5071693bf3b0025e5a
|
/news.py
|
47d92c98a7bb3ce63713e3dfe6bdc92b39305fd9
|
[] |
no_license
|
https://github.com/amanvats/latestNewsReader
|
39d11e9aaaa2e6712cb4ea615942768604be3324
|
9e7183cf90904728ccc60158d663abcd00086c8c
|
refs/heads/master
| 2020-12-02T09:58:26.673764
| 2017-07-09T07:17:22
| 2017-07-09T07:17:22
| 96,667,437
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
import os
from bs4 import BeautifulSoup
from gtts import gTTS
def news():
content = " "
# the target we want to open
url = 'http://www.hindustantimes.com/top-news'
# open with GET method
resp = requests.get(url)
# http_response 200 means OK status
if resp.status_code == 200:
print("Successfully Scrapped")
print("The news is as follow :-\n")
        # we need a parser; Python's built-in HTML parser is enough
soup = BeautifulSoup(resp.text, 'html.parser')
#print(soup.prettify())
        # l is the <ul> element that contains all the news text
l = soup.find("ul", attrs={'class': 'latest-news-bx more-latest-news more-separate'})
        # Notice: here it's findAll, above it was find only
# find all the elements of a, i.e anchor
for i in l.findAll("a"):
content += i.text
content += "."
content += ","
return content
else:
content = "Error Occurred"
return content
news_found = news()
language = 'en'
myobj = gTTS(text=news_found, lang=language, slow=False)
myobj.save("news.mp3")
os.system("news.mp3")
|
UTF-8
|
Python
| false
| false
| 1,173
|
py
| 2
|
news.py
| 1
| 0.608696
| 0.601023
| 0
| 42
| 26.904762
| 93
|
shjang1013/Algorithm
| 850,403,559,936
|
b9aed4c997113c4a17af8ee56a1311777c37bf31
|
52cb25dca22292fce4d3907cc370098d7a57fcc2
|
/BAEKJOON/다이나믹 프로그래밍/2748_피보나치 수2.py
|
a308a3b851ae4dfbe9fd640c1f0a479e9f4dac3c
|
[] |
no_license
|
https://github.com/shjang1013/Algorithm
|
c4fc4c52cbbd3b7ecf063c716f600d1dbfc40d1a
|
33f2caa6339afc6fc53ea872691145effbce0309
|
refs/heads/master
| 2022-09-16T12:02:53.146884
| 2022-08-31T16:29:04
| 2022-08-31T16:29:04
| 227,843,135
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Fibonacci using dynamic programming
def fibo(N):
f = [0, 1]
for i in range(2, N+1):
f.append(f[i-2]+f[i-1])
return f[N]
N = int(input())
print(fibo(N))
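# Bottom-up DP fills the table in O(N) time, avoiding the exponential
# recomputation of the naive recursive definition.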
|
UTF-8
|
Python
| false
| false
| 212
|
py
| 279
|
2748_피보나치 수2.py
| 278
| 0.517857
| 0.482143
| 0
| 9
| 17.666667
| 31
|
foamliu/Mobile-Image-Colorization
| 12,859,132,106,746
|
529a08ef5c5d8e8ed083e5da85bfdbc59bd4d0f2
|
6d0a111cee48492ec52f738fd9df7215defe8aff
|
/config.py
|
58311c4a911f5017ad6bfda0c0e853ec4e554c62
|
[
"MIT"
] |
permissive
|
https://github.com/foamliu/Mobile-Image-Colorization
|
852f7cc39cca0a1825f2ba6dee2695a66023b52b
|
ce7556100c04fe11120e1796de419355672eea17
|
refs/heads/master
| 2020-09-08T15:58:09.864899
| 2019-12-12T15:10:22
| 2019-12-12T15:10:22
| 221,177,716
| 3
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # sets device for model and PyTorch tensors
im_size = 256
channel = 3
batch_size = 32
epochs = 10000
# Training parameters
num_workers = 4 # for data-loading; right now, only 1 works with h5py
grad_clip = 5. # clip gradients at an absolute value of
print_freq = 100 # print training/validation stats every __ batches
checkpoint = None # path to checkpoint, None if none
num_train = 1281167
num_valid = 50000
num_test = 100000
num_classes = 313
kernel = 3
epsilon = 1e-8
nb_neighbors = 5
# temperature parameter T
T = 0.38
|
UTF-8
|
Python
| false
| false
| 615
|
py
| 11
|
config.py
| 9
| 0.723577
| 0.64878
| 0
| 24
| 24.625
| 114
|
seonghunYang/TIL
| 171,798,692,893
|
be815854b239d307bc59cb6ca3e70a2fff837fdd
|
a2d55794a22bc6e3572c9f4b06736b1f2511847d
|
/python 기본/built_in_fuction.py
|
122838fef04bdac3a5ee9748d33c582b01436ad1
|
[] |
no_license
|
https://github.com/seonghunYang/TIL
|
2d3061b0cd565d7c70658c2fc6b13a01716d9541
|
1ada140a5e9f1cc6280bc01448c5daaf9a43bf86
|
refs/heads/master
| 2023-01-21T21:38:45.652946
| 2020-12-01T21:36:51
| 2020-12-01T21:36:51
| 278,396,931
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
a = [1, -1, -3, 4]
print(abs(-1))
print(all([1, 2, 3]))
print(any([1, 2, 3, 0]))
print(chr(97))
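# Expected output so far: abs(-1) -> 1, all([1, 2, 3]) -> True (every element
# truthy), any([1, 2, 3, 0]) -> True (at least one truthy), chr(97) -> 'a'.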
print()
print(a)
def positive(x):
print(x)
return x > 1
a = [1, -3, 2, 0, -5, 6]
print(list(filter(positive,a)))
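# filter() keeps the elements for which positive(x) is True (i.e. x > 1), so
# this prints [2, 6]; positive() also prints each candidate as a side effect.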
print(a)
print(max("ABS"))
|
UTF-8
|
Python
| false
| false
| 244
|
py
| 57
|
built_in_fuction.py
| 23
| 0.540984
| 0.454918
| 0
| 14
| 16.5
| 31
|
Yousef11111/tensorD
| 13,297,218,777,793
|
136da2cd1d1e86995a178bad0e1ef3c6b015b384
|
6a8a93fae7503f4fc0d127fd36a363c170356adf
|
/tensorD/dataproc/provider.py
|
4852b528cf2e67ea5dad699a065ddb2a525c6cf6
|
[
"MIT"
] |
permissive
|
https://github.com/Yousef11111/tensorD
|
3ae34b44dd95eb4b9c2a894a9daeeb94cd4c64c5
|
342e360d348713a18b4e80fcc0f840a136748b66
|
refs/heads/master
| 2020-04-10T00:47:35.499414
| 2018-12-09T21:57:50
| 2018-12-09T21:57:50
| 159,216,309
| 1
| 0
|
MIT
| true
| 2018-11-26T18:44:10
| 2018-11-26T18:44:09
| 2018-10-04T14:44:34
| 2018-02-02T16:13:51
| 12,481
| 0
| 0
| 0
| null | false
| null |
# Created by ay27 at 17/2/7
import numpy as np
class Provider(object):
"""
    Base Data Provider; note that the batch size must be given in the initializer.
"""
@property
def batch_size(self):
raise NotImplementedError
def full_tensor(self):
"""
Returns
-------
tf.Tensor
            dense or sparse tensor holding the full data
"""
raise NotImplementedError
def data_queue(self, shuffled=True):
"""
Parameters
----------
shuffled : bool
shuffle the queue data or not, default is **True**
Returns
-------
tf.Tensor
a data queue to read data continuous according to the **fix** batch size
"""
raise NotImplementedError
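# Hedged example (added; not part of the original file): a minimal concrete
# Provider serving an in-memory dense tensor, kept commented out like the
# class below. The tf import is an assumption taken from the tf.Tensor
# return types mentioned in the docstrings above.
#
# import tensorflow as tf
#
# class InMemoryProvider(Provider):
#     def __init__(self, data, batch_size=1):
#         self._data = np.asarray(data)
#         self._batch_size = batch_size
#
#     @property
#     def batch_size(self):
#         return self._batch_size
#
#     def full_tensor(self):
#         return tf.constant(self._data)
#
#     def data_queue(self, shuffled=True):
#         ds = tf.data.Dataset.from_tensor_slices(self._data)
#         if shuffled:
#             ds = ds.shuffle(buffer_size=len(self._data))
#         return ds.batch(self._batch_size)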
# class OrdProvider(Provider):
# """
# Data Provider, split data in given order(mode).
# """
#
# def __init__(self, reader, order, task_cnt=1, task_index=0, batch_size=1, sparse=False, shape=None):
# self.reader = reader
# self.order = order
# self.task_index = task_index
# self.task_cnt = task_cnt
# self.is_sparse = sparse
# self.shape = shape
# self.batch_size = batch_size
#
# # store in dense
# self.data = None
#
# self._split_size = None
#
# self._offset = None
#
# if self.is_sparse:
# self._read_sparse()
# else:
# self._read_dense()
#
# def __iter__(self):
# return self
#
# def __next__(self):
# """
#
# Yields
# ------
# ndarray
# batch of data
# """
# cur_index = 0
# while cur_index < self._split_size:
# end = min(cur_index + self.batch_size, self._split_size)
# yield self.data[cur_index:end]
# cur_index += self.batch_size
# raise StopIteration()
#
# def _read_sparse(self):
# input_data = np.array([row for row in self.reader.next()])
# if not self.shape:
# self.shape = np.max(input_data, axis=0)[:self.order]
# for _ in range(self.order):
# self.shape[_] = int(self.shape[_])
#
# self._split_size = int(self.shape[self.order] / self.task_cnt)
# self._offset = self.task_index * self._split_size
#
# split_shape = self.shape.copy()
# split_shape[self.order] = self._split_size
# self.data = np.zeros(split_shape)
# for row in input_data:
# if self._offset <= row[self.order] < self._offset + self._split_size:
# row[self.order] -= self._offset
# self.data.itemset(row[:-1], row[-1])
#
# def _read_dense(self):
# self.data = np.asarray(
# [row for (i, row) in enumerate(self.reader.next()) if
# self._offset <= i < self._offset + self._split_size])
# if not self.shape:
# self.shape = self.data.shape
#
# self._split_size = int(self.shape[self.order] / self.task_cnt)
# self._offset = self.task_index * self._split_size
|
UTF-8
|
Python
| false
| false
| 3,140
|
py
| 67
|
provider.py
| 47
| 0.520064
| 0.515924
| 0
| 111
| 27.288288
| 106
|
kolaSamuel/CodeChef
| 12,979,391,211,437
|
840f0f5c53976a244cdad0ea31bf02f9f0e0bb1f
|
ed0dc3aa118dd058a2b10127d367bcbcd46234fe
|
/Chef and Sign Sequences.py
|
51112bfa04b91716481a4abd0220a7aa0f33a69b
|
[] |
no_license
|
https://github.com/kolaSamuel/CodeChef
|
a73cd3593d76d59e633a5c279c05a782159ccddd
|
2d531de1194b8b139540c9a1967f036272925193
|
refs/heads/master
| 2020-03-14T02:36:42.763221
| 2018-04-28T11:07:12
| 2018-04-28T11:07:12
| 131,402,458
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
t = input()
for i in xrange(t):
    s = raw_input()
    ans = 0; state = 0
    #print s
    for j in s:
        if j == '>':
            state += 1
            ans = max(state, ans)
        elif j == '<':
            if state == 0: ans += 1
            else: state -= 1
    print ans+1 if ans > 1 else 0
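# Worked example (added; not in the original file): for s = "<><",
# scanning left to right:
#   '<' with state 0 -> ans = 1
#   '>'              -> state = 1, ans = max(1, 1) = 1
#   '<' with state 1 -> state = 0
# ans == 1 is not > 1, so the final line prints 0.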
|
UTF-8
|
Python
| false
| false
| 295
|
py
| 57
|
Chef and Sign Sequences.py
| 56
| 0.410169
| 0.379661
| 0
| 13
| 21.692308
| 33
|
Kontowicz/Daily-coding-problem
| 6,193,342,843,057
|
5219ff67f06b5b052c0d7b4336daaa7ec026aa9f
|
3947d30d4362a7bffc1f42a28610d902f5ce6760
|
/day_206.py
|
5ed16fcb1bddcd99ff6ad4c399d1978a780be54b
|
[] |
no_license
|
https://github.com/Kontowicz/Daily-coding-problem
|
899b5978cd076fe86883ad01938c5704a7d88003
|
edd97375bc48c243950622651dd6286f3611f3c2
|
refs/heads/master
| 2021-06-29T17:22:09.357868
| 2020-10-01T19:21:44
| 2020-10-01T19:21:44
| 161,390,419
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# A permutation can be specified by an array P,
# where P[i] represents the location of the element at i in the permutation.
def solution(array, order):
result = []
for item in order:
result.append(array[item])
return result
assert solution(["a", "b", "c"], [2, 1, 0]) == ["c", "b", "a"]
|
UTF-8
|
Python
| false
| false
| 302
|
py
| 171
|
day_206.py
| 170
| 0.635762
| 0.625828
| 0
| 12
| 24.166667
| 76
|
LucBanda/icfp17_Punters
| 14,688,788,183,371
|
1c338afff067bb6e6c4b5eebd87eba08dd48d9a9
|
72a2a2fe4c5dbbcd8b991e108f0d19b1253df144
|
/UCT.py
|
9f8882f4ed19318e224ce3d81961ac6b1123a71e
|
[] |
no_license
|
https://github.com/LucBanda/icfp17_Punters
|
94280c2f008b6dcafea939beec7e0f4cfc75c3c4
|
b8f0242764cbe132c7a64df24ee0b9493de2779a
|
refs/heads/master
| 2021-01-02T23:51:13.875698
| 2017-09-04T07:59:22
| 2017-09-04T07:59:22
| 99,509,536
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# This is a very simple implementation of the UCT Monte Carlo Tree Search algorithm in Python 2.7.
# The function UCT(rootstate, itermax, verbose = False) is towards the bottom of the code.
# It aims to have the clearest and simplest possible code, and for the sake of clarity, the code
# is orders of magnitude less efficient than it could be made, particularly by using a
# state.GetRandomMove() or state.DoRandomRollout() function.
#
# Example GameState classes for Nim, OXO and Othello are included to give some idea of how you
# can write your own GameState and use UCT in your 2-player game. Change the game to be played in
# the UCTPlayGame() function at the bottom of the code.
#
# Written by Peter Cowling, Ed Powley, Daniel Whitehouse (University of York, UK) September 2012.
#
# Licence is granted to freely use and distribute for any sensible/legal purpose so long as this comment
# remains in any distributed code.
#
# For more information about Monte Carlo Tree Search check out our web site at www.mcts.ai
from math import *
import time
import random
import sys
import numpy as np
from functools import reduce
class Node:
""" A node in the game tree. Note wins is always from the viewpoint of playerJustMoved.
Crashes if state not specified.
"""
def __init__(self, move = None, parent = None, state = None):
self.move = move # the move that got us to this node - "None" for the root node
self.parentNode = parent # "None" for the root node
self.childNodes = np.array([])
self.wins = 0
self.visits = 0
self.untriedMoves = state.GetMoves() # future child nodes
self.playerJustMoved = state.playerJustMoved # the only part of the state that the Node needs later
def UCTSelectChild(self):
""" Use the UCB1 formula to select a child node. Often a constant UCTK is applied so we have
            lambda c: c.wins/c.visits + UCTK * sqrt(2*log(self.visits)/c.visits) to vary the amount of
exploration versus exploitation.
"""
s = sorted(self.childNodes, key=lambda c: c.wins / c.visits + sqrt(2 * log(self.visits) / c.visits))[-1]
return s
def AddChild(self, m, s):
""" Remove m from untriedMoves and add a new child node for this move.
Return the added child node
"""
n = Node(move = m, parent = self, state = s)
self.untriedMoves.remove(m)
self.childNodes = np.append(self.childNodes, n)
return n
def Update(self, result):
""" Update this node - one additional visit and result additional wins. result must be from the viewpoint of playerJustmoved.
"""
self.visits += 1
self.wins += result
def __repr__(self):
return "[M:" + str(self.move) + " W/V:" + str(self.wins) + "/" + str(self.visits) + " U:" + str(self.untriedMoves) + "]"
def TreeToString(self, indent):
s = self.IndentString(indent) + str(self)
for c in self.childNodes:
s += c.TreeToString(indent+1)
return s
def IndentString(self,indent):
s = "\n"
for i in range (1,indent+1):
s += "| "
return s
def ChildrenToString(self):
s = ""
for c in self.childNodes:
s += str(c) + "\n"
return s
class UCT:
def __init__(self, rootstate, timeout, depthMax, displayDebug = False):
self.timeout = timeout
self.depthMax = depthMax
self.rootNode = Node(state = rootstate)
self.rootState = rootstate
self.displayDebug = displayDebug
self.multipleRollout = 1
self.charged = False
def playMove(self, move):
for child in self.rootNode.childNodes:
if child.move == move:
self.rootNode = child
break
self.rootState.DoMove(move)
def run(self):
""" Conduct a UCT search for itermax iterations starting from rootstate.
Return the best move from the rootstate.
Assumes 2 alternating players (player 1 starts), with game results in the range [0.0, 1.0]."""
startTime = time.time()
number_of_addchild = 0
number_of_evolution = 0
while(time.time() - startTime < self.timeout-0.20):
#iterMax = 20000
#while iterMax:
# iterMax -= 1
explored = 0
node = self.rootNode
state = self.rootState.Clone()
#clear graphics
if self.displayDebug:
state.clearDisplay()
state.display('b-')
# Select
            while node.untriedMoves == [] and len(node.childNodes) > 0: # node is fully expanded and non-terminal
node = node.UCTSelectChild()
state.DoMove(node.move)
explored += 1
if self.displayDebug:
state.displayMove(node.move, "r-")
# Expand
if node.untriedMoves != []: # if we can expand (i.e. state/node is non-terminal)
m = random.choice(node.untriedMoves)
state.DoMove(m)
node = node.AddChild(m,state) # add child and descend tree
number_of_addchild += 1
number_of_evolution += 1
if self.displayDebug:
state.displayMove(m, 'g-')
else:
break
# Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function
bestResult = 0
for i in range(0, self.multipleRollout):
#if True:
stateRollout = state.Clone()
depthAllowed=self.depthMax
rolloutNode = node
while depthAllowed > 0: # while state is non-terminal
m = stateRollout.GetRandomMove()
if m:
stateRollout.DoMove(m)
number_of_evolution += 1
if self.displayDebug:
stateRollout.displayMove(m, 'y-')
depthAllowed -= 1
# Backpropagate
while rolloutNode != None: # backpropagate from the expanded node and work back to the root node
rolloutNode.Update(stateRollout.GetResult(node.playerJustMoved)) # state is terminal. Update node with result from POV of node.playerJustMoved
rolloutNode = rolloutNode.parentNode
if self.charged and explored < 5:
self.multipleRollout = max(self.multipleRollout - 1, 1)
elif not self.charged and explored > 1:
self.charged = True
elif explored > 10:
self.multipleRollout += 1
print("explored : " + str(number_of_addchild) + " evolved : " + str(number_of_evolution) + " explored : " + str(explored) + " multiple factor : " + str(self.multipleRollout), file=sys.stderr)
bestMoves = sorted(self.rootNode.childNodes, key = lambda c: c.visits)
if bestMoves:
return bestMoves[-1].move # return the move that was most visited
else:
return None
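class NimState:
    """Hedged example (added; not in the original file): a minimal Nim game
    state exposing the interface that UCT/Node rely on above (Clone, DoMove,
    GetMoves, GetRandomMove, GetResult, playerJustMoved). The display helpers
    are omitted, so it only works with displayDebug=False.
    """
    def __init__(self, chips=15):
        self.chips = chips
        self.playerJustMoved = 2  # player 1 moves first
    def Clone(self):
        st = NimState(self.chips)
        st.playerJustMoved = self.playerJustMoved
        return st
    def DoMove(self, move):
        self.chips -= move
        self.playerJustMoved = 3 - self.playerJustMoved
    def GetMoves(self):
        return [m for m in (1, 2, 3) if m <= self.chips]
    def GetRandomMove(self):
        moves = self.GetMoves()
        return random.choice(moves) if moves else None
    def GetResult(self, playerjm):
        # Whoever took the last chip (playerJustMoved) wins.
        return 1.0 if self.playerJustMoved == playerjm else 0.0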
|
UTF-8
|
Python
| false
| false
| 7,197
|
py
| 9
|
UCT.py
| 6
| 0.593442
| 0.586216
| 0
| 169
| 41.591716
| 199
|
BlackLacost/.dotfiles
| 6,021,544,194,459
|
f83c3b8c2b80edee5f23f6b99b749685fb290ff6
|
dc8c31f934fbdc9e99acae19a149f490608f85d5
|
/config/anki/addons21/814349176/awesometts/gui/__init__.py
|
950e5f546d27f0c613708aa6458bddf18630a196
|
[] |
no_license
|
https://github.com/BlackLacost/.dotfiles
|
db54b64c648f2a22c1e580543b8e2b2ddd8eb35e
|
9df3c2b2811b6b134b5ba2f3d7bb3c7bdc42e691
|
refs/heads/master
| 2023-05-28T12:41:12.718697
| 2023-05-12T16:05:52
| 2023-05-12T16:05:52
| 181,217,550
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# AwesomeTTS text-to-speech add-on for Anki
# Copyright (C) 2010-Present Anki AwesomeTTS Development Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GUI classes for AwesomeTTS
"""
from .common import (
Action,
Button,
HTMLButton,
Filter,
ICON,
)
from .configurator import Configurator
from .generator import (
BrowserGenerator,
EditorGenerator,
)
from .stripper import BrowserStripper
from .templater import Templater
from .updater import Updater
from .reviewer import Reviewer
__all__ = [
# common
'Action',
'Button',
'HTMLButton',
'Filter',
'ICON',
# dialog windows
'Configurator',
'BrowserGenerator',
'EditorGenerator',
'BrowserStripper',
'Templater',
'Updater',
# headless
'Reviewer',
]
|
UTF-8
|
Python
| false
| false
| 1,477
|
py
| 142
|
__init__.py
| 84
| 0.670278
| 0.666215
| 0
| 64
| 21.078125
| 71
|
lgrosz/machine_learning_library
| 16,441,134,815,104
|
d3c01c466b80eabc7aa378656c2ae90e005611d6
|
ea8e1a3c66a2174708f0e527e762f5ebde490d1a
|
/myml/perceptron.py
|
f8c9e4cbcdb99bcc7bf8d19f9ab2306e78a457ad
|
[] |
no_license
|
https://github.com/lgrosz/machine_learning_library
|
9a8a0877cb803eb3ad0136147ee6f8da3d1fd4be
|
218e8595ffab8ee3c82d33334be7e08651278204
|
refs/heads/master
| 2023-04-30T02:54:23.247215
| 2020-12-09T00:02:11
| 2020-12-09T00:02:11
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
class Perceptron:
def __init__(self, rate, niter):
self.rate = rate
self.niter = niter
def fit(self, X, d):
"""
Creates the hypothesis w from X and d.
        Keyword arguments:
        :param numpy.ndarray X: the training input domain
        :param numpy.ndarray d: the desired output for the training input domain
"""
# todo check types, this should be done for all public facing functions
# make x[j, 0] = 1, so we can use w[0] as the bias
X = np.array([np.insert(x, 0, 1) for x in X])
self.errors = np.array([])
nfeatures = X[0].size
self.weights = np.empty((0, nfeatures))
for i in range(self.niter):
# initialize weight array
w = None
if (self.weights.size < 1):
w = np.array([0 for _ in range(nfeatures)])
else:
w = np.copy(self.weights[i-1])
# calculate y's for each sample and set the weight
itererrors = np.array([])
for j, x_j in enumerate(X):
d_j = d[j]
y_j = Perceptron.f(w, x_j)
w = np.array([w[i] + self.rate * (d_j - y_j) * x_j[i] for i in range(nfeatures)])
itererrors = np.append(itererrors, abs(d_j-y_j))
# record the weight for this iteration
self.weights = np.append(self.weights, [w], axis=0)
# record the number of errors
itererror = np.sum(itererrors)# / X.shape[0]
self.errors = np.append(self.errors, itererror)
if (itererror == 0):
break
def net_input(self, X):
"""
        Returns numpy array of weighted samples
        :param numpy.ndarray X: sample array
"""
w = self.weights[self.weights.shape[0]-1]
X = np.array([np.insert(x, 0, 1) for x in X])
return np.array([np.dot(w, x_j) for x_j in X])
def predict(self, X):
"""
Returns numpy array of labels
        :param numpy.ndarray X: sample array
"""
w = self.weights[self.weights.shape[0]-1]
X = np.array([np.insert(x, 0, 1) for x in X])
return np.array([Perceptron.f(w, x_j) for x_j in X])
@staticmethod
def f(w, x):
"""
Returns 1 if w dot x > 0, -1 otherwise
Since x[0] is 1, w[0] acts as a bias
        :param numpy.ndarray w: weight vector
        :param numpy.ndarray x: input features
"""
if (np.dot(w, x) > 0):
return 1
else:
return -1
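if __name__ == "__main__":
    # Hedged usage sketch (added; not in the original file): fit the
    # perceptron on a tiny linearly separable problem (logical OR, -1/1 labels).
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    d = np.array([-1, 1, 1, 1])
    p = Perceptron(rate=0.1, niter=10)
    p.fit(X, d)
    print(p.predict(X))  # expected: [-1  1  1  1]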
|
UTF-8
|
Python
| false
| false
| 2,609
|
py
| 21
|
perceptron.py
| 16
| 0.516673
| 0.505174
| 0
| 86
| 29.313953
| 97
|
adandamudi/hlast
| 12,266,426,615,613
|
b91c177e7f9042f771e39250832f18f6244df45b
|
295d62b0d1abe97f8a3d1500c44666040870d4b0
|
/tests/toy-simple/v2/ex6.py
|
f9ae471c3e95edf2feae3ac20e18eaae19434938
|
[] |
no_license
|
https://github.com/adandamudi/hlast
|
3c5e493e15ce9c64a5644ca1d3138e7c87629cbb
|
94d3e2b12434c137399546a71da5ad063d32d201
|
refs/heads/main
| 2023-02-02T13:56:00.955051
| 2020-12-18T06:45:17
| 2020-12-18T06:45:17
| 311,791,616
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
i=0
# Variable name change + adding a new statement
b = 0
if b == 0:
c = (3, None)
b = b - 1
|
UTF-8
|
Python
| false
| false
| 95
|
py
| 50
|
ex6.py
| 45
| 0.568421
| 0.515789
| 0
| 6
| 14.833333
| 47
|
LuisPuelloCP/Validador_Contrase-a
| 8,005,819,075,095
|
082ea5f8a66116b01f9829ca3b68a2f225074ead
|
50122bee3d0c3a268c8d55ea333881d80937844b
|
/Ventana.py
|
977d5572f8ae20980354fdde7c7c4eae92238c76
|
[] |
no_license
|
https://github.com/LuisPuelloCP/Validador_Contrase-a
|
90adb65ec459bce9e8c39410e30aefc603e4aeac
|
9a53d412371509e629ad238acb678c4df19a8264
|
refs/heads/master
| 2021-03-06T17:22:52.161108
| 2020-03-14T21:36:34
| 2020-03-14T21:36:34
| 246,212,306
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from tkinter import *
import re
class Ventana:
def __init__(self):
self.ventana = Tk()
self.ventana.title("validador De Contraseña")
self.ventana.iconbitmap("password.ico")
self.frame = Frame(self.ventana, width = 1200, height = 600)
self.frame.pack()
self.label = Label(self.frame, text = "Validador de contraseña", font=20)
self.label.grid(row=0, column=2)
self.label2 = Label(self.frame, text = "Ingrese contraseña", font=15)
self.label2.grid(row=1, column=1, padx=20, pady=20)
self.label3 = Label(self.frame, text = "Resultado", font=15)
self.label3.grid(row=2, column=1, padx=20, pady=20 )
self.label5 = Label(self.frame, text = """La contraseña debe contener:
[entre 1 y 2]mayusculas, [entre 4 y 20]minusculas,
[ entre 2 y 10]numeros, y simbolos especiales [@#$%&/?¿<>^-]""")
self.label5.grid(row=3, column=2, padx=20, pady=20)
        self.contraseña = StringVar()  # StringVar holding the password text
        self.cuadroTexto = Entry(self.frame, textvariable = self.contraseña)  # entry box for typing the password
        self.cuadroTexto.grid(row=1, column=2, padx=20, pady=20)  # position of the text box
self.cuadroTexto.config(justify="center", show="°")
self.boton = Button(self.frame, text = "Validar", command = self.codigoBoton)
self.boton.grid(row=1,column=3, padx=20, pady=20)
self.ventana.mainloop()
def codigoBoton(self):
self.verificar = self.contraseña.get()
self.validador = re.search("(?=.*[A-Z]{1,2})(?=.*[a-z]{4,20})(?=.*[0-9]{2,20})(?=.*[@#$%&/?¿<>^-])",self.verificar)
if self.validador != None:
self.label4 = Label(self.frame, text = "La contraseña es segura")
self.label4.grid(row=2, column=2, padx=20, pady=20)
self.label.after(2000 , self.label4.destroy)
else:
self.label4 = Label(self.frame, text = "La contraseña no es segura")
self.label4.grid(row=2, column=2, padx=20, pady=20)
self.label.after(2000 , self.label4.destroy)
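if __name__ == "__main__":
    # Hedged usage note (added; not in the original file): constructing the
    # class builds the window and blocks in mainloop(); it needs password.ico
    # next to the script.
    Ventana()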
|
UTF-8
|
Python
| false
| false
| 2,150
|
py
| 6
|
Ventana.py
| 5
| 0.610669
| 0.566214
| 0
| 49
| 42.612245
| 123
|
BlenderCN-Org/compAS
| 558,345,794,576
|
309fa8c2658c3abe8e7e3110fbe86a7dcb66c453
|
60a267a7136b3cec2727824122bc6cda28c331e5
|
/tools/MeshTools/meshtools.py
|
fe16d03c15fc4ef56694fdbe101cd963d27e1531
|
[
"MIT"
] |
permissive
|
https://github.com/BlenderCN-Org/compAS
|
4a257637d181188c0b68210f1126fa826be226d5
|
9796066a2dc26f39fe6ad0a0d44a1ef8a84a608a
|
refs/heads/master
| 2020-05-30T02:11:18.495302
| 2017-03-21T13:42:17
| 2017-03-21T13:42:17
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""A Toolbar providing an interface to common mesh tools."""
from compas.geometry.elements.polyhedron import Polyhedron
from compas.datastructures.mesh.mesh import Mesh
from compas.datastructures.mesh.algorithms.tri.subdivision import loop_subdivision
from compas.datastructures.mesh.algorithms.subdivision import quad_subdivision
from compas.datastructures.mesh.algorithms.subdivision import doosabin_subdivision
from compas.datastructures.mesh.algorithms.subdivision import _catmullclark_subdivision
from compas_rhino.datastructures.mesh import RhinoMesh
# from compas_rhino.datastructures.mixins.keys import SelectComponents
# from compas_rhino.datastructures.mixins.attributes import EditAttributes
# from compas_rhino.datastructures.mixins.geometry import EditGeometry
# from compas_rhino.datastructures.mixins.geometry import DisplayGeometry
# from compas_rhino.datastructures.mixins.labels import DisplayLabels
import compas_rhino.utilities as rhino
try:
import rhinoscriptsyntax as rs
except ImportError as e:
import platform
if platform.system() == 'Windows':
raise e
__author__ = 'Tom Van Mele'
__copyright__ = 'Copyright 2016, Block Research Group - ETH Zurich'
__license__ = 'MIT license'
__email__ = 'vanmelet@ethz.ch'
class ControlMesh(Mesh):
""""""
def __init__(self, **kwargs):
super(ControlMesh, self).__init__(**kwargs)
self.attributes.update({
'layer' : None,
'color.vertex' : None,
'color.edge' : None,
'color.face' : None,
'color.normal:vertex' : None,
'color.normal:face' : None,
})
self.dva.update({
'is_fixed' : False,
})
self.dea.update({
'weight' : 1.0,
'q' : 1.0,
})
class MeshTools(object):
""""""
def __init__(self):
self.mesh = None
self.layers = {'MeshTools' : {'layers': {
'Mesh': {'layers': {}},
'Subd': {'layers': {
'LoopMesh' : {'layers' : {}},
'QuadMesh' : {'layers' : {}},
'DooSabinMesh' : {'layers' : {}},
'CatmullClarkMesh' : {'layers' : {}},
}},
}}}
def init(self):
rhino.create_layers(self.layers)
rhino.clear_layers(self.layers)
def from_xxx(self):
options = ['mesh', 'surface', 'surface_uv', 'polyhedron', 'obj', 'json']
option = rs.GetString('From what ...', options[0], options)
if option not in options:
return
if option == 'mesh':
guid = rhino.select_mesh()
if guid:
self.mesh = RhinoMesh.from_guid(guid)
self.mesh.name = 'Mesh'
self.mesh.layer = 'MeshTools::Mesh'
self.mesh.draw(show_faces=False)
if option == 'surface':
guid = rhino.select_surface()
if guid:
self.mesh = RhinoMesh.from_surface(guid)
self.mesh.name = 'Mesh'
self.mesh.layer = 'MeshTools::Mesh'
self.mesh.draw(show_faces=False)
if option == 'surface_uv':
guid = rhino.select_surface()
if guid:
self.mesh = RhinoMesh.from_surface_uv(guid)
self.mesh.name = 'Mesh'
self.mesh.layer = 'MeshTools::Mesh'
self.mesh.draw(show_faces=False)
if option == 'polyhedron':
faces = ['f4', 'f6', 'f8', 'f12', 'f20']
f = rs.GetString('Number of faces ...', faces[0], faces)
if f not in faces:
return
f = int(f[1:])
tet = Polyhedron.generate(f)
if tet:
self.mesh = RhinoMesh.from_vertices_and_faces(tet.vertices, tet.faces)
self.mesh.name = 'Mesh'
self.mesh.layer = 'MeshTools::Mesh'
self.mesh.draw(show_faces=False)
if option == 'obj':
raise NotImplementedError
if option == 'json':
raise NotImplementedError
def to_xxx(self):
options = ['obj', 'json']
option = rs.GetString('Export format ...', options[0], options)
if option not in options:
return
if option == 'obj':
raise NotImplementedError
if option == 'json':
raise NotImplementedError
def edit(self):
"""Edit mesh attributes."""
# select a mesh by clicking on it
# provide support for fixed vertices
# provide support for edge weights
# provide support for force densities
mesh = self.mesh
options = ['vertices', 'edges', 'faces']
option = rs.GetString('Edit attributes of ...', options[0], options)
if option not in options:
return
if option == 'vertices':
keys = mesh.select_vertices()
if not keys:
return
names = sorted(mesh.dva.keys())
mesh.edit_vertex_attributes(keys, names)
if option == 'edges':
keys = mesh.select_edges()
if not keys:
return
names = sorted(mesh.dea.keys())
mesh.edit_edge_attributes(keys, names)
if option == 'faces':
keys = mesh.select_faces()
if not keys:
return
names = sorted(mesh.dfa.keys())
mesh.edit_face_attributes(keys, names)
def modify(self):
"""Modfy geometry and/or topology of a mesh."""
# select a mesh by clicking on it
mesh = self.mesh
options = ['Move', 'MoveVertex', 'MoveFace', 'MoveEdge', 'SplitEdge', 'SwapEdge', 'CollapseEdge']
option = rs.GetString('Mesh Operation ...', options[0], options)
if option not in options:
return
if option == 'Move':
raise NotImplementedError
if option == 'MoveVertex':
raise NotImplementedError
if option == 'MoveFace':
raise NotImplementedError
if option == 'MoveEdge':
raise NotImplementedError
if option == 'SplitEdge':
raise NotImplementedError
if option == 'SwapEdge':
raise NotImplementedError
if option == 'CollapseEdge':
raise NotImplementedError
def modify_tri(self):
"""Modfy geometry and/or topology of a triangle mesh.
The avaialable operations are specific to triangle meshes, because they
use the properties of the triangular geometry and topology to simplify
and speed up the operations. The effect of the operations is also slightly
different, because they preserve the triangular nature of the mesh.
Raises:
Exception :
If the selected mesh is not a triangle mesh.
"""
mesh = self.mesh
if not mesh.is_trimesh():
            raise Exception('TriMesh operations are only available for triangle meshes.')
options = ['SplitEdge', 'SwapEdge', 'CollapseEdge']
option = rs.GetString('TriMesh Operation ...', options[0], options)
if option not in options:
return
if option == 'SplitEdge':
raise NotImplementedError
if option == 'SwapEdge':
raise NotImplementedError
if option == 'CollapseEdge':
raise NotImplementedError
def subd(self):
"""Subdivide the control mesh and draw as separate subd mesh."""
options = ['Quad', 'DooSabin', 'CatmullClark']
option = rs.GetString('Subdivision scheme ...', options[0], options)
if option not in options:
return
loops = ['k1', 'k2', 'k3', 'k4', 'k5']
k = rs.GetString('Subd level ...', loops[0], loops)
if k not in loops:
return
k = int(k[1:])
if option == 'Quad':
# Quad subdivision.
# Interpolation
# This should be removed
raise NotImplementedError # properly :)
subd = quad_subdivision(self.mesh, k=k)
subd.name = 'QuadMesh'
subd.layer = 'MeshTools::Subd::QuadMesh'
subd.draw(show_vertices=False, show_edges=False)
if option == 'DooSabin':
# Doo-Sabin scheme for quad subdivision.
# Approximation
subd = doosabin_subdivision(self.mesh, k=k)
subd.name = 'DooSabinMesh'
subd.layer = 'MeshTools::Subd::DooSabinMesh'
subd.draw(show_vertices=False, show_edges=False)
if option == 'CatmullClark':
# Catmull-Clark scheme for quad subdivision.
# Approximation
subd = _catmullclark_subdivision(self.mesh, k=k)
subd.name = 'CatmullClarkMesh'
subd.layer = 'MeshTools::Subd::CatmullClarkMesh'
subd.draw(show_vertices=False, show_edges=False)
def subd_tri(self):
"""Apply subdivision algorithms that are specific to trianlge meshes.
Raises:
Exception :
                If the selected mesh is not a triangle mesh.
"""
if not self.mesh.is_trimesh():
            raise Exception('TriSubdivision schemes are only available for triangle meshes.')
options = ['Loop']
option = rs.GetString('TriSubdivision scheme ...', options[0], options)
if option not in options:
return
loops = ['k1', 'k2', 'k3', 'k4', 'k5']
k = rs.GetString('Subd level ...', loops[0], loops)
if k not in loops:
return
k = int(k[1:])
if option == 'Loop':
# Loop subdivision.
# Approximation
subd = loop_subdivision(self.mesh, k=k)
subd.name = 'LoopMesh'
subd.layer = 'MeshTools::Subd::LoopMesh'
subd.draw(show_vertices=False, show_edges=False)
def smooth(self):
options = ['umbrella', 'area', 'forcedensity']
option = rs.GetString('Weighting scheme...', options[0], options)
if option not in options:
return
if option == 'umbrella':
raise NotImplementedError
if option == 'area':
raise NotImplementedError
if option == 'forcedensity':
raise NotImplementedError
def smooth_tri(self):
options = ['cotangent']
option = rs.GetString('Tri Weighting scheme...', options[0], options)
if option not in options:
return
if option == 'cotangent':
raise NotImplementedError
def relax(self):
raise NotImplementedError
# ==============================================================================
# Debugging
# ==============================================================================
if __name__ == "__main__":
from compas_rhino.ui.rui import Rui
from compas_rhino.ui.rui import get_macros
from compas_rhino.ui.rui import update_macro
toolbars = [{'name' : 'MeshTools', 'items' : [
{'type': 'normal', 'left_macro' : 'init', },
{'type': 'normal', 'left_macro' : 'from_xxx', },
{'type': 'normal', 'left_macro' : 'to_xxx', },
{'type': 'separator', },
{'type': 'normal', 'left_macro' : 'edit', },
{'type': 'normal', 'left_macro' : 'modify', },
{'type': 'normal', 'left_macro' : 'subd', },
{'type': 'normal', 'left_macro' : 'smooth', },
{'type': 'normal', 'left_macro' : 'relax', },
{'type': 'separator', },
{'type': 'normal', 'left_macro' : 'modify_tri', },
{'type': 'normal', 'left_macro' : 'subd_tri', },
]}]
toolbargroups = [{'name' : 'MeshTools', 'toolbars' : ['MeshTools', ]}]
macros = get_macros(MeshTools, 'mtools')
init_script = [
'-_RunPythonScript ResetEngine (',
'from compas_rhino.datastructures.toolbars.meshtools import MeshTools;',
'mtools = MeshTools();',
'mtools.init()',
')',
]
update_macro(macros, 'init', 'script', ''.join(init_script))
rui = Rui('./mtools.rui')
rui.init()
rui.add_macros(macros)
rui.add_toolbars(toolbars)
rui.add_toolbargroups(toolbargroups)
rui.write()
|
UTF-8
|
Python
| false
| false
| 12,308
|
py
| 311
|
meshtools.py
| 105
| 0.550699
| 0.547449
| 0
| 340
| 35.2
| 105
|
bitsteller/cells2flows
| 17,858,474,052,106
|
1dc6262f8eab2e592cc69bae41153263cb38d7fe
|
e28085b6d31b107abd5b21209dccd636e5112009
|
/util.py
|
6b021690cb125b19d72a9810dcf98a3342226364
|
[] |
no_license
|
https://github.com/bitsteller/cells2flows
|
7ea5bfa68dfbb89ca1627b12d88efc7f06d2e3dc
|
00b5714697926a1183d45eb93ed42f9f8b1642de
|
refs/heads/master
| 2016-08-11T20:52:03.574094
| 2015-09-07T14:26:58
| 2015-09-07T14:26:58
| 36,836,400
| 3
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import collections, sys, itertools, multiprocessing, re, datetime, time
from exceptions import KeyboardInterrupt, ValueError
import psycopg2
import config
def parse_trajectory(linestr):
"""Reads a line from csv file and parses the trajectory
Args:
        linestr: one-line string from a STEM csv file
Returns:
        A tuple (userid, sequence) where sequence is a list of tuples (lat, lon, time) for each position in the trajectory"""
userid, trajectory = re.match(r"([0-9]+)[\s]+\[([\S\s]+)\]",linestr).groups()
sequence = [] #list of tuples containing userid, lat, lon, time
for lat, lon, timestr in re.findall(r"\[\[([\-0-9.]+), ([\-0-9.]+)\], '([0-9:\- ]+)'\]",trajectory):
t = datetime.datetime.strptime(timestr.rpartition("-")[0], "%Y-%m-%d %H:%M:%S")
sequence.append((float(lat), float(lon), t))
return (userid, sequence)
def parse_antenna(linestr):
"""Reads a line from antenna csv file
Args:
        linestr: one-line string from an antenna csv file
Returns:
    A tuple (lon, lat, srid) containing the extracted antenna position,
    where lat/lon are in the coordinate system given by the SRID;
    alternatively a tuple (id, lon, lat, srid) if a specific id should be assigned to the antenna"""
lon, lat = re.match(r"([0-9.]+),([0-9.]+)",linestr).groups()
return (float(lon), float(lat), 32611)
def parse_trip(linestr):
"""Reads a line from a trip csv file
Args:
        linestr: one-line string from a trip csv file
Returns:
    A tuple (userid, cellpath) containing the user id and the list of visited cells on the trip, or None if the trip is ignored"""
try:
data = re.match(r"([0-9]+),([01]),([0-9.]+),([0-9.]+),([0-9 ]*)",linestr).groups()
userid, commute_direction, orig_TAZ, dest_TAZ, cellpathstr = data
if int(commute_direction) == 1 or len(cellpathstr) == 0:
return None #ignore trips for afternoon commute or with empty cellpaths
try:
cellpath = [int(cell) for cell in cellpathstr.strip(" ").split(" ")]
return (userid, cellpath)
except Exception, e:
print("Line '" + linestr + "' could will be ignored, because '" + cellpathstr + "' is not a valid cellpath")
return None
except Exception, e:
print("Line '" + linestr + "' has an invalid syntax and will be ignored.")
return None
def parse_taz(feature):
"""Parses a geojson feature dict
Args:
feature: geojson feature dict
Returns:
A tuple (taz_id, pglinestr), where a pglinestr is a postgis linestring describing the TAZ polygon
"""
taz_id = int(feature["properties"]["TAZ_ID"])
linestr = to_pglinestring([(lat, lon) for lon, lat in feature["geometry"]["coordinates"][0]])
return (taz_id, linestr)
def to_pglinestring(points):
"""Converts a list of (lat,lon) points to a postgis LINESTRING
Args:
points: A list of tuples (lat,lon) describing the points of the LINESTRING
Returns:
A postgis LINESTRING following the given points
"""
return "LINESTRING (" + ",".join([str(lat) + " " + str(lon) for lon, lat in points]) + ")"
def confirm(prompt_str, allow_empty=False, default=False):
"""Prompts the user to confirm an action and returns the users decision.
Args:
prompt_str:
A description of the action that the user should confirm (for example "Delete file x?")
allow_empty:
            If true, the default action is assumed, even if the user just pressed enter
default:
The default action (true: accept, false: decline)
Returns:
True if the user accepted the action and false if not.
"""
fmt = (prompt_str, 'y', 'n') if default else (prompt_str, 'n', 'y')
if allow_empty:
prompt = '%s [%s]|%s: ' % fmt
else:
prompt = '%s %s|%s: ' % fmt
while True:
ans = raw_input(prompt).lower()
if ans == '' and allow_empty:
return default
elif ans == 'y':
return True
elif ans == 'n':
return False
else:
print("Please enter y or n.")
def chunks(seq, n):
"""Partionions a sequence into chunks
Args:
seq: the sequence to split in chunks
n: the maximum chunksize
Return:
A generator that yields lists containing chunks of the original sequence
"""
if n <= 0:
        raise ValueError("Chunksize must be positive")
chunk = []
for el in seq:
chunk.append(el)
if len(chunk) >= n:
yield chunk
chunk = []
if len(chunk) > 0:
yield chunk
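# Worked example (added; not in the original file):
#   list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]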
def od_chunks(chunksize = 200):
"""Returns a generator that returns OD pair chunks based on the cell ids
Returns:
A generator that returns tuples of the form ([list of origins], [list of destinations])"""
for origin in config.CELLS:
for destinations in chunks(config.CELLS, chunksize):
yield ([origin], destinations)
def get_random_od_data(limit):
conn = db_connect()
cur = conn.cursor()
sql = " SELECT orig_cell, dest_cell, interval, flow \
FROM (SELECT * FROM od ORDER BY random() LIMIT %s) AS od"
cur.execute(sql, (limit,))
result = []
for orig_cell, dest_cell, interval, flow in cur:
result.append({"interval": interval, "orig_cells": [orig_cell], "dest_cells": [dest_cell], "flow": flow})
conn.commit()
return result
def db_login(force_password=False):
"""Makes sure that config.PASSWORD is set to the database password.
    If config.PASSWORD is already defined, this function will not do anything. Otherwise
    it will try to fetch the password from the system's keychain. If no password is stored
    in the keychain yet, the user is prompted to enter the password and optionally store it
    in the system keychain.
    Args:
        force_password: If set to True, the user is prompted even if the password
            is stored in the keychain (useful if the password needs to be changed)
    """
if "PASSWORD" in dir(config) != None: #password already set in config.py
return
import keyring, getpass
config.PASSWORD = keyring.get_password(config.DATABASE, config.USER)
if config.PASSWORD == None or force_password == True:
while 1:
print("A password is needed to continue. Please enter the password for")
print(" * service: postgresql")
print(" * database: " + config.DATABASE)
print(" * user: " + config.USER)
print("to continue.")
config.PASSWORD = getpass.getpass("Please enter the password:\n")
if config.PASSWORD != "":
break
else:
print ("Authorization failed (no password entered).")
# store the password
if confirm("Do you want to securely store the password in the keyring of your operating system?",default=True):
keyring.set_password(config.DATABASE, config.USER, config.PASSWORD)
print("Password has been stored. You will not have to enter it again the next time. If you need to edit the password use the keychain manager of your system.")
def db_connect():
if "PASSWORD" in dir(config) == None:
db_login()
return psycopg2.connect("dbname=" + config.DATABASE + " user=" + config.USER + " password=" + config.PASSWORD + " host=localhost " + " port=" + str(config.PORT))
def partition(mapped_values):
"""Organize the mapped values by their key.
Returns an unsorted sequence of tuples with a key and a sequence of values.
Args:
mapped_values: a list of tuples containing key, value pairs
Returns:
A list of tuples (key, [list of values])
"""
partitioned_data = collections.defaultdict(list)
for key, value in mapped_values:
partitioned_data[key].append(value)
return partitioned_data.items()
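# Worked example (added; not in the original file); key order is unspecified,
# as the docstring notes:
#   partition([("a", 1), ("b", 2), ("a", 3)]) -> [("a", [1, 3]), ("b", [2])]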
class MapReduce(object):
def __init__(self, map_func, reduce_func, num_workers=multiprocessing.cpu_count(), initializer = None):
"""
map_func
Function to map inputs to intermediate data. Takes as
argument one input value and returns a tuple with the key
and a value to be reduced.
reduce_func
Function to reduce partitioned version of intermediate data
to final output. Takes as argument a key as produced by
map_func and a sequence of the values associated with that
key.
num_workers
The number of workers to create in the pool. Defaults to the
number of CPUs available on the current host.
"""
self.map_func = map_func
self.reduce_func = reduce_func
self.mappool = multiprocessing.Pool(num_workers, maxtasksperchild = 1000, initializer = initializer)
self.reducepool = multiprocessing.Pool(num_workers, maxtasksperchild = 1000, initializer = initializer)
self.request_stop = False
self.num_workers = num_workers
self.enqueued = 0
def stop(self):
self.request_stop = True
self.mappool.terminate()
self.reducepool.terminate()
def xinputs(self, inputs):
for value in inputs:
while self.enqueued - self.tasks_finished > 100*self.chunksize:
time.sleep(1)
self.enqueued += 1
if self.request_stop:
raise KeyboardInterrupt("Abort requested")
yield value
def __call__(self, inputs, chunksize=10, pipe=False, length = None, out = True):
"""Process the inputs through the map and reduce functions given. Don't call one MapReducer from different threads,
as it is not thread-safe.
inputs:
An iterable containing the input data to be processed.
chunksize:
The portion of the input data to hand to each worker. This
can be used to tune performance during the mapping phase.
pipe:
When set to true, key/value pairs are passed from map directly to reduce function just once.
Only applicable, when all values for every key are generated at once (no partioning or
reducing of the result of reduce)
length:
The length of the input iterable for the status indicator. If None, len(inputs) is used.
        out:
            The result is returned as output by default. If out=False, an empty list is returned (if the result is irrelevant and
            only the side effects of the map/reduce functions are desired).
Returns:
A list containing the resulting tuples (key, value).
"""
self.chunksize = chunksize
self.enqueued = 0
self.tasks_finished = 0
if length == None:
length = len(inputs)
#map
start = time.time()
result = []
mapped = []
for response in self.mappool.imap_unordered(self.map_func, self.xinputs(inputs), chunksize=chunksize):
if pipe:
mapped.extend(response)
else:
result.extend(response)
if self.request_stop:
raise KeyboardInterrupt("Abort requested")
self.tasks_finished += 1
if self.tasks_finished % (chunksize*self.num_workers) == 0:
#partition
partitioned_data = []
if pipe:
partitioned_data = partition(mapped)
else:
partitioned_data = partition(result)
#reduce
reduced = self.reducepool.map(self.reduce_func, partitioned_data)
if self.request_stop:
raise KeyboardInterrupt("Abort requested")
if pipe:
mapped = []
if out:
if pipe:
result.extend(reduced)
mapped = []
else:
result = reduced
est = datetime.datetime.now() + datetime.timedelta(seconds = (time.time()-start)/self.tasks_finished*(length-self.tasks_finished))
sys.stderr.write('\rdone {0:%}'.format(float(self.tasks_finished)/length) + " ETA " + est.strftime("%Y-%m-%d %H:%M"))
#partition
partitioned_data = []
if pipe:
partitioned_data = partition(mapped)
else:
partitioned_data = partition(result)
#reduce
reduced = self.reducepool.map(self.reduce_func, partitioned_data)
if pipe:
mapped = []
if out:
if pipe:
result.extend(reduced)
mapped = []
else:
result = reduced
sys.stderr.write('\rdone 100% ')
print("")
return result
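# Hedged usage sketch (added; not in the original file): a word-count style
# job. map_func returns a list of (key, value) pairs for one input, and
# reduce_func receives one (key, [values]) tuple, as partition() produces.
# Output order is unspecified.
#
#   def mapper(line):
#       return [(word, 1) for word in line.split()]
#
#   def reducer(key_values):
#       key, counts = key_values
#       return (key, sum(counts))
#
#   mr = MapReduce(mapper, reducer)
#   counts = mr(["a b a", "b c"], chunksize=1)  # -> [("a", 2), ("b", 2), ("c", 1)]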
def void(arg):
return arg
class ParMap(MapReduce):
def __init__(self, map_func, num_workers=multiprocessing.cpu_count(), initializer = None):
"""
map_func
Function to map inputs to intermediate data. Takes as
argument one input value and returns a tuple with the key
and a value to be reduced.
num_workers
The number of workers to create in the pool. Defaults to the
number of CPUs available on the current host.
"""
self.map_func = map_func
self.mappool = multiprocessing.Pool(num_workers, maxtasksperchild = 1000, initializer = initializer)
self.request_stop = False
self.num_workers = num_workers
self.enqueued = 0
def stop(self):
self.request_stop = True
self.mappool.terminate()
def __call__(self, inputs, chunksize=10, length = None):
"""Process the inputs through the map and reduce functions given.
inputs
An iterable containing the input data to be processed.
        chunksize=10
The portion of the input data to hand to each worker. This
can be used to tune performance during the mapping phase.
"""
self.chunksize = chunksize
self.enqueued = 0
self.tasks_finished = 0
if length == None:
length = len(inputs)
#map
self.tasks_finished = 0
start = time.time()
result = []
for response in self.mappool.imap_unordered(self.map_func, self.xinputs(inputs), chunksize=chunksize):
result.append(response)
if self.request_stop:
raise KeyboardInterrupt("Abort requested")
self.tasks_finished += 1
if self.tasks_finished % (chunksize) == 0:
est = datetime.datetime.now() + datetime.timedelta(seconds = (time.time()-start)/self.tasks_finished*(length-self.tasks_finished))
sys.stderr.write('\rdone {0:%}'.format(float(self.tasks_finished)/length) + " ETA " + est.strftime("%Y-%m-%d %H:%M"))
sys.stderr.write('\rdone 100% ')
print("")
return result
class Timer(object):
"""measures time"""
def __init__(self, description):
super(Timer, self).__init__()
self.start = time.time()
self.description = description
def stop(self):
self.end = time.time()
print(self.description + " took " + str(self.end-self.start) + "s.")
#make sure config.CELLS exists
if not hasattr(config, "CELLS"):
try:
conn = db_connect()
cur = conn.cursor()
cur.execute("SELECT MIN(id) AS min, MAX(id) AS max FROM ant_pos")
mincell, maxcell = cur.fetchone()
config.CELLS = range(mincell, maxcell+1)
conn.close()
except Exception, e:
pass
#make sure config.TRIPS exists
if not hasattr(config, "TRIPS"):
try:
conn = db_connect()
cur = conn.cursor()
cur.execute("SELECT MIN(id) AS min, MAX(id) AS max FROM trips_original")
mintrip, maxtrip = cur.fetchone()
config.TRIPS = xrange(mintrip, maxtrip+1)
conn.close()
except Exception, e:
pass
#make sure config.INTERVALS exists
if not hasattr(config, "INTERVALS"):
try:
conn = db_connect()
cur = conn.cursor()
cur.execute("SELECT array_agg(DISTINCT interval) FROM taz_od")
config.INTERVALS = cur.fetchone()[0]
conn.close()
except Exception, e:
pass
|
UTF-8
|
Python
| false
| false
| 14,338
|
py
| 66
|
util.py
| 52
| 0.688938
| 0.68315
| 0
| 446
| 31.14574
| 162
|
IlyaTrofimov/mlforhealthlabpub
| 4,329,327,049,664
|
308e0e093d512710d78b3bc253d94ef105009d42
|
653eaef652627b155569b5fe9ab9bb3607fc1e78
|
/alg/survivalquilts/class_SurvivalQuilts.py
|
babc2f6edf03db481774a13f51de72da928e3b25
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/IlyaTrofimov/mlforhealthlabpub
|
11ab86a83bd2ffd2574364a956b322b0c62406ae
|
190cbad2faae9e559ffe7a68143df7f747d70adc
|
refs/heads/main
| 2023-04-16T03:58:38.423288
| 2021-04-21T10:22:43
| 2021-04-21T10:22:43
| 358,528,623
| 0
| 0
|
NOASSERTION
| true
| 2021-04-16T08:25:26
| 2021-04-16T08:25:25
| 2021-04-15T13:41:13
| 2021-04-13T20:43:17
| 17,437
| 0
| 0
| 0
| null | false
| false
|
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import sys, os
import GPyOpt
from sklearn.model_selection import train_test_split, StratifiedKFold
#user defined
from class_UnderlyingModels import CoxPH, CoxPHRidge, Weibull, LogNormal, LogLogistic, RandomSurvForest
from utils_eval import calc_metrics
class SurvivalQuilts():
def __init__(self, K=10, num_bo=50, num_outer=3, num_cv=10, step_ahead=5):
self.K = K #number of time-horizons for temporal quilting
self.step_ahead = step_ahead #step_ahead calculation for robust selection
self.num_bo = num_bo # BO iteration
self.num_outer = num_outer # maximum number of BO
self.num_cv = num_cv # number of cross-validation
self.lmbda = 0.
self.rho = 0.5
self.SEED = 1234
# self.model_names = ['CoxPH', 'Weibull', 'LogNormal']
self.model_names = ['CoxPH', 'CoxPHRidge', 'Weibull', 'LogNormal', 'LogLogistic', 'RandomSurvForest']
self.M = len(self.model_names)
self.ens_domain = [{'name': 'w_' + str(m), 'type': 'continuous', 'domain': (0,1),'dimensionality': 1} for m in range(self.M)]
def train(self, X, T, Y):
t_start = int(T[Y.iloc[:,0] == 1].quantile(0.1))
t_end = int(np.max(T[Y.iloc[:,0] == 1]))
self.time_horizons = [t for t in np.linspace(t_start, t_end, self.K, dtype=int)]
self.all_time_horizons = [t for t in range(int(np.min(T[Y.iloc[:,0] == 1])), int(np.max(T[Y.iloc[:,0] == 1])))]
### INITIAL TRAINING - UNDERLYING MODELS
print('initial training of underlying models...')
metric_CINDEX, metric_BRIER = np.zeros([self.num_cv, self.M, self.K]), np.zeros([self.num_cv, self.M, self.K])
self.CV_pulled_models = []
for cv_idx in range(self.num_cv):
print('CV.. {}/{}'.format(cv_idx+1, self.num_cv))
pulled_models, tmp_CINDEX, tmp_BRIER = self._get_models_pulled_CV(X, T, Y, seed=cv_idx)
metric_CINDEX[cv_idx,:,:] = tmp_CINDEX
metric_BRIER[cv_idx,:,:] = tmp_BRIER
self.CV_pulled_models.append(pulled_models)
X_inits = np.zeros([1,self.M])
X_inits[0, np.argmax(np.mean(metric_CINDEX, axis=0)[:,0])] = 1 #put more weights on the "best" one (at the first step)
X_inits = self._get_normalized_X_step(X_inits)
W_prev = np.zeros([self.K, self.M])
W_prev[:,:] = X_inits
### BAYESIAN OPTIMIZATION -- TEMPORAL QUILTING
for k in range(self.K):
lmbda_ = self.lmbda
rho_ = self.rho
print('TIME K = ' +str(k))
print(W_prev)
### INITIALIZATION FOR TIME-STEP k
X_inits = np.zeros([1,self.M])
X_inits[0, np.argmax(np.mean(metric_CINDEX, axis=0)[:,k])]=1 #put more weights on the "best" one (at the first step)
X_inits = self._get_normalized_X_step(X_inits)
beta_ = np.median(np.mean(np.mean(metric_BRIER, axis=0)[:,k:(k+self.step_ahead+1)], axis=1))
W = np.copy(W_prev)
W[k:,:] = X_inits
Yo_inits, Yc_inits = [], []
tmp_o_prev, tmp_c_prev = self._get_Y_step_pulled(W_prev, X, T, Y, K_step=k)
tmp_o, tmp_c = self._get_Y_step_pulled(W, X, T, Y, K_step=k)
Yo_next = np.asarray(tmp_o[0])
Yc_next = self._get_AL_constraint(tmp_c[0], beta_, lmbda_, rho_)
Yo_inits.append(Yo_next)
Yc_inits.append(Yc_next)
Yo_inits = np.asarray(Yo_inits).reshape([-1,1])
Yc_inits = np.asarray(Yc_inits).reshape([-1,1])
for out_itr in range(self.num_outer):
X_step_ens = X_inits
Y_step_ens = Yo_inits + Yc_inits
print(X_inits)
print(Yo_inits + Yc_inits)
for itr in range(self.num_bo):
gp = GPyOpt.methods.BayesianOptimization(f = None, domain = self.ens_domain, X = X_step_ens,
Y = Y_step_ens, acquisition_type='EI',
model_type='GP', exact_feval = True,
cost_withGradients=None)
X_next = gp.suggest_next_locations()
X_next = self._get_normalized_X_step(X_next)
W[k:, :] = X_next
if itr < (self.num_bo-1):
tmp_o, tmp_c = self._get_Y_step_pulled(W, X, T, Y, K_step=k)
Yo_next = np.asarray(tmp_o[0]).reshape([-1,1])
Yc_next = self._get_AL_constraint(tmp_c[0], beta_, lmbda_, rho_)
Y_next = Yo_next + Yc_next
X_step_ens = np.vstack([X_step_ens, X_next])
Y_step_ens = np.vstack([Y_step_ens, Y_next])
print('=========== BO Finished ===========')
GP_ens = gp.model.model
if GP_ens is not None:
X_opt = X_step_ens[np.argmin(Y_step_ens,axis=0)]
W[k:, :] = X_opt
print('out_itr: ' + str(out_itr) + ' | BEST X: ' + str(X_opt) )
tmp_o, tmp_c = self._get_Y_step_pulled(W, X, T, Y, K_step=k)
print(tmp_o[0])
                    if max(0, tmp_c[0] - beta_) < 0.005*beta_: # within 0.5% of the median
print('====================================')
print('THRESHOLD SATISFIED')
print('BEST: ' + str(X_opt))
print('Objective val.: ' + str(tmp_o[0]))
print('Constraint val.: ' + str(tmp_c[0]))
print('====================================')
break
else:
raise ValueError('BO failed...')
lmbda_ = max(0, lmbda_ + 1./rho_ * tmp_c[0])
if tmp_c[0] <= 0.:
rho_ = rho_
else:
rho_ = rho_/2.
X_inits = X_opt
Yo_inits = np.asarray(tmp_o[0]).reshape([-1,1])
Yc_inits = self._get_AL_constraint(tmp_c[0], beta_, lmbda_, rho_)
print( 'out_itr: {} | Lambda: {} | Rho: {}'.format(out_itr, lmbda_, rho_) )
            thres_split = abs(tmp_o_prev[0] * 0.005) # 0.5% improvement -> update
if -(tmp_o[0] - tmp_o_prev[0]) > thres_split: # since tmp_o is negative C-index
                W_prev = np.copy(W) # only update when W is significantly better
### FINAL MODEL:
self.quilting_patterns = np.copy(W_prev)
self.underlying_models = self._get_trained_models(X, T, Y)
def predict(self, X, eval_time_horizons=None):
'''
The underlying models are trained and quilting patterns are given after training.
- self.underlying_models
- self.quilting_patterns
eval_time_horizons is either a list of evaluation times or None
- None gives all the possible prediction values.
output: risk
'''
pred_all = self._get_ensemble_prediction(self.underlying_models, self.quilting_patterns, X, self.all_time_horizons)
if eval_time_horizons:
pred = np.zeros([np.shape(pred_all)[0], len(eval_time_horizons)])
for t, eval_time in enumerate(eval_time_horizons):
pred[:, t] = pred_all[:, np.where(np.asarray(self.all_time_horizons) <= eval_time)[0][-1]]
else:
pred = np.copy(pred_all)
return pred
def _make_ModelList(self):
models = []
for tmp_name in self.model_names:
if tmp_name == 'CoxPH':
models += [CoxPH()]
elif tmp_name == 'CoxPHRidge':
models += [CoxPHRidge()]
elif tmp_name == 'Weibull':
models += [Weibull()]
elif tmp_name == 'LogNormal':
models += [LogNormal()]
elif tmp_name == 'LogLogistic':
models += [LogLogistic()]
elif tmp_name == 'RandomSurvForest':
models += [RandomSurvForest()]
return models
def _get_ensemble_prediction(self, models, W_, X_, all_time_horizons_):
for m in range(self.M):
tmp_pred_ = models[m].predict(X_, all_time_horizons_)
if m == 0:
pred_ = np.zeros(np.shape(tmp_pred_))
else:
for tt in range(self.K):
if tt == 0:
tmp_time_idx1 = np.asarray(all_time_horizons_) <= self.time_horizons[tt]
tmp_time_idx2 = np.asarray(all_time_horizons_) > self.time_horizons[tt]
increment = tmp_pred_[:, tmp_time_idx1] - np.matmul(tmp_pred_[:, tmp_time_idx1][:,[0]], np.ones([1,np.sum(tmp_time_idx1)]))
pred_[:, tmp_time_idx1] = pred_[:, tmp_time_idx1] + W_[tt,m] * increment
pred_[:, tmp_time_idx2] = pred_[:, tmp_time_idx2] + W_[tt,m] * np.matmul(increment[:,[-1]], np.ones([1,np.sum(tmp_time_idx2)]))
elif tt == (self.K - 1): #the last index
tmp_time_idx1 = np.asarray(all_time_horizons_) > self.time_horizons[tt-1]
increment = tmp_pred_[:, tmp_time_idx1] - np.matmul(tmp_pred_[:, tmp_time_idx1][:,[0]], np.ones([1,np.sum(tmp_time_idx1)]))
pred_[:, tmp_time_idx1] = pred_[:, tmp_time_idx1] + W_[tt,m] * increment
else:
tmp_time_idx1 = (np.asarray(all_time_horizons_) > self.time_horizons[tt-1]) & (np.asarray(all_time_horizons_) <= self.time_horizons[tt])
tmp_time_idx2 = np.asarray(all_time_horizons_) > self.time_horizons[tt]
increment = tmp_pred_[:, tmp_time_idx1] - np.matmul(tmp_pred_[:, tmp_time_idx1][:,[0]], np.ones([1,np.sum(tmp_time_idx1)]))
pred_[:, tmp_time_idx1] = pred_[:, tmp_time_idx1] + W_[tt,m] * increment
pred_[:, tmp_time_idx2] = pred_[:, tmp_time_idx2] + W_[tt,m] * np.matmul(increment[:,[-1]], np.ones([1,np.sum(tmp_time_idx2)]))
return pred_
def _get_Y_step_pulled(self, W_, X_, T_, Y_, K_step):
metric_CINDEX_ = np.zeros([self.num_cv])
metric_BRIER_ = np.zeros([self.num_cv])
for cv_idx in range(self.num_cv):
_,X_va, T_tr,T_va, Y_tr,Y_va = train_test_split(X_, T_, Y_, test_size=0.20, random_state=cv_idx+self.SEED)
pred = self._get_ensemble_prediction(self.CV_pulled_models[cv_idx], W_, X_va, self.time_horizons)
new_K_step = min(K_step + 1 + self.step_ahead, self.K)
for k in range(K_step, new_K_step):
eval_time = self.time_horizons[k]
tmp_C, tmp_B = calc_metrics(T_tr, Y_tr, T_va, Y_va, pred[:, k], eval_time)
metric_CINDEX_[cv_idx] += 1./len(self.time_horizons) * tmp_C
metric_BRIER_[cv_idx] += 1./len(self.time_horizons) * tmp_B
metric_CINDEX_[cv_idx] += 1./(new_K_step - K_step) * tmp_C
metric_BRIER_[cv_idx] += 1./(new_K_step - K_step) * tmp_B
Output_CINDEX = (- metric_CINDEX_.mean(),1.96*np.std(metric_CINDEX_)/np.sqrt(self.num_cv))
Output_BRIER = (metric_BRIER_.mean(),1.96*np.std(metric_BRIER_)/np.sqrt(self.num_cv))
return Output_CINDEX, Output_BRIER
def _get_models_pulled_CV(self, X, T, Y, seed):
X_tr, X_va, T_tr, T_va, Y_tr, Y_va = train_test_split(X, T, Y, test_size=0.20, random_state=seed+self.SEED)
pulled_models = self._get_trained_models(X_tr, T_tr, Y_tr)
metric_CINDEX, metric_BRIER = np.zeros([self.M, self.K]), np.zeros([self.M, self.K])
for m, model in enumerate(pulled_models):
pred = model.predict(X_va, self.time_horizons)
for t, eval_time in enumerate(self.time_horizons):
tmp_C, tmp_B = calc_metrics(T_tr, Y_tr, T_va, Y_va, pred[:, t], eval_time)
metric_CINDEX[m, t] = tmp_C
metric_BRIER[m, t] = tmp_B
return pulled_models, metric_CINDEX, metric_BRIER
def _get_trained_models(self, X, T, Y):
models = self._make_ModelList()
for m in range(self.M):
models[m].fit(X, T, Y)
return models
def _get_AL_constraint(self, g, beta_, lmbda_, rho_):
return np.asarray(lmbda_ * (g-beta_) + 0.5 / rho_ * max(0,(g-beta_))**2).reshape([-1,1])
def _get_normalized_X_step(self, X_step_):
for k in range(np.shape(X_step_)[0]):
X_step_[k, :] = X_step_[k, :]/(np.sum(X_step_[k, :])+1e-8)
return X_step_
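# Hedged usage sketch (added; not in the original file). train()/predict()
# index their arguments as pandas objects: X a DataFrame of covariates, T the
# event/censoring times, and Y a one-column DataFrame of event indicators
# (1 = event). The variable names below are hypothetical.
#
#   sq = SurvivalQuilts(K=10, num_bo=50, num_outer=3, num_cv=10)
#   sq.train(X_train, T_train, Y_train)
#   risk = sq.predict(X_test, eval_time_horizons=[365, 730])  # 1y and 2y risk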
|
UTF-8
|
Python
| false
| false
| 13,399
|
py
| 422
|
class_SurvivalQuilts.py
| 287
| 0.488619
| 0.477349
| 0
| 311
| 42.083601
| 160
|
brave-experiments/regional-filterlist-gen
| 9,972,914,068,658
|
a5c0e2722551b13c8dc2304c62e2c9887795ad32
|
cb9da139a319a3096674fab3dbb4d426e0a039b6
|
/statistics/python/page_graph_vanity_stats.py
|
24ad6b0028fff6efdf287cf3ea85932121d63dc9
|
[] |
no_license
|
https://github.com/brave-experiments/regional-filterlist-gen
|
24820af8aee4a7ffdcc8d25bde8f348a0b2aa98a
|
3ecffd2134a9b4aed8a96985e3207900d932249c
|
refs/heads/master
| 2022-12-01T20:04:53.103477
| 2020-08-14T21:36:26
| 2020-08-14T21:36:26
| 199,906,188
| 0
| 1
| null | false
| 2020-08-14T21:36:28
| 2019-07-31T18:07:45
| 2020-08-14T21:36:19
| 2020-08-14T21:36:27
| 637,815
| 0
| 1
| 0
|
Python
| false
| false
|
from networkx import graphml
import argparse
import os
import psycopg2
import psycopg2.extras
from s3fs.core import S3FileSystem
from tempfile import TemporaryDirectory
from urllib.parse import urlsplit
from tqdm import tqdm
import html
def generate_vanity_stats(bucket, s3, region):
pg_conn = psycopg2.connect(os.environ['PG_CONNECTION_STRING'] + '/' + region)
page_graph_cur = pg_conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
page_graph_cur.execute('select distinct on (queried_url) file_name from graphml_mappings')
total_nodes = 0
total_edges = 0
total_graph_files = 0
total_size_mb = 0
for entry in tqdm(page_graph_cur.fetchall()):
graphml_path = entry['file_name']
with TemporaryDirectory() as temp_dir:
local_file = os.path.join(temp_dir, graphml_path.split('/')[-1])
try:
s3.s3.download_file(bucket, graphml_path, local_file)
            except Exception:
print('cannot find file ' + graphml_path)
continue
with open(local_file, 'r') as graphml_file:
page_graph_data = ''
for line in graphml_file:
page_graph_data += line
try:
page_graph = graphml.parse_graphml(page_graph_data)
            except Exception:
continue
total_graph_files += 1
total_nodes += len(page_graph.nodes)
total_edges += len(page_graph.edges)
total_size_mb += (os.path.getsize(local_file) / 1000000)
return total_nodes / total_graph_files, total_edges / total_graph_files, total_size_mb / total_graph_files
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generates some PageGraph vanity stats')
parser.add_argument('--aws-access-key', help='aws access key')
parser.add_argument('--aws-secret-key', help='aws secret key')
parser.add_argument('--pg-bucket', help='aws bucket address')
args = parser.parse_args()
s3Bucket = S3FileSystem(anon=False, key=args.aws_access_key, secret=args.aws_secret_key)
print('sri lanka: ')
print(generate_vanity_stats(args.pg_bucket, s3Bucket, 'sri_lanka'))
print('hungary: ')
print(generate_vanity_stats(args.pg_bucket, s3Bucket, 'hungary'))
print('albania: ')
print(generate_vanity_stats(args.pg_bucket, s3Bucket, 'albania'))
|
UTF-8
|
Python
| false
| false
| 2,430
|
py
| 45
|
page_graph_vanity_stats.py
| 17
| 0.62963
| 0.618519
| 0
| 68
| 34.75
| 110
|
sjmiller609/crack_vigenere
| 816,043,829,435
|
c812ee2e65b0223e3f85d36e72af26729b884a67
|
c6bc7ac73e9378eac3a8d476d59c20fd6388d005
|
/crack.py
|
2cb6e0e1b565ccc74b6e448caf129100b17bc6cf
|
[] |
no_license
|
https://github.com/sjmiller609/crack_vigenere
|
5df3987ed9e01392dee11c85dffd5d128ae1aef0
|
cb0237841c820999c7adb181cad00cfc851792d4
|
refs/heads/master
| 2021-01-01T06:16:45.306471
| 2017-07-17T22:43:26
| 2017-07-17T22:43:26
| 97,402,370
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import enchant
import sys
import requests
import re
from cryptolib.crypto import Vigenere_Key, Vigenere_Cipher, Vigenere_Message
alphabet = {\
"a":0,
"b":1,
"c":2,
"d":3,
"e":4,
"f":5,
"g":6,
"h":7,
"i":8,
"j":9,
"k":10,
"l":11,
"m":12,
"n":13,
"o":14,
"p":15,
"q":16,
"r":17,
"s":18,
"t":19,
"u":20,
"v":21,
"w":22,
"x":23,
"y":24,
"z":25}
def string_to_array(string):
result = []
for char in string:
result.append(alphabet[char])
return result
def get_offsets(words):
offsets = []
counter = 0
for i in range(0,len(words)):
offsets.append(counter)
counter += len(words[i])
return offsets
#this doesn't need to be fast.
def sort_by_len(pairs):
if len(pairs) == 0: return []
results = []
shortest_len = 10000000000000000
shortest = None
for key in pairs:
if len(pairs[key]) < shortest_len:
shortest_len = len(pairs[key])
shortest = key
temp = [[shortest,pairs[shortest]]]
pairs.pop(shortest)
return temp+sort_by_len(pairs)
def get_ciphers_shortest_word_first(cipher):
c_words = cipher.split()
c_words = [string_to_array(word) for word in c_words]
offsets = get_offsets(c_words)
word_off_pairs = {}
for i in range(0,len(c_words)):
word_off_pairs[offsets[i]] = c_words[i]
word_off_pairs = sort_by_len(word_off_pairs)
ciphers = []
for i in range(0,len(c_words)):
ciphers.append(Vigenere_Cipher(word_off_pairs[i][1],word_off_pairs[i][0]))
return ciphers
def get_ciphers(cipher):
c_words = cipher.split()
c_words = [string_to_array(word) for word in c_words]
offsets = get_offsets(c_words)
word_off_pairs = []
for i in range(0,len(c_words)):
word_off_pairs.append([offsets[i],c_words[i]])
ciphers = []
for i in range(0,len(c_words)):
ciphers.append(Vigenere_Cipher(word_off_pairs[i][1],word_off_pairs[i][0]))
return ciphers
english_dict = enchant.Dict("en_US")
def crack_cipher(key_len,cipher,key=None):
    # It's a performance improvement to check the smallest word first, because it's the fastest
    # to decrypt, and we can more quickly eliminate keys that don't turn the small cipher words
    # into real English words.
cipher_words = get_ciphers_shortest_word_first(cipher)
#if we are not starting from an existing key, instantiate a new key object
if not key:
key = Vigenere_Key(key_len)
while not key.overflowed:
all_are_words = True
for i in range(0,len(cipher_words)):
#decrypt and see if it's a word or not
if not english_dict.check(cipher_words[i].decrypt(key).string()):
all_are_words = False
break
if all_are_words:
return key
key.increment()
#if the key overflows, then we have checked the entire key space.
return None
def main():
if len(sys.argv) != 2:
print("usage: $ "+sys.argv[0]+" 'my cipher text here'")
quit()
C = sys.argv[1].lower()
    # use fullmatch with '+': match() with "[A-Za-z ]*" always succeeds on the
    # empty prefix of any string, so the validation below could never fire
    re_cipher = re.compile("[A-Za-z ]+")
    if not re_cipher.fullmatch(C):
print("cipher must only contain letters and spaces")
quit()
cipher = sys.argv[1]
key_len = 0
max_key_len = 6
key = None
    while not key and key_len < max_key_len:  # '<' so the largest key length tried is max_key_len
key_len += 1
print("cracking key space with |K| = "+str(key_len))
key = crack_cipher(key_len,C)
    if not key:
print("key space exhausted. failed to crack cipher with max key length as "+str(max_key_len))
quit()
print("------")
print("key: "+key.string())
c_words = get_ciphers(C)
message = ""
for i in range(0,len(c_words)):
message += c_words[i].decrypt(key).string()+" "
message = message.strip()
print("message: "+message)
if __name__ == "__main__":
main()
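For reference, a self-contained sketch of the offset convention the cracker relies on: each cipher word is decrypted with the key phase-shifted by the word's starting offset in the space-stripped ciphertext. The repo-local cryptolib package is not available here, so the shift arithmetic is reimplemented inline:

def decrypt_word(word, key, offset):
    # subtract the key letter (phase-shifted by offset) from each cipher letter, mod 26
    out = []
    for i, ch in enumerate(word):
        k = ord(key[(offset + i) % len(key)]) - ord('a')
        out.append(chr((ord(ch) - ord('a') - k) % 26 + ord('a')))
    return ''.join(out)

# 'lxfopv ef rnhr' is 'attack at dawn' under the key 'lemon' with this scheme;
# the offsets are the cumulative lengths of the preceding words, as in get_offsets().
words = 'lxfopv ef rnhr'.split()
offsets = [0, 6, 8]
print(' '.join(decrypt_word(w, 'lemon', o) for w, o in zip(words, offsets)))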
|
UTF-8
|
Python
| false
| false
| 4,072
|
py
| 3
|
crack.py
| 2
| 0.566798
| 0.547397
| 0
| 148
| 26.513514
| 110
|
sntciitbhu/sntc_website_beta
| 18,622,978,229,236
|
212829b98ec3ef78060264a439e8f6be261f002d
|
5f6874113f86669d3220c9d5c247dab0a8abca01
|
/apps/clubs/urls.py
|
528372318f34f62d754321b2184da8d1bde9926b
|
[] |
no_license
|
https://github.com/sntciitbhu/sntc_website_beta
|
f5e0f0b0deec9b291b7c4c6cf0d54b7cf069596c
|
26a17ac7a401229a53fd428132fe072bdbb260b9
|
refs/heads/master
| 2021-12-23T19:26:03.161225
| 2020-05-18T09:54:25
| 2020-05-18T09:54:25
| 252,948,125
| 0
| 0
| null | false
| 2021-09-22T18:50:51
| 2020-04-04T08:35:08
| 2020-05-18T09:54:29
| 2021-09-22T18:50:49
| 38,516
| 0
| 0
| 4
|
JavaScript
| false
| false
|
"""sntc_website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from apps.main.admin import admin
from django.urls import path
from . import views
app_name = 'clubs'
urlpatterns = [
path("aero/", views.aero, name = 'Aero-Modelling Club'),
path("astro/", views.astro, name = 'Astronomy Club'),
path("biz/", views.biz, name = 'Business Club'),
path("csi/", views.csi, name = 'Club of Sustainibility and Innovation'),
path("cops/", views.cops, name = 'Club of Programmers'),
path("robo/", views.robo, name = 'Robotics Club'),
path("sae/", views.sae, name = 'Society of Automotive Engineers'),
]
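Since the URLconf sets app_name = 'clubs', these routes are reversed through that namespace. A hedged sketch, assuming the project URLconf includes this module under a 'clubs/' prefix (that include() line is an assumption, not shown in this file):

from django.urls import reverse

# e.g. with path('clubs/', include('apps.clubs.urls')) in the project urls.py:
reverse('clubs:Club of Programmers')  # -> '/clubs/cops/'
reverse('clubs:Robotics Club')        # -> '/clubs/robo/'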
|
UTF-8
|
Python
| false
| false
| 1,195
|
py
| 62
|
urls.py
| 45
| 0.679498
| 0.672803
| 0
| 31
| 37.548387
| 77
|
parkerjackman/Project-Portfolio
| 6,622,839,587,578
|
2350f5b9d8f19711e997e681998484ebe33bd002
|
9028ab13db23394dc79ab974d0d70991261640eb
|
/CS 1 - Python/Assignment 13/polygon.py
|
ae1d6a7970ec4b6e6c95b3751bea93ac0e8f1bfa
|
[] |
no_license
|
https://github.com/parkerjackman/Project-Portfolio
|
ff1eb8cc629470faf97e42cec653bdf4e1dd5189
|
da8d69eb829adf3f57bb62b1a74cfca52dffa765
|
refs/heads/master
| 2020-12-01T16:22:47.693359
| 2019-12-30T17:11:52
| 2019-12-30T17:11:52
| 230,696,969
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Parker Jackman
Assignment 13
Polygon Class
"""
class Polygon:
def __init__(self, sides):
self.__sides = sides
def getNumSides(self):
return self.__sides
def __add__(self, other):
return self.__sides + other.getNumSides()
def __sub__(self, other):
return self.__sides - other.getNumSides()
def __lt__(self, other):
return self.__sides < other.getNumSides()
def __gt__(self, other):
return self.__sides > other.getNumSides()
def __eq__(self, other):
return self.__sides == other.getNumSides()
def __len__(self):
return self.__sides * 5
def __str__(self):
return "I'm a polygon with " + str(self.__sides) + " sides"
|
UTF-8
|
Python
| false
| false
| 765
|
py
| 75
|
polygon.py
| 66
| 0.538562
| 0.534641
| 0
| 33
| 21.242424
| 67
|
ArcticGizmo/Thesis
| 7,722,351,246,584
|
82a25340f966c0eb7266fc6d34f6ab16129a1da4
|
eacddb32685fdf6a25c062b1eae87945a61325c3
|
/Application/packages/guiComponents/winForms.py
|
dd4e46ae1d0fcc77d7d11fcd6dbb9deaa18111d2
|
[] |
no_license
|
https://github.com/ArcticGizmo/Thesis
|
4f48a82f679a1014096ae6d42cde886aebc98f21
|
7cbd8db2eeb0666566bbbae1e28500fff65ee807
|
refs/heads/master
| 2021-08-23T23:40:47.731801
| 2017-12-07T03:43:01
| 2017-12-07T03:43:01
| 113,398,661
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import npyscreen as ns
import PyCapture2
from .. other import Utilities as u
from .. opencvController import cvWindowController as wc, cvWindowObjects as wo
import winButtonSets as bs
# base class for windows that are configured after npyscreen has instantiated them (via bind())
class winFormBase():
# give a return form id for on_ok and a group of bindings that can be used by the window
def bind(self, ret_form_id, bindings=None):
pass
# Opening menu for the terminal
class MainMenu(ns.ActionFormMinimal):
def create(self):
self.add(ns.TitleText, name="Welcome to sentry mode", editable=False)
self.nextrely += 1
# camera settings window
self.butCamera = self.add(ns.ButtonPress, name="Camera Settings")
self.butCamera.whenPressed = self.on_cameraSettings
# PTU settings window
self.butManual = self.add(ns.ButtonPress, name="PTU Control")
self.butManual.whenPressed = self.on_PTU
# Payload operations
self.butPayload = self.add(ns.ButtonPress, name="Payload Control")
self.butPayload.whenPressed = self.on_payload
# Wide angle lens operations
self.butWideAngle = self.add(ns.ButtonPress, name="Wide Angle Control")
self.butWideAngle.whenPressed = self.on_wideAngle
# Credits
self.nextrely += 1
self.butCredits = self.add(ns.ButtonPress, name="Credits")
self.butCredits.whenPressed = self.on_credits
def on_cameraSettings(self):
self.parentApp.switchForm("CAM_INFO")
def on_PTU(self):
self.parentApp.switchForm("PTU")
def on_payload(self):
self.parentApp.switchForm("PAYLOAD")
def on_wideAngle(self):
self.parentApp.switchForm("WIDE")
pass
def on_credits(self):
self.parentApp.switchForm("CREDITS")
# Predefined function for when "ok" is pressed
def on_ok(self):
self.parentApp.setNextForm(None)
# All controls for the camera information
class CameraInfo(ns.ActionFormMinimal, winFormBase):
def bind(self, ret_form_id, bindings=None):
self.CAMpayload = bindings[0]
self.CAMwide = bindings[1]
self.ret_form_id = ret_form_id
self.info = []
# Buttons
self.butPayload = bs.add_button(self, self.CAMpayload.name, self.on_displayPayload)
self.butWide = bs.add_button(self, self.CAMwide.name, self.on_displayWide, prevButton=self.butPayload)
# Payload Camera Info
self.camInfoPayload = bs.add_text_mult(self, "Camera Information", rows=8)
self.on_displayPayload()
# Py Capture version
libVer = PyCapture2.getLibraryVersion()
self.pyCaptureInfo = "PyCapture2 library version " + str(libVer[0]) + "." + str(libVer[1]) + "." + str(
libVer[3])
self.pyCaptureInfoDisp = bs.add_text(self, "Py Capture Information", value=self.pyCaptureInfo)
# Display payload information
def on_displayPayload(self):
temp = self.CAMpayload.get_camera_info()[:]
temp.insert(0, "Payload Camera")
self.info = temp
self.displayInfo()
# Display wide angle information
def on_displayWide(self):
temp = self.CAMwide.get_camera_info()[:]
temp.insert(0, "Wide Angle")
self.info = temp
self.displayInfo()
    # Display camera information
def displayInfo(self):
self.camInfoPayload.values = self.info
self.display()
# Called when the window is closed
def on_ok(self):
self.parentApp.setNextForm(self.ret_form_id)
# Contains the credits of the project
class Credits(ns.ActionFormMinimal, winFormBase):
def bind(self, ret_form_id, bindings=None):
        self.ret_form_id = ret_form_id
self.title = self.add(ns.TitleText, name="Credits", editable=False)
self.title.value = "Jonathan Howell 2017 Thesis Project"
# Called when the window is closed
def on_ok(self):
        self.parentApp.setNextForm(self.ret_form_id)
# Allows for mode changing (YET TO BE COMPLETE)
class ModeChangeMenu(ns.SplitForm):
def create(self):
return
def assign(self, serialLink):
self.PTU = serialLink
self.show_aty = 5
self.show_atx = 15
self.panModes = self.add(ns.TitleSelectOne, name="Pan Modes", scroll_exit=True,
values=['F', 'H', 'Q', 'E', 'A'], check_value_change=True,
begin_entry_at=10, field_width=18, max_height=6,
value=u.toInt(self.PTU.panMode))
self.nextrelx = 30
self.nextrely = 2
self.tiltModes = self.add(ns.TitleSelectOne, name="Tilt Modes", scroll_exit=True,
values=['F', 'H', 'Q', 'E', 'A'], check_value_change=True,
begin_entry_at=10, field_width=18, max_height=6,
value=u.toInt(self.PTU.tiltMode))
self.nextrelx = 0
self.updateBut = self.add(ns.ButtonPress, name="Update")
self.updateBut.whenPressed = self.on_update
self.draw_line_at = self.nextrely
self.nextrely += 1
self.nextrelx += 2
self.status = self.add(ns.TitleText, name="Status:", editable=False, begin_entry_at=10)
def on_update(self):
s = self.PTU.change_mode(self.panModes.value, self.tiltModes.value)
if s is not None:
self.status.value = s
self.display()
def afterEditing(self):
self.parentApp.setNextForm("MAIN")
# All control for the payload camera
class PayloadMenu(ns.ActionFormMinimal, ns.SplitForm, winFormBase):
def bind(self, ret_form_id, bindings=None):
self.set = bindings[0]
self.ret_form_id = ret_form_id
# control positioning for form
self.show_atx = 0
self.show_aty = 0
# Create status bar
self.status = bs.createStatusBar(self)
# Create button sets
bs.buttsetBasicOpenClose(self, self.set.get_cv_func(wo.DisplayFeed), statusBar=self.status)
self.nextrely += 1
bs.buttsetBasicOpenClose(self, self.set.get_cv_func(wo.DisplayCrosshair), statusBar=self.status)
# Called when the window is closed
def on_ok(self):
self.status.value = ""
self.parentApp.setNextForm(self.ret_form_id)
# Allows for the control of the PTU unit in degs
class PTUDegMenu(ns.SplitForm, winFormBase):
# Used to assign a PTU controller to the window
def bind(self, ret_form_id, bindings=None):
# save input
self.PTU = bindings[0]
self.set = bindings[1] # Allows for ptu display graphic
self.set.get_cv_func(wo.PTUDisplay).assign([self.PTU])
self.ret_form_id = ret_form_id
# Status output
self.status = bs.createStatusBar(self)
# Pan range
panRange = u.round_float_array(self.PTU.panRangeDeg[:])
self.panRange = bs.add_text(self, "Pan Range:", value=panRange)
# Tilt range
tiltRange = u.round_float_array(self.PTU.tiltRangeDeg[:])
self.tiltRange = bs.add_text(self, "Tilt Range:", value=tiltRange)
# pan and tilt speed
self.nextrely += 1
self.panSpeed = bs.add_text(self, "Pan speed:", value=u.round_float(self.PTU.panSpeedDeg), editable=True)
self.tiltSpeed = bs.add_text(self, "Tilt speed:", value=u.round_float(self.PTU.tiltSpeedDeg), editable=True)
# update speed button
self.updateSpeed = bs.add_button(self, "Update Speed", self.on_update_speed)
# pan and tilt position
self.nextrely += 1
self.panPos = bs.add_text(self, "Pan Pos:", value=u.round_float(self.PTU.panPosDeg), editable=True)
self.tiltPos = bs.add_text(self, "Tilt Pos:", value=u.round_float(self.PTU.tiltPosDeg), editable=True)
self.updatePos = bs.add_button(self, "Update Pos", self.on_update_pos, verOffset=1)
self.getPos = bs.add_button(self, "Get Position", self.on_get_pos, verOffset=1)
self.reset = bs.add_button(self, "Reset", self.on_reset, prevButton=self.getPos)
self.stop = bs.add_button(self, "Stop", self.on_stop, prevButton=self.reset)
self.nextrely += 1
bs.buttsetBasicOpenClose(self, self.set.get_cv_func(wo.PTUDisplay), statusBar=self.status)
# This is called to update all displayed inputs
def update(self):
self.panPos.value = str(u.round_float(self.PTU.panPosDeg))
self.tiltPos.value = str(u.round_float(self.PTU.tiltPosDeg))
self.panSpeed.value = str(u.round_float(self.PTU.panSpeedDeg))
self.tiltSpeed.value = str(u.round_float(self.PTU.tiltSpeedDeg))
self.display()
def on_stop(self):
self.PTU.write("H ")
self.status.value = "STOP!"
self.display()
def on_update_pos(self):
s = "POS ("
s += self.PTU.set_pan_deg(self.panPos.value)
s += ", "
s += self.PTU.set_tilt_deg(self.tiltPos.value)
s += ")"
self.status.value = s
self.update()
def on_update_speed(self):
s = "SPD: ("
s += self.PTU.set_pan_speed_deg(self.panSpeed.value)
s += ", "
s += self.PTU.set_tilt_speed_deg(self.tiltSpeed.value)
s += ")"
self.status.value = s
self.update()
def on_get_pos(self):
self.PTU.get_pan()
self.PTU.get_tilt()
s = "Cur: ("
s += str(u.round_float(self.PTU.panPosDeg))
s += ", "
s += str(u.round_float(self.PTU.tiltPosDeg))
s += ")"
self.status.value = s
self.display()
def on_reset(self):
self.PTU.set_pan(0)
self.PTU.set_tilt(0)
self.panPos.value = "0"
self.tiltPos.value = str(self.PTU.tiltPosDeg)
self.status.value = "Reset"
self.display()
def afterEditing(self):
self.status.value = ""
self.parentApp.setNextForm(self.ret_form_id)
# Allows for the control of the PTU unit in counts (legacy)
class PTUMenu(ns.SplitForm, winFormBase):
def bind(self, ret_form_id, bindings=None):
self.PTU = bindings[0]
self.ret_form_id = ret_form_id
# Control centering of form
self.show_atx = 12
self.show_aty = 2
# Pan and tilt ranges
self.panRange = bs.add_text(self, "Pan Range:", value=self.PTU.panRange)
self.tiltRange = bs.add_text(self, "Tilt Range:", value=self.PTU.tiltRange)
# pan and tilt speed
self.nextrely += 1
self.panSpeed = bs.add_text(self, "Pan speed:", value=self.PTU.panSpeed, editable=True)
self.tiltSpeed = bs.add_text(self, "Tilt speed:", value=self.PTU.tiltSpeed, editable=True)
# update speed button
self.nextrely += 1
self.updateSpeed = bs.add_button(self, "Update Speed", self.on_update_speed)
# pan and tilt position
self.nextrely += 1
self.panPos = self.add(ns.TitleText, name="Pan Pos:")
self.panPos.value = str(self.PTU.panPos)
self.tiltPos = self.add(ns.TitleText, name="Tilt Pos:")
self.tiltPos.value = str(self.PTU.tiltPos)
# Update pos button
self.nextrely += 1
self.updatePos = self.add(ns.ButtonPress, name="Update Pos")
self.updatePos.whenPressed = self.on_update_pos
# Get current position
self.nextrely += 1
self.getPos = self.add(ns.ButtonPress, name="Get Position")
self.getPos.whenPressed = self.on_getPos
# Reset to default
self.nextrely += -1
self.nextrelx += int(len(self.getPos.name)) + 4
self.reset = self.add(ns.ButtonPress, name="Reset")
self.reset.whenPressed = self.on_reset
# stop button
self.nextrely += -1
self.nextrelx += int(len(self.reset.name)) + 4
self.stop = self.add(ns.ButtonPress, name="Stop")
self.stop.whenPressed = self.on_stop
# Status output
self.draw_line_at = self.nextrely
self.nextrelx = 2
self.nextrely += 1
self.status = self.add(ns.TitleText, name="Status:", editable=False, begin_entry_at=10)
# This is called to update all displayed values
def update(self):
self.panPos.value = str(self.PTU.panPos)
self.tiltPos.value = str(self.PTU.tiltPos)
self.panSpeed.value = str(self.PTU.panSpeed)
self.tiltSpeed.value = str(self.PTU.tiltSpeed)
self.display()
def on_stop(self):
self.PTU.write("H ")
self.status.value = "STOP!"
self.display()
def on_update_pos(self):
s = "POS ("
s += self.PTU.set_pan(self.panPos.value)
s += ", "
s += self.PTU.set_tilt(self.tiltPos.value)
s += ")"
self.status.value = s
self.update()
def on_update_speed(self):
s = "SPD: ("
s += self.PTU.set_pan_speed(self.panSpeed.value)
s += ", "
s += self.PTU.set_tilt_speed(self.tiltSpeed.value)
s += ")"
self.status.value = s
self.update()
def on_getPos(self):
self.PTU.get_pan()
self.PTU.get_tilt()
s = "Cur: ("
s += str(self.PTU.panPos)
s += ", "
s += str(self.PTU.tiltPos)
s += ")"
self.status.value = s
self.display()
def on_reset(self):
self.PTU.set_pan(0)
self.PTU.set_tilt(0)
self.panPos.value = "0"
self.tiltPos.value = "0"
self.status.value = "Reset"
self.display()
def afterEditing(self):
self.status.value = ""
self.parentApp.setNextForm(self.ret_form_id)
# All controls for the wide angle camera
class WideMenu(ns.ActionFormMinimal, ns.SplitForm, winFormBase):
def bind(self, ret_form_id, bindings=None):
self.set = bindings[0]
self.PTU = bindings[1]
self.set.get_cv_func(wo.MotionCalibration).assign([self.PTU])
self.ret_form_id = ret_form_id
# control positioning of form
self.show_atx = 0
self.show_aty = 0
# Create status bar
self.status = bs.createStatusBar(self)
# Create button sets
bs.buttsetBasicOpenClose(self, self.set.get_cv_func(wo.DisplayFeedInverted), statusBar=self.status)
bs.targettingButtonSet(self, self.set.get_cv_func(wo.MotionCalibration), self.PTU, statusBar=self.status)
bs.thresholdButtonSet(self, self.set.get_cv_func(wo.ThresholdingBasic), statusBar=self.status)
bs.motionButtonSetBasic(self, self.set.get_cv_func(wo.DisplayMotionBasic), statusBar=self.status)
# Called when the window is closed
def on_ok(self):
self.status.value = ""
self.parentApp.setNextForm(self.ret_form_id)
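The switchForm calls above imply an npyscreen application that registers each form id ("MAIN", "CAM_INFO", "PTU", ...). A minimal sketch of that wiring, showing only the forms that need no hardware bindings (the camera/PTU forms would be added and bound the same way, with their device objects passed to bind()):

import npyscreen as ns

class SentryApp(ns.NPSAppManaged):
    def onStart(self):
        # form ids must match the strings used by switchForm()
        self.addForm("MAIN", MainMenu, name="Sentry")
        credits_form = self.addForm("CREDITS", Credits, name="Credits")
        credits_form.bind("MAIN")  # return to the main menu on OK

if __name__ == "__main__":
    SentryApp().run()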
|
UTF-8
|
Python
| false
| false
| 14,760
|
py
| 20
|
winForms.py
| 19
| 0.614431
| 0.609214
| 0
| 436
| 32.837156
| 116
|
internnos/Wave-Classification
| 4,698,694,231,480
|
b460b84fd410b0e981196e5c854a578d30285b76
|
d67223485d10c4749205f00182d49be76f0ae45d
|
/data juli nov 2013 sinabung/Run/merge_training_set.py
|
d2556ada739fafd0edf7164d4311180cd94cd052
|
[] |
no_license
|
https://github.com/internnos/Wave-Classification
|
9db5bb2ac70de55a2faa6c87ffcb4c2719d202a8
|
706eac724b2ad78462ae055b2dec439ac27afc9e
|
refs/heads/master
| 2022-06-12T19:16:59.919501
| 2017-08-25T01:14:03
| 2017-08-25T01:14:03
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 6 14:50:50 2017
@author: Amajid Sinar
"""
import pandas as pd
import numpy as np
#---------------------------------------------------------------------------------
#Merge training set
VTA_training_set = pd.read_csv("dataset/VTA-training-set.csv", delimiter=";", header=None).values
VTB_training_set = pd.read_csv("dataset/VTB-training-set.csv", delimiter=";", header=None).values
training_set = np.concatenate((VTA_training_set,VTB_training_set))
#Merge test set
VTA_test_set = pd.read_csv("dataset/VTA-test-set.csv", delimiter=";", header=None).values
VTB_test_set = pd.read_csv("dataset/VTB-test-set.csv", delimiter=";", header=None).values
test_set = np.concatenate((VTA_test_set,VTB_test_set))
#Create csv
np.savetxt('dataset/training-set.csv',training_set,fmt="%s",delimiter=",")
np.savetxt('dataset/test-set.csv',test_set,fmt="%s",delimiter=",")
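A small sketch of consuming the merged files afterwards; the column layout (features first, label last) is an assumption about the VTA/VTB schema, not something stated in this script:

import pandas as pd

data = pd.read_csv('dataset/training-set.csv', header=None).values
X, y = data[:, :-1], data[:, -1]  # assumed: label in the final column
print(X.shape, y.shape)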
|
UTF-8
|
Python
| false
| false
| 905
|
py
| 14
|
merge_training_set.py
| 7
| 0.646409
| 0.633149
| 0
| 24
| 36.708333
| 97
|
shasfin/begfor
| 5,136,780,889,825
|
6aa749e0f58e67958a8cdb5149a2b926f43a68ea
|
90820dc5862b39406116a3d7067afa39b88b8bc1
|
/Kurs2-Solutions/Woche4/Teil1/exercise1_0.py
|
35b1ed8ca3df4cb6f66c27ef5f530d0a39668df8
|
[] |
no_license
|
https://github.com/shasfin/begfor
|
860470fdc301d819dc0f25829fa266a97d580aab
|
0ae3991569b6cab3b0e66c9d9e0368a06cd05ae7
|
refs/heads/main
| 2023-02-15T03:24:38.462368
| 2021-01-09T21:05:30
| 2021-01-09T21:05:30
| 305,988,691
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from gturtle import *
makeTurtle()
hideTurtle()
setPenColor("Light Sea Green")
setPenWidth(10)
def bar(l):
forward(l)
back(l)
right(90)
penUp()
forward(20)
penDown()
left(90)
groesse = 5
repeat 22:
bar(groesse)
groesse += 10
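Note that 'repeat 22:' is TigerJython-specific syntax (as is the gturtle module). A sketch of the same growing-bar figure in standard CPython, using the stdlib turtle module and a plain for loop:

import turtle

t = turtle.Turtle()
t.hideturtle()
t.pencolor("light sea green")
t.pensize(10)

def bar(length):
    t.forward(length)
    t.back(length)
    t.right(90)
    t.penup()
    t.forward(20)
    t.pendown()
    t.left(90)

groesse = 5
for _ in range(22):
    bar(groesse)
    groesse += 10
turtle.done()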
|
UTF-8
|
Python
| false
| false
| 269
|
py
| 73
|
exercise1_0.py
| 71
| 0.605948
| 0.557621
| 0
| 21
| 11.809524
| 30
|
jonparrott/Darth-Vendor
| 16,209,206,594,160
|
6b7e776e5ed4e267ec83525fc3440f5996922944
|
0f4dd9e486832834ece5d35ac052d91432a3f89f
|
/darth_bootstrap.py
|
2055372f7d6c1e26718a0f8ec8923c55e9bd512b
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/jonparrott/Darth-Vendor
|
b5a6c686e50e0005a249406747c24131a40f9d69
|
bf97b8d51e12a875c6fe7ce8bd9131f3afe16ea2
|
refs/heads/master
| 2016-09-05T21:53:46.134513
| 2015-06-01T20:49:02
| 2015-06-01T20:49:02
| 24,393,952
| 11
| 4
| null | false
| 2015-03-22T08:31:32
| 2014-09-24T00:00:51
| 2015-02-07T18:03:04
| 2015-03-22T08:31:32
| 231
| 7
| 2
| 2
|
Python
| null | null |
import os
import shutil
import darth
def bootstrap():
cwd = os.getcwd()
appengine_config_path = os.path.join(cwd, 'appengine_config.py')
print('Copying darth.py to the current directory...')
shutil.copyfile(darth.__file__.replace('pyc', 'py'), os.path.join(cwd, 'darth.py'))
print('Copied!')
if os.path.exists(appengine_config_path):
print("""
You already have an appengine_config.py file in this directory so darth will not create one for you.
To make sure third party packages are available, please include the following lines in your appengine_config.py:
import darth
darth.vendor('lib')
""")
else:
print("Creating appengine_config.py")
with open(appengine_config_path, "w") as f:
f.write("""
# Use darth to setup the third-party packages folder
import darth
darth.vendor('lib')
""")
print("""Darth bootstrapping complete.
You can now install packages using "pip install -t lib package-name" and import them as usual.""")
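darth's own implementation is not shown in this file, but a vendoring helper of this kind typically just puts the bundled folder at the front of sys.path. A hedged sketch of that idea (an assumption about darth.vendor, not its actual source):

import os
import site
import sys

def vendor(folder):
    path = os.path.join(os.path.dirname(__file__), folder)
    site.addsitedir(path)      # also processes any .pth files in the folder
    sys.path.remove(path)      # then move the folder ahead of site-packages
    sys.path.insert(0, path)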
|
UTF-8
|
Python
| false
| false
| 976
|
py
| 7
|
darth_bootstrap.py
| 6
| 0.705943
| 0.705943
| 0
| 35
| 26.885714
| 112
|
Aasthaengg/IBMdataset
| 7,301,444,445,330
|
c22635e94f7d7f5c99783ecf1b05db4a76846889
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04030/s512344805.py
|
b845e58b760e57c45c9acefc047ded74c82eaf3d
|
[] |
no_license
|
https://github.com/Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
s = input()
ans = ''
for i in range(len(s)):
if s[i] == '0':
ans += '0'
elif s[i] == '1':
ans += '1'
else:
if ans == '':
continue
else:
ans = ans[:-1]
print(ans)
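The loop implements backspace editing: '0' and '1' are kept, and any other character deletes the last kept digit. The same logic reads more naturally with a list used as a stack, which avoids the repeated string slicing:

def apply_edits(s):
    stack = []
    for ch in s:
        if ch in '01':
            stack.append(ch)
        elif stack:          # a non-digit acts as a backspace when possible
            stack.pop()
    return ''.join(stack)

print(apply_edits('0B1'))  # -> '1'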
|
UTF-8
|
Python
| false
| false
| 229
|
py
| 202,060
|
s512344805.py
| 202,055
| 0.349345
| 0.327511
| 0
| 13
| 16.692308
| 26
|
tete1987/selenium_code
| 15,169,824,527,785
|
e80da9d886eabf2f6849d2cf2f2348d2b2c28c50
|
6dcc75f2c4d700db6d95024a2ffbca0566e1641f
|
/test_chromede.py
|
31348cfc34dd81b89c9914e72ebc4d788bd55dc9
|
[] |
no_license
|
https://github.com/tete1987/selenium_code
|
a5d7cfa3d2cc614399c63fa039cd6b43153882f4
|
44867e6eb1aa27963c6c96586e6ed22deb533794
|
refs/heads/main
| 2023-02-15T04:20:23.218946
| 2021-01-08T03:37:25
| 2021-01-08T03:37:25
| 317,390,025
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/1 16:33
# @Author : TETE
# @File : test_chromede.py
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
class TestChrome:
def setup_method(self,method):
option = Options()
option.debugger_address = '127.0.0.1:9222'
self.driver = webdriver.Chrome(options=option)
def teardown(self):
self.driver.quit()
def test_chrome(self):
self.driver.get("https://work.weixin.qq.com/wework_admin/frame")
self.driver.find_element(By.XPATH,'//*[@id="menu_contacts"]/span').click()
sleep(3)
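The test attaches to a Chrome instance that is already listening on the DevTools port 127.0.0.1:9222, so such an instance must be started beforehand. A hedged sketch of launching one (the executable path and profile directory are platform-specific assumptions):

import subprocess

subprocess.Popen([
    r'C:\Program Files\Google\Chrome\Application\chrome.exe',  # assumed path
    '--remote-debugging-port=9222',
    r'--user-data-dir=C:\selenium\chrome-profile',  # any scratch profile dir
])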
|
UTF-8
|
Python
| false
| false
| 712
|
py
| 25
|
test_chromede.py
| 25
| 0.661517
| 0.629213
| 0
| 25
| 27.52
| 82
|
jkrlr/BlogWebApp
| 2,207,613,218,932
|
0b6c11c6cae86610e2d6d0221e79e6582faf7ef5
|
724880942a300ed9d471f998f10ed2a3834827f5
|
/BloggingApp/mysite/account/admin.py
|
8ca6f7f539a5d27f99bfa7d50bfa6c65e0a76465
|
[] |
no_license
|
https://github.com/jkrlr/BlogWebApp
|
b1bbcb2e8c8cc1bc3721650a38e64a9e408212bd
|
82778f81840a5d82610cafd99abb6d1d1e760854
|
refs/heads/master
| 2023-02-08T03:59:56.899657
| 2021-01-04T10:43:01
| 2021-01-04T10:43:01
| 285,786,796
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import MyUser, Request
# Register your models here.
admin.site.register(MyUser, UserAdmin)
admin.site.register(Request)
|
UTF-8
|
Python
| false
| false
| 216
|
py
| 3
|
admin.py
| 1
| 0.814815
| 0.814815
| 0
| 8
| 26
| 47
|
shengg/pyscf
| 9,371,618,683,527
|
d2e6ec0435429e0add21a8f3e08c60380621e3e1
|
2efa12def6387c8dc68314845b31c1c8ab581b89
|
/lib/vhf/test/test_nrdirect.py
|
a650a6b4d819f568a916436ae42b05cb3211ef07
|
[
"BSD-2-Clause"
] |
permissive
|
https://github.com/shengg/pyscf
|
49ece585e9b7b4c3c3c2e5eff2ce97caa8a4a826
|
5cb6ab6e85c123c912500a534252097da2f79e03
|
refs/heads/0.11
| 2020-12-24T08:32:27.182845
| 2015-03-09T14:58:24
| 2015-03-09T14:58:24
| 31,979,613
| 0
| 0
| null | true
| 2015-03-10T20:17:24
| 2015-03-10T20:17:23
| 2015-03-09T14:43:03
| 2015-03-09T14:43:23
| 4,523
| 0
| 0
| 0
| null | null | null |
#!/usr/bin/env python
import os
import ctypes
import _ctypes
import unittest
import numpy
from pyscf import lib
from pyscf import scf
from pyscf import gto
from pyscf import ao2mo
libcvhf1 = lib.load_library('libcvhf')
mol = gto.Mole()
mol.verbose = 0
mol.output = None#'out_h2o'
mol.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.basis = {'H': 'cc-pvdz',
'O': 'cc-pvdz',}
#mol.atom = [
# [1 , (0. , -0.757 , 0.587)],
# [1 , (0. , 0.757 , 0.587)] ]
#
#mol.basis = {'H': 'sto-3g',}
mol.build()
rhf = scf.RHF(mol)
rhf.scf()
nao = mol.nao_nr()
npair = nao*(nao+1)//2
c_atm = numpy.array(mol._atm, dtype=numpy.int32)
c_bas = numpy.array(mol._bas, dtype=numpy.int32)
c_env = numpy.array(mol._env)
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
cintopt = ctypes.c_void_p()
vhfopt = ctypes.c_void_p()
# for each dm1, call namejk
def runjk(dm1, ncomp, intorname, unpackname, filldot, *namejk):
fdrv = getattr(libcvhf1, 'CVHFnr_direct_drv')
intor = ctypes.c_void_p(_ctypes.dlsym(libcvhf1._handle,
intorname))
funpack = ctypes.c_void_p(_ctypes.dlsym(libcvhf1._handle,
unpackname))
fdot = ctypes.c_void_p(_ctypes.dlsym(libcvhf1._handle, filldot))
njk = len(namejk)
if dm1.ndim == 2:
n_dm = 1
dm1 = (dm1,)
else:
n_dm = dm1.shape[0]
fjk = (ctypes.c_void_p*(njk*n_dm))()
dms = (ctypes.c_void_p*(njk*n_dm))()
for i, symb in enumerate(namejk):
f1 = ctypes.c_void_p(_ctypes.dlsym(libcvhf1._handle, symb))
for j in range(n_dm):
dms[i*n_dm+j] = dm1[j].ctypes.data_as(ctypes.c_void_p)
fjk[i*n_dm+j] = f1
vjk = numpy.zeros((njk,n_dm*ncomp,nao,nao))
fdrv(intor, fdot, funpack, fjk, dms,
vjk.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),
cintopt, vhfopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
if n_dm * ncomp == 1:
vjk = vjk.reshape(njk,nao,nao)
return vjk
class KnowValues(unittest.TestCase):
def test_direct_jk(self):
numpy.random.seed(15)
dm1 = numpy.random.random((nao,nao))
dm1 = dm1 + dm1.T
vj0, vk0 = scf._vhf.incore(rhf._eri, dm1, 1)
vj1, vk1 = runjk(dm1, 1, 'cint2e_sph', 'CVHFunpack_nrblock2tril',
'CVHFfill_dot_nrs8',
'CVHFnrs8_ij_s2kl', 'CVHFnrs8_jk_s2il')
vj1 = lib.hermi_triu(vj1, 1)
vk1 = lib.hermi_triu(vk1, 1)
self.assertTrue(numpy.allclose(vj0,vj1))
self.assertTrue(numpy.allclose(vk0,vk1))
dm1 = numpy.array((dm1,dm1))
vj1, vk1 = runjk(dm1, 1, 'cint2e_sph', 'CVHFunpack_nrblock2tril',
'CVHFfill_dot_nrs8',
'CVHFnrs8_ij_s2kl', 'CVHFnrs8_jk_s2il')
vj1[0] = lib.hermi_triu(vj1[0], 1)
vk1[0] = lib.hermi_triu(vk1[0], 1)
vj1[1] = lib.hermi_triu(vj1[1], 1)
vk1[1] = lib.hermi_triu(vk1[1], 1)
self.assertTrue(numpy.allclose(vj0,vj1[0]))
self.assertTrue(numpy.allclose(vk0,vk1[0]))
self.assertTrue(numpy.allclose(vj0,vj1[1]))
self.assertTrue(numpy.allclose(vk0,vk1[1]))
dm1 = numpy.random.random((nao,nao))
eri1 = ao2mo.restore(1, rhf._eri, nao)
vj0 = numpy.einsum('ijkl,kl->ij', eri1, dm1)
vk0 = numpy.einsum('ijkl,jk->il', eri1, dm1)
vj1, vj2 = runjk(dm1, 1, 'cint2e_sph', 'CVHFunpack_nrblock2tril',
'CVHFfill_dot_nrs4',
'CVHFnrs4_ij_s2kl', 'CVHFnrs4_kl_s2ij')
vj1 = lib.hermi_triu(vj1.copy(), 1)
vj2 = lib.hermi_triu(vj2.copy(), 1)
self.assertTrue(numpy.allclose(vj0,vj1))
self.assertTrue(numpy.allclose(vj0,vj2))
vk1 = runjk(dm1, 1, 'cint2e_sph', 'CVHFunpack_nrblock2tril',
'CVHFfill_dot_nrs4',
'CVHFnrs4_il_s1jk', 'CVHFnrs4_jk_s1il')
self.assertTrue(numpy.allclose(vk0,vk1[0]))
self.assertTrue(numpy.allclose(vk0,vk1[1]))
dm1 = dm1 + dm1.T
vk0 = numpy.einsum('ijkl,jk->il', eri1, dm1)
vk1 = runjk(dm1, 1, 'cint2e_sph', 'CVHFunpack_nrblock2tril',
'CVHFfill_dot_nrs4',
'CVHFnrs4_il_s1jk', 'CVHFnrs4_jk_s1il',
'CVHFnrs4_il_s2jk', 'CVHFnrs4_jk_s2il')
vk1[2] = lib.hermi_triu(vk1[2].copy())
vk1[3] = lib.hermi_triu(vk1[3].copy())
self.assertTrue(numpy.allclose(vk0,vk1[0]))
self.assertTrue(numpy.allclose(vk0,vk1[1]))
self.assertTrue(numpy.allclose(vk0,vk1[2]))
self.assertTrue(numpy.allclose(vk0,vk1[3]))
dm1 = numpy.random.random((nao,nao))
vj0 = numpy.einsum('ijkl,kl->ij', eri1, dm1)
vk0 = numpy.einsum('ijkl,jk->il', eri1, dm1)
vk1 = runjk(dm1, 1, 'cint2e_sph', 'CVHFunpack_nrblock2rect',
'CVHFfill_dot_nrs2kl',
'CVHFnrs2ij_ij_s1kl', 'CVHFnrs2ij_kl_s2ij',
'CVHFnrs2ij_jk_s1il', 'CVHFnrs2ij_il_s1jk')
vk1[1] = lib.hermi_triu(vk1[1].copy())
self.assertTrue(numpy.allclose(vj0,vk1[0]))
self.assertTrue(numpy.allclose(vj0,vk1[1]))
self.assertTrue(numpy.allclose(vk0,vk1[2]))
self.assertTrue(numpy.allclose(vk0,vk1[3]))
vk1 = runjk(dm1, 1, 'cint2e_sph', 'CVHFunpack_nrblock2trilu',
'CVHFfill_dot_nrs2kl',
'CVHFnrs2ij_ij_s1kl', 'CVHFnrs2ij_kl_s2ij',
'CVHFnrs2ij_jk_s1il', 'CVHFnrs2ij_il_s1jk')
vk1[1] = lib.hermi_triu(vk1[1].copy())
self.assertTrue(numpy.allclose(vj0,vk1[0]))
self.assertTrue(numpy.allclose(vj0,vk1[1]))
self.assertTrue(numpy.allclose(vk0,vk1[2]))
self.assertTrue(numpy.allclose(vk0,vk1[3]))
vk1 = runjk(dm1, 1, 'cint2e_sph', 'CVHFunpack_nrblock2tril',
'CVHFfill_dot_nrs2ij',
'CVHFnrs2kl_ij_s2kl', 'CVHFnrs2kl_kl_s1ij',
'CVHFnrs2kl_jk_s1il', 'CVHFnrs2kl_il_s1jk')
vk1[0] = lib.hermi_triu(vk1[0].copy())
self.assertTrue(numpy.allclose(vj0,vk1[0]))
self.assertTrue(numpy.allclose(vj0,vk1[1]))
self.assertTrue(numpy.allclose(vk0,vk1[2]))
self.assertTrue(numpy.allclose(vk0,vk1[3]))
vk1 = runjk(dm1, 1, 'cint2e_sph', 'CVHFunpack_nrblock2rect',
'CVHFfill_dot_nrs1',
'CVHFnrs1_ij_s1kl', 'CVHFnrs1_kl_s1ij',
'CVHFnrs1_jk_s1il', 'CVHFnrs1_il_s1jk')
self.assertTrue(numpy.allclose(vj0,vk1[0]))
self.assertTrue(numpy.allclose(vj0,vk1[1]))
self.assertTrue(numpy.allclose(vk0,vk1[2]))
self.assertTrue(numpy.allclose(vk0,vk1[3]))
if __name__ == '__main__':
print('Full Tests for nrvhf')
unittest.main()
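The pattern runjk builds on is resolving C symbols by name from an already-loaded shared library and handing them to a driver as opaque pointers. A tiny sketch of just that mechanism against libm (the library name is a POSIX-specific assumption):

import ctypes
import _ctypes

libm = ctypes.CDLL('libm.so.6')             # platform-specific library name
addr = _ctypes.dlsym(libm._handle, 'cos')   # raw symbol address, looked up by name
fptr = ctypes.c_void_p(addr)                # opaque pointer, as runjk passes to fdrv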
|
UTF-8
|
Python
| false
| false
| 7,058
|
py
| 34
|
test_nrdirect.py
| 26
| 0.560074
| 0.508926
| 0
| 186
| 36.94086
| 73
|
Trackerming/bitcoin-sv
| 17,798,344,480,193
|
82c6cede1d0ca8bcff6c14932763d858dce480f8
|
89b76f3a8d1e790b19d4aff0ff2cd6fc938b7741
|
/test/functional/genesis_upgrade_tests/sigops_count_limit.py
|
0a1529c9010dd6ead9dbef9b4c40dfb01f76432f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
https://github.com/Trackerming/bitcoin-sv
|
27a47d57887fde8445fee5a7d8719c47a95d7f16
|
fb50a64e3ea0334a86b2c80daf5147c5bc2693c4
|
refs/heads/master
| 2021-07-07T07:20:26.060533
| 2020-08-05T02:35:56
| 2020-08-05T02:35:56
| 160,283,904
| 0
| 0
|
MIT
| true
| 2020-08-05T02:35:57
| 2018-12-04T02:24:56
| 2018-12-04T02:25:01
| 2020-08-05T02:35:56
| 69,417
| 0
| 0
| 0
|
C++
| false
| false
|
from genesis_upgrade_tests.test_base import GenesisHeightBasedSimpleTestsCase
from test_framework.height_based_test_framework import SimpleTestDefinition
from test_framework.script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN, OP_DROP
from test_framework.cdefs import MAX_TX_SIGOPS_COUNT_POLICY_BEFORE_GENESIS, MAX_TX_SIGOPS_COUNT_BEFORE_GENESIS
class SigOpLimitCountDefaultTestCase(GenesisHeightBasedSimpleTestsCase):
ARGS = GenesisHeightBasedSimpleTestsCase.ARGS + ['-banscore=1000000', '-whitelist=127.0.0.1']
NAME = "SigOps Count Limit Simple test"
TESTS_PRE_GENESIS_DEFAULT = [
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"", test_tx_locking_script=CScript([OP_CHECKSIG] * MAX_TX_SIGOPS_COUNT_POLICY_BEFORE_GENESIS)),
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"", test_tx_locking_script=CScript([OP_CHECKSIG] * (MAX_TX_SIGOPS_COUNT_POLICY_BEFORE_GENESIS + 1)),
p2p_reject_reason=b'bad-txns-too-many-sigops'),
# Framework puts all the transactions that are considered valid into one block - added 2 transactions
# with 1 sigop to drop the density
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"",
test_tx_locking_script=CScript([OP_CHECKSIG])),
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"",
test_tx_locking_script=CScript([OP_CHECKSIG] + [b"a" * 500, OP_DROP]*1000)),
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"",
test_tx_locking_script=CScript([OP_CHECKSIG] + [b"a" * 500, OP_DROP]*1000)),
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"",
test_tx_locking_script=CScript([OP_CHECKSIG] * MAX_TX_SIGOPS_COUNT_BEFORE_GENESIS),
p2p_reject_reason=b'bad-txns-too-many-sigops'),
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"", test_tx_locking_script=CScript([OP_CHECKSIG] * (MAX_TX_SIGOPS_COUNT_BEFORE_GENESIS + 1)),
p2p_reject_reason=b'flexible-bad-txn-sigops',
block_reject_reason=b'bad-txn-sigops'),
]
TESTS_POST_GENESIS_DEFAULT = [
SimpleTestDefinition("GENESIS", CScript([OP_TRUE]),
"GENESIS", b"", test_tx_locking_script=CScript(
[OP_CHECKSIG] * (MAX_TX_SIGOPS_COUNT_POLICY_BEFORE_GENESIS + 1)))
]
TESTS = TESTS_PRE_GENESIS_DEFAULT + TESTS_POST_GENESIS_DEFAULT
class SigOpLimitCountPolicyTestCase(GenesisHeightBasedSimpleTestsCase):
ARGS = GenesisHeightBasedSimpleTestsCase.ARGS + ['-banscore=1000000', '-whitelist=127.0.0.1', '-maxtxsigopscountspolicy=9000']
NAME = "SigOps Count Limit Simple test with policy parameter"
TESTS_PRE_GENESIS_POLICY = [
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"", test_tx_locking_script=CScript([OP_CHECKSIG] * MAX_TX_SIGOPS_COUNT_POLICY_BEFORE_GENESIS)),
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"", test_tx_locking_script=CScript([OP_CHECKSIG] * (MAX_TX_SIGOPS_COUNT_POLICY_BEFORE_GENESIS + 1)),
p2p_reject_reason=b'bad-txns-too-many-sigops'),
# Framework puts all the transactions that are considered valid into one block - added 2 transactions
# with 1 sigop to drop the density
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"",
test_tx_locking_script=CScript([OP_CHECKSIG])),
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"",
test_tx_locking_script=CScript([OP_CHECKSIG] + [b"a" * 500, OP_DROP] * 1000)),
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"",
test_tx_locking_script=CScript([OP_CHECKSIG] + [b"a" * 500, OP_DROP] * 1000)),
# Sum of all sigops (4000+4001+20000) is compared to MAX_BLOCK_SIGOPS_PER_MB (20000), thus failing with bad-blk-sigops
# MAX_BLOCK_SIGOPS_PER_MB should be increased to at least 22002
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"",
test_tx_locking_script=CScript([OP_CHECKSIG] * MAX_TX_SIGOPS_COUNT_BEFORE_GENESIS),
p2p_reject_reason=b'bad-txns-too-many-sigops'),
SimpleTestDefinition("PRE-GENESIS", CScript([OP_TRUE]),
"PRE-GENESIS", b"", test_tx_locking_script=CScript([OP_CHECKSIG] * (MAX_TX_SIGOPS_COUNT_BEFORE_GENESIS + 1)),
p2p_reject_reason=b'flexible-bad-txn-sigops',
block_reject_reason=b'bad-txn-sigops'),
]
TESTS_POST_GENESIS_POLICY = [
SimpleTestDefinition("GENESIS", CScript([OP_TRUE]),
"GENESIS", b"", test_tx_locking_script=CScript([OP_CHECKSIG] * 9000)),
SimpleTestDefinition("GENESIS", CScript([OP_TRUE]),
"GENESIS", b"", test_tx_locking_script=CScript([OP_CHECKSIG] * (9000 + 1)),
p2p_reject_reason=b'bad-txns-too-many-sigops'),
]
TESTS = TESTS_PRE_GENESIS_POLICY + TESTS_POST_GENESIS_POLICY
|
UTF-8
|
Python
| false
| false
| 5,916
|
py
| 261
|
sigops_count_limit.py
| 230
| 0.576572
| 0.558654
| 0
| 94
| 61.946809
| 145
|
diljots99-old/Django-Movie-APi
| 5,514,738,026,178
|
fbbd62d1f05d842192f39586c74af9fd6b536a08
|
5a846f56fe1e91e3d76b5bdaebea257bce845964
|
/MediaStreaming/ApiApp/urls.py
|
10cb8baed9a0c2851f1a84135d3b9c0dc67eef31
|
[] |
no_license
|
https://github.com/diljots99-old/Django-Movie-APi
|
6dfe2118cc6875cfc38ae3b67810b4370e18cae4
|
beba716d5cd10c833cbf22da8fb1074ca10a28dd
|
refs/heads/main
| 2023-02-19T17:52:45.543379
| 2021-01-21T11:41:25
| 2021-01-21T11:41:25
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path,include
from rest_framework import routers
from ApiApp import views,movies,users,people,torrent
from rest_framework.urlpatterns import format_suffix_patterns
# router = routers.DefaultRouter()
# router.register("movies",views.MovieApi)
# router.register("users",views.UserApi)
# router.urls
# urlpatterns = [
# path('', include(router.urls)),
# ]
app_name = 'ApiApp'
urlpatterns = [
path('', views.ApiHome.as_view()),
path('movie/now_playing/', movies.get_now_playing_movies.as_view()),
path('movie/top_rated/', movies.get_top_rated_movies.as_view()),
path('movie/popular/', movies.get_popular_movies.as_view()),
path('movie/new_releases/', movies.get_new_releases.as_view()),
path('movie/poster/<int:movie_id>', movies.get_movie_poster.as_view(),name="movie_poster"),
path('movie/poster_urls/<int:movie_id>', movies.get_movie_poster_urls.as_view()),
path('movie/backdrop/<int:movie_id>', movies.get_movie_backdrop.as_view(),name="movie_backdrop"),
path('movie/backdrop_urls/<int:movie_id>', movies.get_movie_backdrop_urls.as_view()),
path('movie/details/<int:movie_id>', movies.get_complete_movie_details.as_view(),name='movie_details'),
path('movie/credits/<int:movie_id>', movies.movie_credits.as_view()),
path('movie/similar/<int:movie_id>', movies.similar_movies.as_view()),
path('search/movie/', movies.search_movie.as_view()),
path("user/",users.Users.as_view()),
path("user/verify/",users.updateVerifiedUser.as_view()),
path("user/history/<str:pk>",users.Users_History.as_view()),
path("user/favourites/<str:pk>",users.Users_Favourites.as_view()),
path("user/watchlist/<str:pk>",users.Users_Watchlist.as_view()),
path('file/torrent/<int:torrent_id>',torrent.get_torrent_file.as_view()),
path('people/profile_picture/<int:people_id>',people.get_people_profile_picture.as_view())
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
UTF-8
|
Python
| false
| false
| 1,974
|
py
| 39
|
urls.py
| 27
| 0.695035
| 0.695035
| 0
| 53
| 36.226415
| 107
|
Save-Pets/Save-Pets-ML
| 14,199,161,887,341
|
03ba24504d9007878e2285811ad0e71d5aaf2754
|
6c5b99b113915155845da1c119c13a364d14c34a
|
/SVM-Classifier/preprocess.py
|
1ba65c0dec40448c783417885f93717de10d39e8
|
[] |
no_license
|
https://github.com/Save-Pets/Save-Pets-ML
|
b5228ef182f7593891898db2babd1a5f56767ace
|
fcd9ff0256b71000765e35c799641ab657dd2ddb
|
refs/heads/main
| 2023-06-06T04:07:27.768355
| 2021-06-13T12:05:16
| 2021-06-13T12:05:16
| 339,622,073
| 2
| 2
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import argparse
import cv2
import numpy as np
from PIL import Image
from histo_clahe import histo_clahe
parser = argparse.ArgumentParser(description='Argparse Tutorial')
parser.add_argument('--dir', default='0',help='dataset directory')
parser.add_argument('--savedir', default='./Dog-Data/train',help='save directory')
opt = parser.parse_args()
path = './image/' + opt.dir
file_list = os.listdir(path)
rotate = [0, 15, 30, -15, -30]
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
for file in file_list:
s = os.path.splitext(file)
savedir = []
createFolder(opt.savedir + '/' + opt.dir)
    # file save paths: ./Dog-Data/train/imagename-i.jpg
for i in range(10):
savedir.append(opt.savedir + '/' + opt.dir + '/' + s[0] + '-' + str(i) + s[1])
    # save rotated copies at full size (scale 1)
img = histo_clahe(path + '/' + file)
height, width, channel = img.shape
for i in range(5):
matrix = cv2.getRotationMatrix2D((width/2, height/2), rotate[i], 1)
dst = cv2.warpAffine(img, matrix, (width, height))
cv2.imwrite(savedir[i],dst)
    # save rotated copies at half size (scale 1/2)
img = cv2.resize(img,(int(width / 2), int(height / 2)))
height, width, channel = img.shape
for i in range(5):
matrix = cv2.getRotationMatrix2D((width/2, height/2), rotate[i], 1)
dst = cv2.warpAffine(img, matrix, (width, height))
cv2.imwrite(savedir[i+5],dst)
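The augmentation step in isolation: rotate an image about its center with OpenCV, as done for each angle above (the input filename is a placeholder):

import cv2

def rotate(img, angle):
    h, w = img.shape[:2]
    matrix = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1)
    return cv2.warpAffine(img, matrix, (w, h))

img = cv2.imread('sample.jpg')  # placeholder input
for angle in (0, 15, 30, -15, -30):
    cv2.imwrite('sample-rot%d.jpg' % angle, rotate(img, angle))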
|
UTF-8
|
Python
| false
| false
| 1,628
|
py
| 7
|
preprocess.py
| 3
| 0.609296
| 0.585427
| 0
| 51
| 29.176471
| 86
|
abdo3247/addons
| 8,186,207,710,959
|
8563e438418906cebfbebadb0651aad3f87d14cf
|
ea07b85d8ebf242ada2ca331befdec9a16773cf2
|
/product_dimension/sale.py
|
42a9d984cfb4b0ffdb13e665da7bcfcbf4bade24
|
[] |
no_license
|
https://github.com/abdo3247/addons
|
88386d1c16b5f657086c9be136dbcc57c4d9f63e
|
33a8d50a36beb1375a07e450995360f2bc44dd60
|
refs/heads/master
| 2021-01-15T18:58:59.089160
| 2016-01-30T13:08:34
| 2016-01-30T13:09:17
| 52,379,103
| 0
| 0
| null | true
| 2016-02-23T17:46:39
| 2016-02-23T17:46:39
| 2016-01-30T12:57:33
| 2016-02-22T21:53:04
| 318
| 0
| 0
| 0
| null | null | null |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2015 BADEP. All Rights Reserved.
# Author: Khalid Hazam<k.hazam@badep.ma>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_order_line_procurement(self, order, line, group_id=False):
res = super(SaleOrder, self)._prepare_order_line_procurement(order, line, group_id)
res['product_dimension_qty'] = line.product_dimension_qty
res['dimensions'] = [(0, 0, {'dimension': d.dimension.id, 'quantity': d.quantity}) for d in line.dimensions]
return res
SaleOrder()
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
dimensions = fields.One2many('sale.order.line.dimension', 'sale_order_line', readonly=True, states={'draft': [('readonly', False)]})
product_visible_qty = fields.Float('Quantité', compute='get_visible_qty')
product_dimension_qty = fields.Integer('Quantité', required=True, default=1)
@api.multi
@api.onchange('dimensions', 'product_dimension_qty')
def onchange_set_name(self):
if self.product_id:
if self.dimensions:
str_dim='('
for d in self.dimensions:
str_dim += str(d.quantity) + d.dimension.name + '*'
str_dim = str_dim[:-1] + ')'
name = str(self.product_dimension_qty) + ' ' + self.product_id.name + '(s) ' + str_dim
if self.product_id.description_sale:
name += '\n' + self.product_id.description_sale
self.name = name
else:
self.name = self.product_id.name
@api.multi
@api.depends('product_uom_qty')
def get_visible_qty(self):
self.product_visible_qty = self.product_uom_qty
@api.multi
@api.onchange('dimensions', 'product_dimension_qty')
def onchange_dimensions(self):
qty = self.product_dimension_qty
for d in self.dimensions:
qty *= d.quantity / d.dimension.multiplier
if qty != self.product_uom_qty:
self.product_uom_qty = qty
@api.multi
def onchange_product_uom(self, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, fiscal_position=False, context=None):
res = super(SaleOrderLine, self).onchange_product_uom(pricelist=pricelist, product=product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, fiscal_position=fiscal_position)
if uom:
res['value'].update(dimensions = [(0, 0, {'dimension':d.id, 'quantity':d.multiplier, 'sale_order_line': self.id}) for d in self.env['product.uom'].browse(uom).dimensions])
res['value'].update(product_dimension_qty = qty)
return res
SaleOrderLine()
class SaleOrderLineDimension(models.Model):
_name = "sale.order.line.dimension"
dimension = fields.Many2one('product.uom.dimension', required=True, ondelete='cascade')
quantity = fields.Float('Quantité', digits_compute=dp.get_precision('Product UoS'), required=True)
sale_order_line = fields.Many2one('sale.order.line', required=True, ondelete='cascade')
extrapolated_qty = fields.Integer(string='Quantité extrapolée', compute='get_extrapolated_qty')
@api.multi
@api.depends('quantity')
def get_extrapolated_qty(self):
if self.dimension.rounding != 0:
self.extrapolated_qty = round(self.quantity / self.dimension.rounding)
else:
self.extrapolated_qty = self.quantity + self.dimension.offset
SaleOrderLineDimension()
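The quantity rule encoded in onchange_dimensions, restated outside the ORM for clarity: the unit-of-measure quantity is the piece count scaled by each dimension's quantity over its multiplier. A hedged sketch in plain Python, without Odoo records:

def order_quantity(piece_count, dimensions):
    # dimensions: iterable of (quantity, multiplier) pairs
    qty = piece_count
    for quantity, multiplier in dimensions:
        qty *= quantity / multiplier
    return qty

# e.g. 5 panels of 2.5 x 1.2 with unit multipliers:
print(order_quantity(5, [(2.5, 1.0), (1.2, 1.0)]))  # -> 15.0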
|
UTF-8
|
Python
| false
| false
| 4,845
|
py
| 7
|
sale.py
| 5
| 0.613017
| 0.609298
| 0
| 103
| 46
| 183
|
cfe-lab/stocky
| 9,517,647,554,983
|
e74e2bcbc7e966b98bc9b7ec65b9edd31d54e2cd
|
176fa7e597c2f2337f80a9d4e393e636e5823129
|
/stocky-devel/stocky/deprecated_code/datawidgets.py
|
e218dbc27bad613fd41847ad0ac3140905e72ad2
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/cfe-lab/stocky
|
0133abf16bbf068d8ef3684af4d7b22e96756e37
|
6c4114551d9b61c5ba3db2c237e995fecdd2f6b3
|
refs/heads/master
| 2021-04-15T17:01:15.610547
| 2019-04-08T18:51:52
| 2019-04-08T18:51:52
| 126,519,914
| 2
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from org.transcrypt.stubs.browser import FormData, __new__
import qailib.common.serversocketbase as serversocketbase
import qailib.common.dataelements as dataelements
import qailib.transcryptlib.guiforms as guiforms
class socket_controller(base_controller):
"""The base class of all main programs on the client side.
The controller uses a server_socket to communicate with the server via
a data_cache, and responds to events received.
"""
def __init__(self, myname: str, ws: serversocketbase.base_server_socket) -> None:
super().__init__(myname)
self._ws = ws
self._dcache = dataelements.data_cache("datacache", ws)
self._dcache.addObserver(self, base.MSGD_LOG_MESSAGE)
self._dcache.addObserver(self, base.MSGD_DATA_CACHE_READY)
# NOTE: server messages are sent to the datacache
# ws.addObserver(self, base.MSGD_SERVER_MSG)
class FormWidget(base_widget):
"""A Basic Form widget.
Note that, in 'normal' http-based web sites, the form HTML element has
an 'action' attribute (URL on the server to which the form data
is sent)
and a 'method' attribute (PUT or GET http method).
Here, however, we are using websockets to communicate with the server, and we must
therefore NOT allow the form to be submitted in the conventional sense.
There are at least two ways of doing this:
a) use a submit input element and override the onsubmit method of the form
b) use button input element and override the onclick method of the form.
As the messaging system already uses B, we opt for that.
    Note that, in addition, catching the onsubmit events would require calling the
    javascript methods ev.preventDefault() and ev.stopPropagation() in the event handler.
"""
def __init__(self,
contr: base_controller,
parent: 'base_widget',
idstr: str,
attrdct: dict,
jsel,
formbuilder: guiforms.HtmlFormBuilder,
my_user_data: dataelements.record) -> None:
super().__init__(contr, parent, "{}-pp".format(idstr), attrdct, jsel)
self._formbuilder = formbuilder
self._userdata = my_user_data
self.form_el = html.form(self, idstr, None, None)
html_tab, self._field_lst = formbuilder.gen_table(self.form_el,
'usertable',
dict(border=1, width='100%'),
my_user_data)
self.addItem(html_tab)
self.addSubmitButton('usermod-button', 'Submit')
self.addObserver(contr, base.MSGD_FORM_SUBMIT)
def addItem(self, menu_itm: html.element) -> html.element:
"""Append an html item to the end of the form elements."""
self.form_el.appendChild(menu_itm)
return menu_itm
def addSubmitButton(self, button_idstr: str, buttontext: str) -> None:
attrdct = {"value": buttontext, "type": "submit"}
self.button = html.input_button(self.form_el, button_idstr, attrdct, None)
# self.button._el.onsubmit = self._onsubmit
self.button.addObserver(self, base.MSGD_BUTTON_CLICK)
def rcvMsg(self,
whofrom: base.base_obj,
msgdesc: base.MSGdesc_Type,
msgdat: Optional[base.MSGdata_Type]) -> None:
if whofrom == self.button:
print("FormWidget received my button click!")
# this doesn't seem to exist...
# self.handle_submission()
else:
print("menulst: relaying message {}, {}".format(msgdesc, msgdat))
self.relayMsg(whofrom, msgdesc, msgdat)
def OLDgetFormData(self) -> dict:
"""Return a dict containing the current values of the form elements.
This method reaches into the nether regions of javascript and uses
a FormData element to retrieve the keys and values of any input elements
in the form.
See here for the specification of FormData:
https://developer.mozilla.org/en-US/docs/Web/API/FormData/Using_FormData_Objects
        NOTE: this should work, and it used to; however, it now returns an empty
        string. We work around this by extracting the data from the form ourselves.
"""
fd = __new__(FormData(self.form_el._el))
return dict([tt for tt in fd.entries()])
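A hedged sketch of the workaround the docstring refers to: walk the form's own input elements and collect name/value pairs instead of relying on FormData. The .elements collection is the standard DOM API; the _el attribute is this codebase's handle on the underlying node, and the field names here are illustrative assumptions:

def getFormData(self) -> dict:
    dct = {}
    for el in self.form_el._el.elements:
        # keep named input fields; skip the submit button itself
        if el.name and el.type != 'submit':
            dct[el.name] = el.value
    return dct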
|
UTF-8
|
Python
| false
| false
| 4,471
|
py
| 134
|
datawidgets.py
| 69
| 0.628047
| 0.627153
| 0
| 94
| 46.521277
| 88
|
Evgen1177/alco
| 541,165,924,545
|
bbb0199e5ab1b12c277f5d6cd45b97031ce2db4e
|
d509c3ab42df4e78609f1ab575dd17c53f5862d0
|
/src/beer/urls.py
|
26b02ad364eae4bfb51b582dbf295f703f1c8b23
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/Evgen1177/alco
|
84588553b5e053580d5fe0a218e0e064aa0e4aca
|
04c2b41a8c570e505c44bd5b08895e9be9d10d8d
|
refs/heads/master
| 2022-04-30T14:01:46.884494
| 2019-12-03T10:26:54
| 2019-12-03T10:26:54
| 213,933,194
| 0
| 0
|
Apache-2.0
| false
| 2023-03-27T20:44:42
| 2019-10-09T13:57:57
| 2019-12-03T10:27:10
| 2022-04-22T22:38:48
| 5,033
| 0
| 0
| 4
|
Python
| false
| false
|
from django.urls import path
from . import views
urlpatterns = [
path("",views.beerView.as_view(), name="beer")
]
|
UTF-8
|
Python
| false
| false
| 127
|
py
| 30
|
urls.py
| 22
| 0.637795
| 0.637795
| 0
| 7
| 16.142857
| 50
|
robertdfrench/break
| 6,708,738,955,188
|
bc742b6dd729e3dfdc010a3d63af7de26ba26c4f
|
c3fca8e9e94c670522feadb9ca71b06a4a45a7f8
|
/tests/test_provides.py
|
394da5d6dd9f434d8b5bb97642eb6eb2b1f5bb74
|
[
"MIT"
] |
permissive
|
https://github.com/robertdfrench/break
|
d3c0e95a18867011f20de00eae675bfd04459f8e
|
726b187892817d8b6ea7413e46ceed306d330e21
|
refs/heads/master
| 2021-01-12T11:52:30.345164
| 2019-07-25T20:31:05
| 2019-07-25T20:31:05
| 69,597,626
| 0
| 0
| null | false
| 2016-10-12T20:45:26
| 2016-09-29T18:49:36
| 2016-09-29T19:41:30
| 2016-10-12T20:45:26
| 22
| 0
| 0
| 1
|
Python
| null | null |
import breakable
import unittest
class TestProvides(unittest.TestCase):
def test_run_if_not_exists(self):
@breakable.provides("nothing")
def dummy(self):
return 5
self.assertEqual(dummy(self), 5)
def test_dont_run_if_exists(self):
@breakable.provides("README.md")
def dummy(self):
return 5
self.assertEqual(dummy(self), None)
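For reference, a decorator with the behaviour these two tests pin down would look roughly like this (an illustration, not breakable's actual source): run the wrapped function only when the named file is absent, otherwise return None:

import functools
import os

def provides(filename):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if os.path.exists(filename):
                return None        # artifact already exists; skip the work
            return func(*args, **kwargs)
        return wrapper
    return decorator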
|
UTF-8
|
Python
| false
| false
| 409
|
py
| 22
|
test_provides.py
| 16
| 0.618582
| 0.611247
| 0
| 16
| 24.5625
| 43
|
hangdragon/DNN
| 5,437,428,609,734
|
b1eedf881e47a44f80bb2a7e8b85c0a1eace4d39
|
4749276f3075c477598eba4be32db32e23226f86
|
/pypy/speed.py
|
d9236465352be678f4b794ad02b7a71b5d33c285
|
[] |
no_license
|
https://github.com/hangdragon/DNN
|
1eca7268f72e412cca4624e3236736e3f22d7921
|
1bc80d067a74168d8eaf452e54eb2f1e97a62036
|
refs/heads/master
| 2022-07-31T18:04:47.551383
| 2020-05-25T13:34:45
| 2020-05-25T13:34:45
| 266,782,474
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding : utf-8 -*-
print("{}문의 {} : {}".format("for", "iterable" , "dictionary"))
dict_a = {"key1" : 12 , "key2" :34}
for i in dict_a:
print(i,end = ' ')
print("\n")
print("{}문의 {} : {}".format("for", "iterable" , "dictionary.items()"))
dict_a = {"key1" : 12 , "key2" :34}.items()
for i in dict_a:
print(i,end = ' ')
print("\n")
print("{}문의 {} : {}".format("for", "iterable" , "enumerate()"))
list_a = enumerate([i*i for i in range(5)])
for i in list_a :
print(i,end = ' ')
print("\n")
print("{}문의 {} : {}".format("for", "iterable" , "list(enumerate())"))
list_a = list(enumerate([i * i for i in range(5)]))
for i in list_a:
print(i,end=' ')
print("\n")
# As a for-loop iterable, enumerate() and list(enumerate()) behave exactly the same: the loop can unpack two targets, where the first is the index and the second is the value!
print("{}문의 {} : {}".format("for", "iterable" , "str"))
str_a = "{}".format("웅애야웅애야 ")
for i in range(len(str_a)) :
print("{}번째 반복 : {}".format(i+1,str_a[i]), end = "\n")
print("\n")
# In a for statement, the iterable slot accepts containers (list, tuple, dict, set, str), range(), enumerate(), items(), and so on.
|
UTF-8
|
Python
| false
| false
| 1,201
|
py
| 105
|
speed.py
| 91
| 0.549953
| 0.53408
| 0
| 49
| 20.877551
| 99
|
hsaransa/std2
| 3,539,053,100,771
|
d79db3d2bf4e8264c29337330a11688fb5d822ec
|
8dac8141dcd231de445b8676b6c82ac6b07a2b95
|
/scripts/xcb2.py
|
4fcd693acbe8ad9445a228f925d5f450211d30c9
|
[] |
no_license
|
https://github.com/hsaransa/std2
|
8e6a14962bd44ddedc134152ea21029d23829a77
|
f0787cecfe6935ea514925824dbf977e0b1e4c87
|
refs/heads/master
| 2021-01-17T12:32:03.329648
| 2010-05-06T16:50:29
| 2010-05-06T16:50:29
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import os, sys, fnmatch, time, re
import xml.parsers.expat
consts = []
classes = []
funcs = []
int_types = ['CARD8', 'INT16', 'CARD16', 'CARD32', 'BYTE', 'BOOL', 'INT32', 'INT8']
ignore_types = []
struct_types = []
def mangle(n):
if n.isupper():
return n.lower()
s = n[0].lower() + ''.join(['_' + i.lower() if i.isupper() or i.isdigit() else i for i in n[1:]])
s = re.sub(r'_(.)_', r'_\1', s)
return s
def add_class(a, b):
for c, d in classes:
if a == c:
break
else:
classes.append((a, b))
dada = {}
st = []
def start_element(name, attrs):
global current, struct_name, reply_name, request_name, request_fields, request_failed
global cookie_name, event_name, event_number
dada.setdefault(name, [])
dada[name].append(attrs)
if name == 'struct':
current = 'struct'
struct_name = mangle(attrs['name'])
if name == 'request':
current = 'request'
request_name = mangle(attrs['name'])
request_fields = []
request_failed = False
cookie_name = 'void'
if name == 'reply':
current = 'reply'
reply_name = request_name
cookie_name = reply_name
if name == 'event':
current = 'event'
event_name = mangle(attrs['name'])
event_number = attrs['number']
if name == 'error':
current = 'error'
def end_element(name):
global dada, classes, consts, funcs, struct, struct_name, xids, current, cdata
global request_fields, request_failed, cookie_name, event_name, event_number
attrs = dada[name].pop()
if name == 'struct':
struct_types.append(attrs['name'])
if struct_name == 'setup':
classes.append((struct_name, 'free_setup'))
else:
classes.append((struct_name, 'free'))
if name == 'xidtype':
int_types.append(attrs['name'])
#classes.append((attrs['name'].lower(), None))
if name == 'xidunion':
int_types.append(attrs['name'])
if name == 'union':
ignore_types.append(attrs['name'])
if name == 'typedef':
if attrs['oldname'] in int_types:
int_types.append(attrs['newname'])
if name == 'field' and current in ('struct', 'event'):
f = attrs['name']
t = attrs['type']
if t in ignore_types or t in struct_types:
return
if current == 'struct':
n = struct_name
else:
n = event_name + '_event'
ff = f
ff = '_class' if ff == 'class' else ff
ff = '_new' if ff == 'new' else ff
print 'static void %s_get_%s(void* ret, void* const* args) {' % (n, f)
if t in int_types:
print ' *(int*)ret = ((xcb_%s_t*)args[0])->%s;' % (n, ff)
else:
raise RuntimeError("bad struct field %s" % t)
print '}'
funcs.append(('%s_get_%s' % (n, f), 'i', n))
print 'static void %s_set_%s(void* ret, void* const* args) {' % (n, f)
print ' (void)ret;'
if t in int_types:
print ' ((xcb_%s_t*)args[0])->%s = *(int*)args[1];' % (n, ff)
else:
raise RuntimeError("bad struct field %s" % t)
print '}'
funcs.append(('%s_set_%s' % (n, f), '', n + " i"))
if name == 'field' and current == 'request':
f = attrs['name']
t = attrs['type']
if t in ignore_types or t in struct_types:
raise RuntimeError("bad field %s" % t)
request_fields.append(t)
if name == 'list' and current == 'request':
if attrs['type'] == 'char':
if request_name == 'set_font_path':
request_fields.append('INT32')
request_fields.append('cb')
else:
request_failed = True
#raise RuntimeError("bad field %s" % attrs['type'])
if name == 'valueparam' and current == 'request':
if attrs['value-mask-type'] == 'CARD32':
request_fields.append('INT32')
request_fields.append('valueparam32')
else:
request_failed = True
if name == 'list' and (current == 'struct' or current == 'reply'):
if current == 'struct':
nn = struct_name
nn2 = struct_name
else:
nn = reply_name + '_reply'
nn2 = reply_name
n = attrs['name']
t = attrs['type']
print 'static void %s_get_%s(void* ret, void* const* args) {' % (nn2, n)
print ' int i = *(int*)args[1];'
print ' xcb_%s_t* s = args[0];' % nn
print ' int size = xcb_%s_%s_length(s);' % (nn2, n)
print ' if (i < 0 || i >= size) {'
print ' *(void**)ret = 0;'
print ' return;'
print ' }'
if t in int_types:
print ' *(int*)ret = xcb_%s_%s(s)[i];' % (nn2, n)
ret = 'i'
elif t == 'char':
print ' *(char**)ret = strndup(xcb_%s_%s(s), size);' % (nn2, n)
ret = 'ms'
else:
if t == 'void':
print ' *(void**)ret = 0; // TODO: fix this'
else:
print ' *(void**)ret = copy_struct(&xcb_%s_%s_iterator(s).data[i]);' % (nn2, n)
ret = t.lower()
print '}'
funcs.append(('%s_get_%s' % (nn2, n), ret, '%s i' % nn))
print 'static void %s_%s_length(void* ret, void* const* args) {' % (nn2, n)
print ' xcb_%s_t* s = args[0];' % nn
print ' *(int*)ret = xcb_%s_%s_length(s);' % (nn2, n)
print '}'
funcs.append(('%s_%s_length' % (nn2, n), 'i', '%s' % nn))
if name == 'request' and not request_failed:
add_class(cookie_name + '_cookie', None)
print 'static void %s(void* ret, void* const* args) {' % request_name
print ' xcb_%s_cookie_t c =' % cookie_name
print ' xcb_%s(CONN(args[0])' % request_name,
args = 'connection '
for i, f in enumerate(request_fields):
print ','
print ' ',
if f in int_types:
print '*(int*)args[%d] /* %s */' % (i+1, f),
args += 'i '
elif f == 'valueparam32':
print '(uint32_t*)args[%d]' % (i+1),
args += 'valueparam32 '
elif f == 'cb':
print '((struct std2_buffer*)args[%d])->data' % (i+1),
args += 'cb '
else:
print '%s args[%d]' % (f, i+1),
print
print ' );'
print ' *(void**)ret = (void*)c.sequence;'
print '}'
funcs.append((request_name, cookie_name + '_cookie', args.strip()))
if name == 'event':
print 'static void to_%s_event(void* ret, void* const* args) {' % event_name
print ' xcb_generic_event_t* ev = args[0];'
print ' if ((ev->response_type & ~0x80) != XCB_%s) {' % event_name.upper()
print ' *(void**)ret = 0;'
print ' return;'
print ' }'
print ' *(void**)ret = copy_struct((xcb_%s_event_t*)ev);' % event_name
print '}'
add_class(event_name + '_event', 'free')
funcs.append(("to_" + event_name + '_event', event_name + '_event', "generic_event"))
def cdata_element(c):
global cdata
cdata = c
def output_macros():
print '#define XCB_CLASSES \\'
for a, b, in classes:
if b:
print 'STD2_CLASS("%s", %s) \\' % (a, b)
else:
print 'STD2_CLASS("%s", 0) \\' % a
print
print '#define XCB_FUNCS \\'
for a, b, c in funcs:
print 'STD2_FUNC("%s", "%s", "%s", %s) \\' % (a, b, c, a)
print
def process(fn):
x = xml.parsers.expat.ParserCreate()
x.StartElementHandler = start_element
x.EndElementHandler = end_element
x.CharacterDataHandler = cdata_element
x.ParseFile(open(fn))
if __name__ == '__main__':
process(sys.argv[1])
output_macros()
|
UTF-8
|
Python
| false
| false
| 7,981
|
py
| 44
|
xcb2.py
| 6
| 0.486029
| 0.478136
| 0
| 241
| 32.116183
| 101
|
rajisarma/CSC326SearchEngine
| 532,575,971,835
|
e4874337d1049e0a020b5fbbf7cf65fd574c63a4
|
e38db199f2d61ec14e44c3cc8577d2288482fcff
|
/Lab1.py
|
3b743e42569e9088d8d99536cbd917ceabfd9992
|
[] |
no_license
|
https://github.com/rajisarma/CSC326SearchEngine
|
4e7bf4e086b16d06871b41b02a7aa6b31e5f9406
|
a6ac2d63e8a027e32dcf1e862c029618f18f98fe
|
refs/heads/master
| 2021-07-24T17:35:44.796115
| 2017-11-05T18:40:55
| 2017-11-05T18:40:55
| 105,304,959
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from bottle import *  # route, run, request
from collections import OrderedDict
import re
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import flow_from_clientsecrets
from googleapiclient.errors import HttpError
from googleapiclient.discovery import build
import httplib2
import json
from string import Template
#for localhost
#MAIN = "http://localhost:8080/redirect"
#for AWS instance
MAIN = "http://0.0.0.0:80/redirect"
#for localhost:
#HOME_LINK = "https://accounts.google.com/logout?continue=https://appengine.google.com/_ah/logout?continue=http://localhost:8080"
HOME_LINK = "https://accounts.google.com/logout?continue=https://appengine.google.com/_ah/logout?continue=http://ec2-54-156-234-124.compute-1.amazonaws.com"
#from json file, get values needed to start login process
with open("client_secrets.json") as json_file:
client_secrets = json.load(json_file)
CLIENT_ID = client_secrets["web"]["client_id"]
CLIENT_SECRET = client_secrets["web"]["client_secret"]
SCOPE = client_secrets["web"]["auth_uri"]
REDIRECT_URI = client_secrets["web"]["redirect_uris"][0]
GOOGLE_SCOPE = 'https://www.googleapis.com/auth/plus.me https://www.googleapis.com/auth/userinfo.email'
cache = {}
dict = {}
last10 = []
logged_in = 'Login'
user_email = ''
user_name = ''
pic_link = ''
@route('/', 'GET')
def main():
#CSS formatting for the query page
#sign in button
sign_in = '''
<div align = "right">
<form action = "/login" method = "get">
<input id = "signin" type = "submit" value = "Sign In">
</form>
</div>
'''
#sign out button (displays when user is logged in, along with user's google profile picture)
sign_out = '''
<div align = "right">
<h3>Welcome, $user_name</h3>
<img src = $pic_link alt = "profilepic" style = "width:100px;height:auto"/>
<br>
<form action = "/logout" method = "get">
<input id = "signin" type = "submit" value = "Sign Out">
</form>
</div>
'''
sign_out = Template(sign_out).safe_substitute(user_name = user_name, pic_link = pic_link)
#search bar and search button
f ='''
<br><br><br><br>
<style>
table {
border-collapse: collapse;
border: 1px solid black;
display: inline-block;
}
th, td {
padding: 10px;
}
#searchbar {
font-size: 30px;
width: 600px;
}
#button {
width: 85px;
font-size: 20px;
}
#signin {
width: 150px;
font-size: 20px;
}
</style>
<body bgcolor = "#F0B27A">
<center>
<img src = "https://images.gr-assets.com/photos/1507480997p8/3630322.jpg" alt = "logo.gif" style = "width:506px;height:197px;background-color:#F0B27A;"/>
<br> <br>
<form action = "/search" method = "get">
<input id = "searchbar" name = "keywords" type = "text">
<input id = "button" type = "submit" value = "Search">
<br>
</form>
</center>
'''
#if most searched table contains values and user is logged in, display table
if bool(dict) and logged_in == 'Logout':
i=0
history = ['<center><br><br><b>Search History:</b><br><br><table id = "history">']
history.append('<tr><td align="center"><b> Word </b></td>')
history.append('<td align="center"><b> Count </b></td></tr>')
for k in sorted(dict,key=dict.get,reverse=True): #display top 20 searches on query page
if i<20:
history.append('<tr><td align="center"> %s </td>' % k)
history.append('<td align="center"> %d </td></tr>' % dict[k])
i += 1
history.append('</table><center>')
#if recent search table contains values and user is logged in, display table by printing list in reverse to put the most recently searched word first
if len(last10)>0:
recent = ['''
<center>
<style>
table {
border-collapse: collapse;
border: 1px solid black;
display: inline-block;
}
th, td {
padding: 10px;
}
</style>
<br><b>Most Recent Searches:</b><br><br>
<body bgcolor = "#F0B27A">
<table id = "recent">
''']
recent.append('<tr><td align="center"><b> Word </b></td>')
count = 0
for q in reversed(last10):
if count<10:
recent.append('<tr><td align="center">%s</td>' %q)
count += 1
recent.append('</table></center>')
if logged_in == 'Logout':
return sign_out, f, '\n'.join(history), '\n'.join(recent)
else:
return sign_in, f
else:
if logged_in == 'Logout':
return sign_out, f
else:
return sign_in, f
@route('/login', 'GET')
def login():
global logged_in
if logged_in == "Login":
flow = flow_from_clientsecrets("client_secrets.json",scope = 'https://www.googleapis.com/auth/plus.me https://www.googleapis.com/auth/userinfo.email', redirect_uri = REDIRECT_URI)
uri = flow.step1_get_authorize_url()
redirect(str(uri))
else:
logout()
@route('/redirect')
def redirect_page():
global logged_in
global user_email, user_name, pic_link, link
global http
global dict, last10
code = request.query.get('code','')
flow = OAuth2WebServerFlow(client_id = CLIENT_ID, client_secret = CLIENT_SECRET, scope = GOOGLE_SCOPE, redirect_uri = REDIRECT_URI)
credentials = flow.step2_exchange(code)
token = credentials.id_token['sub']
http = httplib2.Http()
http = credentials.authorize(http)
users_service = build('oauth2', 'v2', http=http)
user_document = users_service.userinfo().get().execute()
user_email = user_document['email']
user_name = user_document['name']
pic_link = user_document['picture']
link = user_document['link']
#if user is not logging in for the first time during the session, get data of previous search history
if user_email in cache:
dict = cache[user_email][0]
last10 = cache[user_email][1]
logged_in = 'Logout'
return main()
@route('/logout', method='GET')
def logout():
global logged_in
global dict, user_email, last10
#before logging out, update search history values for user in cache to be displayed upon next login
cache[user_email] = [dict, last10]
logged_in = 'Login'
user_email = ''
dict = {}
last10 = []
redirect(HOME_LINK)
return main()
@route('/search', method='GET')
def search():
global logged_in
global last10
string = request.query['keywords']
string = re.sub(r'[^\w\s]','',string) #filter punctuation
l = string.lower().split() #split by whitespaces
cur = OrderedDict()
updateHistory(l,cur)
if logged_in == 'Logout':
updateHistory(l,dict)
updateLast10(l,last10)
back = '<br><br><center><form action = "/"> <input id = "button" type = "submit" value = "Back"> </form></center>'
#CSS formatting for results page
sign_in = '''
<div align = "right">
<form action = "/login" method = "get">
<input id = "signin" type = "submit" value = "Sign In">
</form>
</div>
'''
sign_out = '''
<div align = "right">
<h3>Welcome, $user_name</h3>
<img src = $pic_link alt = "profilepic" style = "width:100px;height:auto"/>
<br>
<form action = "/logout" method = "get">
<input id = "signin" type = "submit" value = "Sign Out">
</form>
</div>
'''
sign_out = Template(sign_out).safe_substitute(user_name = user_name, pic_link = pic_link)
#if search term contains more than one word, display results table
if len(l)>1:
results = ['''
<center>
<style>
table {
border-collapse: collapse;
border: 1px solid black;
}
th, td {
padding: 10px;
}
#button {
width: 70px;
font-size: 20px;
}
#signin {
width: 150px;
font-size: 20px;
}
</style>
<br><b>Results:</b><br><br>
<body bgcolor = "#F0B27A">
<table id = "results">
''']
out = '<center><br><br> Number of words in search phrase: '+str(len(l))+'</center>'
results.append('<tr><td><b> Word </b></td>')
results.append('<td><b> Count </b></td></tr>')
for k in cur: #display results table
results.append('<tr><td align="center"> %s </td>' % k)
results.append('<td align="center"> %d </td></tr>' % cur[k])
results.append('</table></center>')
string = '''
<center>
<br><br><br>
<br><br><br>
<br><br><br>
Search query: ''' +string+ '''</center>'''
if logged_in == "Logout":
return sign_out, string, out, '\n'.join(results), back
else:
return sign_in, string, out, '\n'.join(results), back
else:
string = '''
<style>
#signin {
width: 150px;
font-size: 20px;
}
</style>
<center>
<br><br><br>
<br><br><br>
<br><br><br>
<style>
#button {
width: 70px;
font-size: 20px;
}
</style>
<body bgcolor = "#F0B27A"> Search query: ''' +string + '''</body></center>'''
if logged_in == "Logout":
return sign_out, string, back
else:
return sign_in, string, back
def updateHistory(l,dict):
#inputs: a list to process and a dictionary
#checks if each string in the list exists in the dictionary
#if yes: increments its count, if no: adds it to the dictionary
for i in l:
if not dict.get(i):
dict[i] = 1
else:
dict[i] += 1
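# e.g. updateHistory(['a', 'b', 'a'], d) leaves d == {'a': 2, 'b': 1}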
def updateLast10(l, last10):
#inputs: list l containing search terms, list last10 that will hold most recent search terms: the last term in the list being the most recent
for i in l:
if i in last10:
last10.remove(i)
last10.append(i)
else:
print l
print last10
last10.append(i)
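# e.g. updateLast10(['a', 'b', 'a'], last10) leaves last10 ending in ['b', 'a'],
# since a repeated term is moved back to the most-recent position.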
run(host='0.0.0.0',port=80, debug=True)
#run localhost
#run(host='localhost', port=8080, debug=True)
|
UTF-8
|
Python
| false
| false
| 9,952
|
py
| 10
|
Lab1.py
| 6
| 0.590635
| 0.571342
| 0
| 328
| 29.341463
| 181
|
LukaszMalucha/Linkedin-Analytics
| 13,511,967,123,126
|
baa670b83e3c2e88eecd89eb1d64a701eb9f0081
|
f8ba5d8ce3c2439afb6d14caeeebe20300b16a02
|
/app/core/tests/test_permissions.py
|
b258513db8bea3b30f67130eb58f3c1cfd83f44b
|
[
"MIT"
] |
permissive
|
https://github.com/LukaszMalucha/Linkedin-Analytics
|
37644d8e047e82f082f7e23427fdcbc3562e7074
|
8ee8cba29f313a1cc38772e2928d3c52614819af
|
refs/heads/master
| 2023-03-10T18:49:28.372749
| 2022-09-03T11:55:17
| 2022-09-03T11:55:17
| 142,476,957
| 13
| 6
|
MIT
| false
| 2023-03-05T14:54:40
| 2018-07-26T18:08:15
| 2022-12-01T13:53:03
| 2023-03-05T14:54:40
| 3,846
| 10
| 5
| 12
|
Python
| false
| false
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.permissions import IsAdminOrReadOnly
COMPANIES_URL = reverse("api:companies-list")
class TestIsAdminOrReadOnly(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
email="test@gmail.com",
password="test1234",
name="Test User"
)
self.user_superuser = get_user_model().objects.create_superuser(
email="superuser@gmail.com",
password="test1234",
)
self.permission = IsAdminOrReadOnly()
    def test_superuser_has_admin_or_read_only_permission(self):
admin_permission = self.user_superuser.has_perm(IsAdminOrReadOnly)
self.assertTrue(admin_permission)
def test_user_has_no_admin_or_read_only_permission(self):
admin_permission = self.user.has_perm(IsAdminOrReadOnly)
self.assertFalse(admin_permission)
def test_user_cant_access_unsafe_methods(self):
payload = {}
self.client = APIClient()
self.client.force_authenticate(user=self.user)
response = self.client.post(COMPANIES_URL, payload)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_can_access_safe_methods(self):
payload = {}
self.client = APIClient()
self.client.force_authenticate(user=self.user)
response = self.client.get(COMPANIES_URL, payload)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_superuser_can_access_unsafe_methods(self):
payload = {}
self.client = APIClient()
self.client.force_authenticate(user=self.user_superuser)
response = self.client.post(COMPANIES_URL, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
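# For reference, a typical implementation of such a permission looks like the
# sketch below (an assumption for illustration -- the real class lives in
# core.permissions and may differ):
#
#   from rest_framework.permissions import BasePermission, SAFE_METHODS
#
#   class IsAdminOrReadOnly(BasePermission):
#       def has_permission(self, request, view):
#           return request.method in SAFE_METHODS or request.user.is_staff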
|
UTF-8
|
Python
| false
| false
| 1,964
|
py
| 42
|
test_permissions.py
| 23
| 0.681263
| 0.672607
| 0
| 54
| 35.37037
| 74
|
Herna7liela/New
| 2,997,887,195,998
|
da821ba825b696929f33f2f6fe4c483cd3466e85
|
b72ba14c048ae402f424da508938fe3ca2743897
|
/Python_project/Project_trying.py
|
d677f0500a6d77a6de10c313f4ae4598d08494b6
|
[] |
no_license
|
https://github.com/Herna7liela/New
|
d517205b9f80d045278ab5c1026b2dcff76ab276
|
63bccd23d4118ceffb62b3b03981773ee75a6802
|
refs/heads/master
| 2021-01-16T21:16:19.777030
| 2015-05-01T10:50:00
| 2015-05-01T10:50:00
| 32,325,269
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# STEP 1:
# get the file in the correct format .gb to be able to work with it
# have to start by opening a reader. The file to open is the argument to the python program
# python3 Project_final_code.py file.gb (use the sys.argv function after importing sys)
# STEP 2:
# get the .gb file content into exp text = "...."
gbfile = open('sequence.gb', 'r')
read_gbfile = gbfile.readlines()
#print (read_gbfile)
# set up a structure to be able to answer questions
def normalize_space(line):
    #Return the line stripped of leading/trailing whitespace and with internal runs of whitespace replaced by a single SPACE
    return ' '.join(line.split())
new_gb = [normalize_space(i) for i in read_gbfile]
#print (new_gb)
basic = new_gb[0:11]
print (basic)
dict_exam={}
intro = basic[0].split(" ")
if "LOCUS" == intro[0]:
dict_exam[intro[0]] = intro[1:]
intro = basic[1].split(" ")
if "DEFINITION" == intro[0]:
dict_exam[intro[0]] = basic[1][11:]
intro = basic[2].split(" ")
if "ACCESSION" == intro[0]:
dict_exam[intro[0]] = basic[2][11:]
print (dict_exam)
#count = 0
#new_pos = 1
#for content in intro:
#dict_exam = {}
#if intro == basic[count].split(" "):
#if key == intro[count]:
#dict_exam[intro[count]] = basic
#basic_info = []
#for content in intro:
## print (content)
#dic = {}
#dic["Locus"] = content[1]
#dic["Length"] = content[2:4]
#dic["Type_nucl"] = content[4:7]
##dic["Submit_date"] = content[]
#basic_info += [dic]
#print (basic_info)
#for content in new_gb:
# print (content)
#definition = read_gbfile[1:3]
#print(definition)
#accession = read_gbfile[3]
#print ([accession])
#GI = read_gbfile[4]
#print (GI)
## try to possibly find a script for parsing files with biopython!!!!
## look at bioparsers and biogenbank
# set up functions so long for R, S, M, T, F, E, Q
# can then add the contents of the functions to the set up functions later on
# STEP 3:
#
# create a regular expression
# use the pipe
# import module from builtin re and then search for expression
# use re.compile to create the expression that will search for the expression
|
UTF-8
|
Python
| false
| false
| 2,161
|
py
| 85
|
Project_trying.py
| 83
| 0.655252
| 0.639519
| 0
| 83
| 25.048193
| 117
|
minhqlrt/python
| 5,360,119,202,403
|
0d80c6b0b885b8e6094b961f7cae19b6845d5a91
|
ba8ccfeded36d9565bbb9dd266ed5b23767b75ac
|
/auto pull push script.py
|
610e847fd3f09769990f1d668496b6bf90589f2a
|
[] |
no_license
|
https://github.com/minhqlrt/python
|
2fb225b1c535903454b60ae2b6f48bd59a29bd3e
|
c6b70edebac8043d796f1ce5ccf09e3412b7381c
|
refs/heads/master
| 2020-05-21T20:23:42.412959
| 2018-07-27T23:43:43
| 2018-07-27T23:43:43
| 62,386,629
| 0
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import subprocess
def myprocess(data):
proc = subprocess.Popen(data,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
procOutput, procError = proc.communicate()
print("")
print(procOutput)
print(procError)
print("")
return proc.returncode
defaultPath = r'G:\GitHub'
fileOrFiles = input('all or one: ')
if fileOrFiles == 'one':
myFiles = [(input('Input the name of the folder: '))]
print(myFiles)
elif fileOrFiles == 'all':
myFiles = ['FE','git','Japanese','java','Lap trinh shell','linux','lpic1','lpic2','network','python','raspberry-pi','selenium','software','testing','training']
while True:
condition = input('pull or add or commit or push or exit: ')
if condition == 'pull':
for fileName in myFiles:
currentDir = os.chdir(os.path.join(defaultPath,fileName))
pullCmd = r"""cmd /K git pull origin master"""
myprocess(pullCmd)
continue
elif condition == 'push':
for fileName in myFiles:
currentDir = os.chdir(os.path.join(defaultPath,fileName))
addCmd = r"""cmd /K git add *"""
if myprocess(addCmd) == 0:
commitCmd = r"""cmd /K git commit -m 'abc'"""
if myprocess(commitCmd) == 0:
pushCmd = r"""cmd /K git push origin master"""
myprocess(pushCmd)
continue
elif condition == 'exit':
break
|
UTF-8
|
Python
| false
| false
| 1,450
|
py
| 104
|
auto pull push script.py
| 18
| 0.597241
| 0.594483
| 0
| 40
| 35.25
| 163
|
JavonDavis/Competive-Programming-Python
| 15,453,292,372,341
|
e0d7f32e93d9ad5d3bd1bb177d25c5fd049ebaff
|
ae13f3efbe664b81417786e992919040033e23b1
|
/toolbox/loop_subsets.py
|
7103244a1edf924806a4212228919a29c20e94d5
|
[] |
no_license
|
https://github.com/JavonDavis/Competive-Programming-Python
|
266044c2c8d5e6c10ad9ece8955d86aa385ff6c8
|
09ff389067397016858345f5ce0962f816bf4d4b
|
refs/heads/master
| 2021-07-02T09:17:48.259626
| 2017-09-22T07:11:09
| 2017-09-22T07:11:09
| 87,370,735
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/bin/python
import sys
# Nice algo to loop subsets
def solve(n, a):
bit_array = [0 for _ in xrange(n)]
bit_array[n-1], bit_array[n-2], bit_array[n-3] = 1, 1, 1
subset_rep = snoob(7) # Decides length of subset
while len(subset_rep) <= n:
subset_rep = snoob(int(subset_rep, 2))
print "done"
def snoob(x):
smallest = x & -x
ripple = x + smallest
ones = x ^ ripple
ones = (ones >> 2)/smallest
return "{0:b}".format(ripple | ones)
def nexthi_same_count_ones(a):
c = (a & -a);
r = a+c;
return ((((r ^ a) >> 2) / c) | r)
solve(pow(10, 5), [1, 2])
|
UTF-8
|
Python
| false
| false
| 600
|
py
| 119
|
loop_subsets.py
| 105
| 0.553333
| 0.525
| 0
| 28
| 20.428571
| 60
|
lucidvoci/ResearchProjectPortal
| 3,590,592,675,593
|
970d6ada7f34e34bf0e27cdb1bc4319dc43ca87f
|
24532792200e38657d863152fe9368c9cdc5dd5f
|
/src/portal/portal.py
|
c2c0de06e890cb3412f0604ab82649fef58c16c1
|
[] |
no_license
|
https://github.com/lucidvoci/ResearchProjectPortal
|
134870a87597ec6127225b8b8178b17399dc4065
|
860e15176df96864a9e63fad4918fa6aa38b7d17
|
refs/heads/master
| 2021-01-22T23:07:14.644847
| 2017-03-20T18:39:43
| 2017-03-20T18:48:43
| 85,612,355
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------- Author: Lucie Dvorakova ---------------------#
#------------------- Login: xdvora1f ----------------------#
#-------------- Automatically updated web portal -----------------------------#
#-------------- about European research projects -----------------------------#
import re
import sys
import math
from flask import Flask
from flask import request
from flask import render_template
from elasticsearch import Elasticsearch
from datetime import datetime
from cStringIO import StringIO
from elasticutils import get_es, S, MLT
from query import Query
HOST = "localhost"
PORT = 9200
IDXPROJ = "xdvora1f_projects"
IDXDELIV = "xdvora1f_deliverables"
DOCTYPE = "data"
URL = "http://%s:%d/" % (HOST, PORT)
ITEMS_PER_PAGE = 20
last_url = ''
es = Elasticsearch(host=HOST, port=PORT)
deliv_s = S().es(urls=[URL]).indexes(IDXDELIV).doctypes(DOCTYPE)
project_s = S().es(urls=[URL]).indexes(IDXPROJ).doctypes(DOCTYPE)
app = Flask(__name__)
@app.route("/")
def index():
code = render_template("index.html")
return code
@app.route("/find", methods=["GET"])
def find():
global last_url
last_url = request.url
# search in project or deliverables (projects are default)
search = request.args.get("search", "projects")
# get current page, or default to zero
try:
page = int(request.args.get("page", "0"))
if page < 0:
page = 0
except:
page = 0
# getting search query with keywords
keyword = request.args.get("keyword", "")
q = Query(keyword)
#print q.keywords
#print q.specifications
# getting facets from url and query
search_dic = {}
valid_specs = ["country", "programme", "subprogramme", "coordinator",
"participant", "year"]
# specification from search box is prioritized
for spec in valid_specs:
val = q.getSpecification(spec)
if val:
search_dic[spec] = val.strip().lower()
continue
val = request.args.get(spec)
if val:
search_dic[spec] = val.replace("+", " ").strip().lower()
# build actual query for ElasticSearch
offset = page*ITEMS_PER_PAGE
keywords = " ".join(q.getKeywords())
keyword_s = get_project_with_keywords(keywords, search, \
offset, offset+ITEMS_PER_PAGE)
if len(search_dic) > 0:
keyword_s = keyword_s.filter(**search_dic)
# getting facets
facet = facets(keyword_s)
# getting instant show
instan_s = ''
if keyword_s:
if keyword_s[0].es_meta.score > 3:
instan_s = keyword_s
else:
filter_args = {"abbr": keyword}
instan_s = keyword_s.filter(**filter_args)
#if keyword deleted, no results are found
if not keywords:
keyword_s = ''
instan_s = ''
    # if not enough projects, fill with deliverables + create a facet of projects
deli_s = ''
deli_facet = []
if keyword_s:
if keyword_s.count() < ITEMS_PER_PAGE:
if search == 'projects':
deli_s = get_project_with_keywords(keywords, 'deliverables', \
offset, offset+ITEMS_PER_PAGE)
deli_facet = deliverable_facets(deli_s)
deli_s = deli_s[0:ITEMS_PER_PAGE - keyword_s.count()]
else:
if search == 'projects':
deli_s = get_project_with_keywords(keywords, 'deliverables', \
offset, offset+ITEMS_PER_PAGE)
deli_facet = deliverable_facets(deli_s)
deli_s = deli_s[0:ITEMS_PER_PAGE]
safe_keywords = re.sub(r'([:\\"])', r'\\\1', keywords)
safe_search_dic = {}
for key in search_dic:
safe_key = re.sub(r'([:\\"])', r'\\\1', key)
safe_val = re.sub(r'([:\\"])', r'\\\1', search_dic[key])
safe_search_dic[safe_key] = safe_val
code = render_template('find.html', keyword=safe_keywords, s=keyword_s, \
f=facet, d=safe_search_dic, search=search, insta=instan_s, page=page, \
deli=deli_s, deli_facet = deli_facet)
return code
@app.route('/project/<projectid>')
def project_detail(projectid):
global project_s
global last_url
#getting similar projects
mlt_s = MLT(projectid, index=IDXPROJ, doctype=DOCTYPE, search_size=3)
filter_args = {"id": projectid}
data_s = project_s.filter(**filter_args)
deli_s = deliv_s.filter(**filter_args)
code = render_template('project.html', s = data_s[0], d = deli_s[0:50], url = last_url, similar=mlt_s)
return code
@app.route('/user/')
@app.route('/user/<name>')
def user(name=None):
return render_template('user.html', name=name)
## --------------------------------------- ##
## ------------- FUNCTIONS ---------------- ##
## --------------------------------------- ##
# Returns a filter of all projects where the keywords occur
def get_project_with_keywords(keyword, search, from_, to):
if search == 'projects':
keyword_s = project_s.query_raw({
"multi_match" : {
"query" : keyword,
"fields" : [ "abbr^6","title^5", "subprogramme^3", "objective", "origWeb"],
# "type" : "phrase"
}
})
keyword_s = keyword_s[from_:to]
keyword_s = keyword_s.highlight('objective', pre_tags = ["<b>"], post_tags = ["</b>"])
else:
keyword_s = deliv_s.query_raw({
"multi_match" : {
"query" : keyword,
"fields" : [ "deliv_title^3", "deliv_article"],
# "type" : "phrase"
}
})
keyword_s = keyword_s[from_:to]
keyword_s = keyword_s.highlight('deliv_article', pre_tags = ["<b>"], post_tags = ["</b>"])
return keyword_s
# Generates the left-hand menu with facets based on the given filter
def facets(keyword_s):
listfacet = [['programme', []], ['subprogramme', []],['year', []], ['coordinator', []], ['participant', []], ['country',[]]]
for facet in listfacet:
facet_s = keyword_s.facet(facet[0], filtered=True, size=20).facet_counts()
for value in facet_s[facet[0]]['terms']:
facet[1].append([value['term'], str(value['count'])])
return listfacet
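# e.g. facets(s) returns [['programme', [[term, count], ...]], ['subprogramme', [...]], ...]
# with each count rendered as a string.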
# finding projects with deliverables
def deliverable_facets(deli_s):
deli_facet = []
deli_facet_s = deli_s.facet("id", filtered=True, size=100).facet_counts()
for item in deli_facet_s['id']['terms']:
filter_args = {"id": item['term']}
tmp_s = deli_s.filter(**filter_args)
deli_facet.append([item['term'],tmp_s[0]['abbr']])
return deli_facet
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
|
UTF-8
|
Python
| false
| false
| 6,798
|
py
| 5
|
portal.py
| 2
| 0.553592
| 0.547114
| 0
| 209
| 31.492823
| 128
|
lehvitus/arvestust
| 3,607,772,576,679
|
943037341acd115c2295ca1082c6e3d95d45fe5c
|
14152916a7833fe946ed2e1a75a3fda59aab0c8f
|
/arvestust/serializers/image.py
|
3cffc0ef5a1b26b97e63661bc9c3fed6d59ca76a
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/lehvitus/arvestust
|
313a3c17f4f5ca52c438a329fbf3f401384d492e
|
2d508317b744eaf12a643a398ff95723893a046a
|
refs/heads/master
| 2022-11-29T02:34:08.479358
| 2020-08-13T01:22:53
| 2020-08-13T01:22:53
| 260,359,666
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from rest_framework import serializers
from ..models import Image
class ImageSerializer(serializers.ModelSerializer):
class Meta:
model = Image
exclude = (
'uuid',
'object_id',
'content_type',
)
|
UTF-8
|
Python
| false
| false
| 262
|
py
| 111
|
image.py
| 102
| 0.572519
| 0.572519
| 0
| 13
| 19.153846
| 51
|
guirmoreira/cafeteira
| 12,043,088,319,052
|
7b8f893b4449c7597da57946c15b4d3af30affd3
|
92aa4420077a0d60679860bcf9b7befa36885be6
|
/profiles/migrations/0002_auto_20170327_0141.py
|
9b5bd9d0d197a2ebd37ee395d8dd072741a99ca0
|
[] |
no_license
|
https://github.com/guirmoreira/cafeteira
|
5c41ce469ff9b87da9fcceb9c2dd3a9bb7ffad6e
|
670d9b2f1c13efcac416e4601643e464a9673d0e
|
refs/heads/master
| 2020-12-03T08:07:22.663525
| 2017-06-28T10:45:58
| 2017-06-28T10:45:58
| 95,658,637
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 01:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='user',
),
migrations.AddField(
model_name='profile',
name='email',
field=models.EmailField(default='no_email', max_length=255),
),
]
|
UTF-8
|
Python
| false
| false
| 563
|
py
| 13
|
0002_auto_20170327_0141.py
| 8
| 0.566607
| 0.523979
| 0
| 24
| 22.458333
| 72
|
jrhartog/REDPyAlpha
| 7,447,473,320,240
|
0e627b15d0a43eb537c2bc9893adcae8d35f7d17
|
2e124a41b56c03b64b9f624eeb9514228d6ba03c
|
/redpy/table.py
|
e857a7e7ae95e6ae3bb4b9edab6cd71dad79f1fb
|
[] |
no_license
|
https://github.com/jrhartog/REDPyAlpha
|
4027beaa3b6cab8a9ecaa8ccbb92b3accdbfba64
|
e51444436bebfa383267019b61d670dc04dfe277
|
refs/heads/master
| 2021-01-14T08:29:53.787744
| 2015-12-02T19:09:17
| 2015-12-02T19:09:17
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from tables import *
from obspy.core.trace import Trace
from obspy import UTCDateTime
import datetime
import numpy as np
import redpy.correlation
import matplotlib
def Repeaters(opt):
"""
Defines the columns in the 'Repeater Catalog' table based on the Options in opt
id: unique ID number for the event (integer)
startTime: UTC time of start of the waveform (string)
startTimeMPL: matplotlib number associated with time (float)
waveform: Waveform data (ndarray)
windowStart: "trigger" time, in samples from start (integer)
windowCoeff: amplitude scaling for cross-correlation (float)
windowFFT: Fourier transform of window (complex ndarray)
windowAmp: amplitude in first half of window (float)
order: Order in the cluster ordering (integer)
reachability: Reachability in the cluster ordering (float)
coreDistance: Core distance in the cluster ordering (float)
isCore: 1 if core, else 0 (integer)
alignedTo: ID of event this one is aligned to (integer)
Returns a dictionary defining the table
"""
dict = {
"id" : Int32Col(shape=(), pos=0),
"startTime" : StringCol(itemsize=32, pos=1),
"startTimeMPL" : Float64Col(shape=(), pos=2),
"waveform" : Float64Col(shape=(opt.wshape,), pos=3),
"windowStart" : Int32Col(shape=(), pos=4),
"windowCoeff" : Float64Col(shape=(), pos=5),
"windowFFT" : ComplexCol(shape=(opt.winlen,), itemsize=16, pos=6),
"windowAmp" : Float64Col(shape=(), pos=7),
"order" : Int32Col(shape=(), pos=8),
"reachability" : Float64Col(shape=(), pos=9),
"coreDistance" : Float64Col(shape=(), pos=10),
"clusterNumber" : Int32Col(shape=(), pos=11),
"isCore" : Int32Col(shape=(), pos=12),
"alignedTo" : Int32Col(shape=(), pos=13)
}
return dict
def Orphans(opt):
"""
Defines the columns in the 'Orphans' table based on the Options in opt
id: unique ID number for the event (integer)
startTime: UTC time of start of the waveform (string)
startTimeMPL: matplotlib number associated with time (float)
waveform: Waveform data (ndarray)
windowStart: "trigger" time, in samples from start (integer)
windowCoeff: amplitude scaling for cross-correlation (float)
windowFFT: Fourier transform of window (complex ndarray)
windowAmp: amplitude in first half of window (float)
expires: UTC time of when orphan should no longer be considered (string)
Returns a dictionary defining the table
"""
dict = {
"id" : Int32Col(shape=(), pos=0),
"startTime" : StringCol(itemsize=32, pos=1),
"startTimeMPL": Float64Col(shape=(), pos=2),
"waveform" : Float64Col(shape=(opt.wshape,), pos=3),
"windowStart" : Int32Col(shape=(), pos=4),
"windowCoeff" : Float64Col(shape=(), pos=5),
"windowFFT" : ComplexCol(shape=(opt.winlen,), itemsize=16, pos=6),
"windowAmp" : Float64Col(shape=(), pos=7),
"expires" : StringCol(itemsize=32, pos=8)
}
return dict
def Deleted(opt):
"""
Defines the columns in the 'Deleted' table based on the Options in opt
id: unique ID number for the event (integer)
startTime: UTC time of start of the waveform (string)
startTimeMPL: matplotlib number associated with time (float)
waveform: Waveform data (ndarray)
windowStart: "trigger" time, in samples from start (integer)
windowCoeff: amplitude scaling for cross-correlation (float)
windowFFT: Fourier transform of window (complex ndarray)
windowAmp: amplitude in first half of window (float)
Returns a dictionary defining the table
"""
dict = {
"id" : Int32Col(shape=(), pos=0),
"startTime" : StringCol(itemsize=32, pos=1),
"startTimeMPL": Float64Col(shape=(), pos=2),
"waveform" : Float64Col(shape=(opt.wshape,), pos=3),
"windowStart" : Int32Col(shape=(), pos=4),
"windowCoeff" : Float64Col(shape=(), pos=5),
"windowFFT" : ComplexCol(shape=(opt.winlen,), itemsize=16, pos=6),
"windowAmp" : Float64Col(shape=(), pos=7)
}
return dict
def Junk(opt):
"""
Defines the columns in the 'Junk' table, a holding tank for testing suspect events
startTime: UTC time of start of the waveform (string)
waveform: Waveform data (ndarray)
windowStart: "trigger" time, in samples from start (integer)
isjunk: Logic holder (integer)
Returns a dictionary defining the table
"""
dict = {
"startTime" : StringCol(itemsize=32, pos=1),
"waveform" : Float64Col(shape=(opt.wshape,), pos=2),
"windowStart" : Int32Col(shape=(), pos=3),
"isjunk" : Int32Col(shape=(), pos=0)
}
return dict
def Correlation(opt):
"""
Defines the columns in the 'Correlation' table
id1: unique ID number for the first event (integer)
id2: unique ID number for the second event (integer)
ccc: cross-correlation coefficient between those two events (float)
Returns a dictionary defining the table
"""
dict = {
"id1" : Int32Col(shape=(), pos=0),
"id2" : Int32Col(shape=(), pos=1),
"ccc" : Float64Col(shape=(), pos=2)
}
return dict
def initializeTable(opt):
"""
    Initializes the hdf5 file with 'Repeater Catalog', 'Orphans', 'Junk', 'Deleted', and
    'Correlation Matrix' tables in a group related to the station where the data come from. This is
defined via the redpy.config.Options class.
opt: Options object describing the station/run parameters
Saves table to file and closes it.
Will likely need extensive editing when more tables get added...
"""
h5file = open_file(opt.filename, mode="w", title=opt.title)
group = h5file.create_group("/", opt.groupName, opt.groupDesc)
rtable = h5file.create_table(group, "repeaters", Repeaters(opt),
"Repeater Catalog")
rtable.attrs.scnl = [opt.station, opt.channel, opt.network, opt.location]
rtable.attrs.samprate = opt.samprate
rtable.attrs.windowLength = opt.winlen
rtable.attrs.ptrig = opt.ptrig
rtable.attrs.atrig = opt.atrig
rtable.attrs.fmin = opt.fmin
rtable.attrs.fmax = opt.fmax
rtable.attrs.previd = 0
rtable.attrs.ptime = 0
rtable.flush()
otable = h5file.create_table(group, "orphans", Orphans(opt),
"Orphan Catalog")
otable.flush()
jtable = h5file.create_table(group, "junk", Junk(opt), "Junk Catalog")
jtable.flush()
dtable = h5file.create_table(group, "deleted", Deleted(opt),
"Manually Deleted Events")
dtable.flush()
ctable = h5file.create_table(group, "correlation", Correlation(opt),
"Correlation Matrix")
ctable.flush()
h5file.close()
def openTable(opt):
"""
Convenience function to open the catalog and access the tables in it.
    opt: Options object describing station/run parameters
Returns handles to h5file, rtable, otable, ctable, and jtable
"""
h5file = open_file(opt.filename, "a")
rtable = eval('h5file.root.'+ opt.groupName + '.repeaters')
otable = eval('h5file.root.'+ opt.groupName + '.orphans')
ctable = eval('h5file.root.'+ opt.groupName + '.correlation')
jtable = eval('h5file.root.'+ opt.groupName + '.junk')
dtable = eval('h5file.root.'+ opt.groupName + '.deleted')
return h5file, rtable, otable, ctable, jtable, dtable
def populateRepeater(rtable, id, trig, opt, alignedTo, windowStart=-1):
"""
Initially populates a new row in the 'Repeater Catalog' table.
rtable: object pointing to the repeater table to populate
(e.g., h5file.root.groupName.repeaters)
id: integer id number given to this trigger, should be unique
trig: ObsPy trace from triggering function
opt: Options object describing station/run parameters
alignedTo: id number of repeater this one is aligned to (can be itself)
windowStart: triggering time (defaults to opt.ptrig seconds)
Appends this row to Repeaters table, but does not update the clustering parameters
(sets them to 0)
"""
trigger = rtable.row
if windowStart == -1:
windowStart = int(opt.ptrig*opt.samprate)
trigger['id'] = id
trigger['startTime'] = trig.stats.starttime.isoformat()
try:
trigger['startTimeMPL'] = matplotlib.dates.date2num(datetime.datetime.strptime(
trig.stats.starttime.isoformat(), '%Y-%m-%dT%H:%M:%S.%f'))
except ValueError:
trigger['startTimeMPL'] = matplotlib.dates.date2num(datetime.datetime.strptime(
trig.stats.starttime.isoformat(), '%Y-%m-%dT%H:%M:%S'))
trigger['waveform'] = trig.data
trigger['windowStart'] = windowStart
trigger['windowCoeff'], trigger['windowFFT'] = redpy.correlation.calcWindow(
trig.data, windowStart, opt)
trigger['windowAmp'] = max(abs(trig.data[windowStart:int(windowStart+opt.winlen/2)]))
trigger['order'] = -1
trigger['reachability'] = -1.0
trigger['coreDistance'] = -1.0
trigger['clusterNumber'] = -1
trigger['isCore'] = 0 # Set to zero to avoid being counted erroneously as a core
trigger['alignedTo'] = alignedTo
trigger.append()
rtable.flush()
def populateOrphan(otable, id, trig, opt):
"""
Initially populates a new row in the 'Orphans' table.
otable: object pointing to the table to populate
(e.g., h5file.root.groupName.orphans)
id: integer id number given to this trigger, should be unique
trig: ObsPy trace from triggering function
opt: Options object describing station/run parameters
Appends this row to Orphans table, adding an expiration date
"""
trigger = otable.row
windowStart = int(opt.ptrig*opt.samprate)
trigger['id'] = id
trigger['startTime'] = trig.stats.starttime.isoformat()
try:
trigger['startTimeMPL'] = matplotlib.dates.date2num(datetime.datetime.strptime(
trig.stats.starttime.isoformat(), '%Y-%m-%dT%H:%M:%S.%f'))
except ValueError:
trigger['startTimeMPL'] = matplotlib.dates.date2num(datetime.datetime.strptime(
trig.stats.starttime.isoformat(), '%Y-%m-%dT%H:%M:%S'))
trigger['waveform'] = trig.data
trigger['windowStart'] = windowStart
trigger['windowCoeff'], trigger['windowFFT'] = redpy.correlation.calcWindow(
trig.data, windowStart, opt)
trigger['windowAmp'] = max(abs(trig.data[windowStart:int(windowStart+opt.winlen/2)]))
adddays = ((opt.maxorph-opt.minorph)/7.)*(trig.stats.maxratio-opt.trigon)+opt.minorph
trigger['expires'] = (trig.stats.starttime+adddays*86400).isoformat()
trigger.append()
otable.flush()
def populateJunk(jtable, trig, isjunk, opt):
"""
Initially populates a new row in the 'Junk' table.
jtable: object pointing to the table to populate
(e.g., h5file.root.groupName.junk)
trig: ObsPy trace from triggering function
isjunk: Integer flag, 0=junk, 1=expired orphan
opt: Options object describing station/run parameters
"""
trigger = jtable.row
windowStart = int(opt.ptrig*opt.samprate)
trigger['startTime'] = trig.stats.starttime.isoformat()
trigger['waveform'] = trig.data
trigger['windowStart'] = windowStart
trigger['isjunk'] = isjunk
trigger.append()
jtable.flush()
def moveOrphan(rtable, otable, oindex, alignedTo, opt):
"""
Moves a row from the 'Orphans' table to the 'Repeater Catalog' table.
"""
trigger = rtable.row
orow = otable[oindex]
trigger['id'] = orow['id']
trigger['startTime'] = orow['startTime']
trigger['startTimeMPL'] = orow['startTimeMPL']
trigger['waveform'] = orow['waveform']
trigger['windowStart'] = orow['windowStart']
trigger['windowCoeff'] = orow['windowCoeff']
trigger['windowFFT'] = orow['windowFFT']
trigger['windowAmp'] = orow['windowAmp']
trigger['order'] = -1
trigger['reachability'] = -1.0
trigger['coreDistance'] = -1.0
trigger['clusterNumber'] = -1
trigger['isCore'] = 0 # Set to zero to avoid being counted erroneously as a core
trigger['alignedTo'] = alignedTo
trigger.append()
otable.remove_row(oindex)
otable.flush()
rtable.flush()
def removeFamily(rtable, dtable, cnum, opt):
"""
Moves the core of a family into the dtable, deletes the rest of the members.
"""
trigger = dtable.row
members = rtable.get_where_list('(clusterNumber=={})'.format(cnum))
idx = rtable.get_where_list('(clusterNumber=={}) & (isCore==1)'.format(cnum))
core = rtable[idx]
trigger['id'] = core['id']
trigger['startTime'] = core['startTime'][0]
trigger['startTimeMPL'] = core['startTimeMPL']
trigger['waveform'] = core['waveform']
trigger['windowStart'] = core['windowStart']
trigger['windowCoeff'] = core['windowCoeff']
trigger['windowFFT'] = core['windowFFT']
trigger['windowAmp'] = core['windowAmp']
trigger.append()
for m in members[::-1]:
rtable.remove_row(m)
rtable.flush()
dtable.flush()
def clearExpiredOrphans(otable, opt, tend):
"""
Deletes orphans that have passed their expiration date
otable: object pointing to the table to populate
(e.g., h5file.root.groupName.orphans)
opt: Options object describing station/run parameters
tend: Time to remove orphans older than, corresponds usually to end of run increment
Removes orphans from table, prints how many were removed
"""
index = np.where(otable.cols.expires[:] < tend.isoformat())
for n in range(len(index[0])-1,-1,-1):
otable.remove_row(index[0][n])
otable.flush()
print '%i Orphans aged out of the system' % len(index[0])
def appendCorrelation(ctable, id1, id2, ccc, opt):
"""
Appends a new value to the 'Correlation Matrix' table.
corr: object pointing to the row in the table to populate
(e.g., h5file.root.hsr.correlation.row)
id1: unique id number of first trigger
id2: unique id number of second trigger
ccc: cross-correlation between the two triggers in the window
opt: Options object describing station/run parameters
Appends this row to the table, and automatically puts the smaller of
the two id numbers first
Only appends if the value is greater than the minimum defined in opt
"""
if (ccc >= opt.cmin) and (id1!=id2):
corr = ctable.row
corr['id1'] = min(id1, id2)
corr['id2'] = max(id1, id2)
corr['ccc'] = ccc
corr.append()
ctable.flush()
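# A typical lifecycle, as a sketch (opt is assumed to come from the
# redpy.config.Options class mentioned above; its construction is not shown):
#
#   initializeTable(opt)            # create the HDF5 file and its tables once
#   h5file, rtable, otable, ctable, jtable, dtable = openTable(opt)
#   populateOrphan(otable, 0, trig, opt)  # a new trigger starts as an orphan
#   h5file.close()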
|
UTF-8
|
Python
| false
| false
| 14,931
|
py
| 8
|
table.py
| 5
| 0.644766
| 0.631371
| 0
| 439
| 33.01139
| 90
|
janeyeon/progressive_growing_of_points
| 5,995,774,366,188
|
8bdcb1cb8867d685f8737b7181a892a4867fdac3
|
40932f17b5896560d900726e9aa3144220003933
|
/models/auto_encoder.py
|
f7deb8aa0c73f701327965d35514f9c51f9ac8b6
|
[
"MIT"
] |
permissive
|
https://github.com/janeyeon/progressive_growing_of_points
|
8d12131165e75f83bf8800f81aa79ec545d4612e
|
17a7641cdf73f22e60eaf2ca378ac74e9a4d4572
|
refs/heads/main
| 2023-08-24T19:17:50.523796
| 2021-10-23T10:55:46
| 2021-10-23T10:55:46
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch
import torch.nn as nn
import os
import shutil
from layers.pointnet import pointnet
from layers.srtdecoder import SRTDecoder
from layers.srtdecoder_pg import SRTDecoderPG
from layers.mrtdecoder import MRTDecoder
from layers.mrtdecoder_pg import MRTDecoderPG
from layers.mrtdecoder_pg2 import MRTDecoderPGV2
from layers.topnet import topnet
from layers.topnet_pg import topnetPG
from layers.treegcn import TreeGCNGenerator
from layers.treegcn_pg import TreeGCNGeneratorPG
class AE(nn.Module):
def __init__(self):
super(AE, self).__init__()
def get_encoder(self, config):
if config['model']['encoder']['type'] == "pointnet":
GFV_dim = config['model']['encoder']['GFV_dim']
last_feat_dim = config['model']['encoder']['pointnet_hp']['feat_dims_list'][-1]
assert GFV_dim == last_feat_dim, 'GFV_dim MUST be equal to last feature dimension.'
kwargs = {
'feat_dims_list': config['model']['encoder']['pointnet_hp']['feat_dims_list']
}
return pointnet(**kwargs)
else:
raise NotImplementedError
def get_decoder(self, config):
pass
def load_ae(self, ckpt_path):
data = torch.load(ckpt_path)
self.encoder.load_state_dict(data['encoder_weights'])
self.decoder.load_state_dict(data['decoder_weights'])
global_step = data['global_step']
return global_step
def save_ae(self, global_step, best_valid_loss, is_best, ckpt_dir):
saving_contents = {
'global_step': global_step,
'encoder_weights': self.encoder.state_dict(),
'decoder_weights': self.decoder.state_dict(),
'best_valid_loss': best_valid_loss
}
torch.save(saving_contents, os.path.join(ckpt_dir, '%06d' % global_step + '.pth'))
if is_best:
src_path = os.path.join(ckpt_dir, '%06d' % global_step + '.pth')
target_path = os.path.join(ckpt_dir, 'best.pth')
shutil.copy(src_path, target_path)
class AENormal(AE):
def __init__(self, config):
super(AENormal, self).__init__()
self.encoder = self.get_encoder(config)
self.decoder = self.get_decoder(config)
self.encoder_type = config['model']['encoder']['type']
self.decoder_type = config['model']['decoder']['type']
def get_decoder(self, config):
if config['model']['decoder']['type'] == 'srtdecoder':
kwargs = {
'z_dim': config['model']['encoder']['GFV_dim'],
'nlevels': config['model']['decoder']['srtdecoder_hp']['nlevels'],
'feat_dims': config['model']['decoder']['srtdecoder_hp']['feat_dims'],
'num_output_points': config['model']['decoder']['srtdecoder_hp']['num_output_points']
}
return SRTDecoder(**kwargs)
elif config['model']['decoder']['type'] == 'mrtdecoder':
kwargs = {
'z_dim': config['model']['encoder']['GFV_dim'],
'nlevels': config['model']['decoder']['mrtdecoder_hp']['nlevels'],
'feat_dims': config['model']['decoder']['mrtdecoder_hp']['feat_dims'],
'num_output_points': config['model']['decoder']['mrtdecoder_hp']['num_output_points']
}
return MRTDecoder(**kwargs)
elif config['model']['decoder']['type'] == 'topnet':
kwargs = {
'z_dim': config['model']['encoder']['GFV_dim'],
'nlevels': config['model']['decoder']['topnet_hp']['nlevels'],
'node_feat_dim': config['model']['decoder']['topnet_hp']['node_feat_dim'],
'num_output_points': config['model']['decoder']['topnet_hp']['num_output_points']
}
return topnet(**kwargs)
elif config['model']['decoder']['type'] == 'treegcn':
kwargs = {
'features': config['model']['decoder']['treegcn_hp']['G_FEAT'],
'degrees': config['model']['decoder']['treegcn_hp']['DEGREE'],
'support': config['model']['decoder']['treegcn_hp']['support']
}
return TreeGCNGenerator(**kwargs)
else:
raise NotImplementedError
def forward(self, x): # x: (B, N, 3)
batch_size, num_points = x.shape[0], x.shape[1]
if self.encoder_type == 'pointnet':
# encoding x
gfv = self.encoder(x)
else:
raise NotImplementedError
out = self.decoder(gfv)
return out
class AEPG(AE):
def __init__(self, config):
super(AEPG, self).__init__()
self.encoder = self.get_encoder(config)
self.decoder = self.get_decoder(config)
self.encoder_type = config['model']['encoder']['type']
self.decoder_type = config['model']['decoder']['type']
def get_decoder(self, config):
if config['model']['decoder']['type'] == 'srtdecoder':
kwargs = {
'z_dim': config['model']['encoder']['GFV_dim'],
'nlevels': config['model']['decoder']['srtdecoder_hp']['nlevels'],
'feat_dims': config['model']['decoder']['srtdecoder_hp']['feat_dims'],
'num_output_points': config['model']['decoder']['srtdecoder_hp']['num_output_points']
}
return SRTDecoderPG(**kwargs)
elif config['model']['decoder']['type'] == 'mrtdecoder':
kwargs = {
'z_dim': config['model']['encoder']['GFV_dim'],
'nlevels': config['model']['decoder']['mrtdecoder_hp']['nlevels'],
'feat_dims': config['model']['decoder']['mrtdecoder_hp']['feat_dims'],
'num_output_points': config['model']['decoder']['mrtdecoder_hp']['num_output_points']
}
return MRTDecoderPG(**kwargs)
elif config['model']['decoder']['type'] == 'mrtdecoder_pgv2':
kwargs = {
'z_dim': config['model']['encoder']['GFV_dim'],
'nlevels': config['model']['decoder']['mrtdecoder_hp']['nlevels'],
'feat_dims': config['model']['decoder']['mrtdecoder_hp']['feat_dims'],
'num_output_points': config['model']['decoder']['mrtdecoder_hp']['num_output_points']
}
return MRTDecoderPGV2(**kwargs)
elif config['model']['decoder']['type'] == 'topnet':
kwargs = {
'z_dim': config['model']['encoder']['GFV_dim'],
'nlevels': config['model']['decoder']['topnet_hp']['nlevels'],
'node_feat_dim': config['model']['decoder']['topnet_hp']['node_feat_dim'],
'num_output_points': config['model']['decoder']['topnet_hp']['num_output_points']
}
return topnetPG(**kwargs)
elif config['model']['decoder']['type'] == 'treegcn':
kwargs = {
'features': config['model']['decoder']['treegcn_hp']['G_FEAT'],
'degrees': config['model']['decoder']['treegcn_hp']['DEGREE'],
'support': config['model']['decoder']['treegcn_hp']['support']
}
return TreeGCNGeneratorPG(**kwargs)
else:
raise NotImplementedError
def forward(self, x, phase, alpha): # x: (B, N, 3)
batch_size, num_points = x.shape[0], x.shape[1]
if self.encoder_type == 'pointnet':
# encoding x
gfv = self.encoder(x)
else:
raise NotImplementedError
# decoding
out = self.decoder(gfv, phase, alpha)
return out
def get_autoencoder(config):
if config['train_setting']['pg_on']:
return AEPG(config)
else:
return AENormal(config)
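# A minimal sketch of the expected config layout, inferred from the key lookups
# above. The concrete values below are illustrative assumptions only; note that
# GFV_dim must equal the last pointnet feature dimension (asserted in AE).
_example_config = {
    'train_setting': {'pg_on': False},
    'model': {
        'encoder': {
            'type': 'pointnet',
            'GFV_dim': 1024,
            'pointnet_hp': {'feat_dims_list': [64, 128, 1024]},
        },
        'decoder': {
            'type': 'topnet',
            'topnet_hp': {
                'nlevels': 6,
                'node_feat_dim': 8,
                'num_output_points': 2048,
            },
        },
    },
}
# get_autoencoder(_example_config) would then return an AENormal with a
# pointnet encoder and a topnet decoder.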
|
UTF-8
|
Python
| false
| false
| 7,757
|
py
| 38
|
auto_encoder.py
| 28
| 0.547505
| 0.545572
| 0
| 179
| 42.340782
| 101
|
CreativePhilip/OperatingSystemsLaboratories
| 953,482,739,872
|
48f3040c25c14a343609d4eef45898d790411eb1
|
e606e7a9d454a93330f15850e154723a0bd2a0e7
|
/lab1/schedulers/sjf.py
|
6d555951e1afca28b0b0448c61dacd136d110e17
|
[] |
no_license
|
https://github.com/CreativePhilip/OperatingSystemsLaboratories
|
31404568a87b7d90a596528a21ea742d9f772c3b
|
341ad098076021a89db7fd6ccd9583339e4a2201
|
refs/heads/master
| 2023-06-03T03:58:59.474727
| 2021-06-22T15:26:12
| 2021-06-22T15:26:12
| 344,957,645
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from typing import Optional
from utils.process import Process
from utils.queue import ProcessQueue, ProcessSorting
def sjf_non_preemptive(queue: ProcessQueue, prev_proc: Optional[Process]) -> Optional[Process]:
if prev_proc:
return prev_proc
if queue.is_empty():
return None
return queue.sort(ProcessSorting.ET)[0]
def sjf_preemptive(queue: ProcessQueue, prev_proc: Optional[Process]) -> Optional[Process]:
    if queue.is_empty():
        return None
    proc = queue.sort(ProcessSorting.ET)
    if prev_proc is None:
        return proc[0]
    else:
        return prev_proc if prev_proc.execution_time_left <= proc[0].execution_time_left else proc[0]
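# A hedged usage sketch (the constructors of Process/ProcessQueue live in
# utils and are assumed here, so this is illustrative only):
#
#   queue = ProcessQueue([...])               # processes waiting to run
#   running = None
#   running = sjf_preemptive(queue, running)  # shortest remaining time wins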
|
UTF-8
|
Python
| false
| false
| 671
|
py
| 27
|
sjf.py
| 21
| 0.691505
| 0.685544
| 0
| 26
| 24.807692
| 101
|
Itzharshit/Pyro-FileStreamBota
| 19,550,691,145,403
|
740031e6b6324a518a0d2f6786534ff632739949
|
c6c5d497101fcf0509a7b2530adae70e32026867
|
/WebStreamer/bot/plugins/start.py
|
7a320445f5a8d577a13af79dbae5b7ca44b7bb98
|
[] |
no_license
|
https://github.com/Itzharshit/Pyro-FileStreamBota
|
d375e866682b31ec62e28d0914d8f7b19ac5d0d1
|
a0f2f19248e2c632cd24d97fe9744542dc99cab8
|
refs/heads/main
| 2023-08-25T06:10:17.667836
| 2021-10-09T19:08:21
| 2021-10-09T19:08:21
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# (c) @EverythingSuckz | @AbirHasan2005
from WebStreamer.bot import StreamBot
from WebStreamer.vars import Var
from WebStreamer.utils.human_readable import humanbytes
from WebStreamer.utils.database import Database
from pyrogram import filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram.errors import UserNotParticipant
db = Database(Var.DATABASE_URL, Var.SESSION_NAME)
@StreamBot.on_message(filters.command('start') & filters.private & ~filters.edited)
async def start(b, m):
if not await db.is_user_exist(m.from_user.id):
await db.add_user(m.from_user.id)
await b.send_message(
Var.BIN_CHANNEL,
f"#NEW_USER: \n\nNew User [{m.from_user.first_name}](tg://user?id={m.from_user.id}) Started !!"
)
usr_cmd = m.text.split("_")[-1]
if usr_cmd == "/start":
if Var.UPDATES_CHANNEL != "None":
try:
user = await b.get_chat_member(Var.UPDATES_CHANNEL, m.chat.id)
if user.status == "kicked":
await b.send_message(
chat_id=m.chat.id,
text="Sorry Dude, You were Banned to use me. Contact my [Support Group](https://t.me/AJPyroVerseGroup).",
parse_mode="markdown",
disable_web_page_preview=True
)
return
except UserNotParticipant:
await b.send_message(
chat_id=m.chat.id,
text="**Please Join My update Channel to use me!**\n\nDue to Overload, Only my Channel Subscribers can use me!",
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Join Update Channel", url=f"https://t.me/{Var.UPDATES_CHANNEL}")
]
]
),
parse_mode="markdown"
)
return
except Exception:
await b.send_message(
chat_id=m.chat.id,
text="Oops!! Something went Wrong. Contact my [Support Group](https://t.me/AJPyroVerseGroup).",
parse_mode="markdown",
disable_web_page_preview=True)
return
await m.reply_text(
            text='Hii Sir!!\nI am a Telegram File to Link Generator Bot.\n\nSend me any file and get an HTTP link!',
reply_markup=InlineKeyboardMarkup(
[
[InlineKeyboardButton('My Channel', url='https://t.me/AJPyroVerse'), InlineKeyboardButton('Support Group', url='https://t.me/AJPyroVerseGroup')],
[InlineKeyboardButton('Developer', url='https://t.me/harshitshrivastavbot')]
]
),
disable_web_page_preview=True
)
else:
if Var.UPDATES_CHANNEL != "None":
try:
user = await b.get_chat_member(Var.UPDATES_CHANNEL, m.chat.id)
if user.status == "kicked":
await b.send_message(
chat_id=m.chat.id,
text="Sorry Sir, You are Banned to use me. Contact my [Support Group](https://t.me/AJPyroVerseGroup).",
parse_mode="markdown",
disable_web_page_preview=True
)
return
except UserNotParticipant:
await b.send_message(
chat_id=m.chat.id,
text="**Please Join My Updates Channel to use me!**\n\nDue to Overload, Only my Channel Subscribers can use me!",
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Join Updates Channel", url=f"https://t.me/{Var.UPDATES_CHANNEL}")
],
[
InlineKeyboardButton("Refresh / Try Again",
url=f"https://t.me/File2linkpyrobot?start=AJPyroVerse_{usr_cmd}")
]
]
),
parse_mode="markdown"
)
return
except Exception:
await b.send_message(
chat_id=m.chat.id,
text="Oops!! Something went Wrong. Contact my [Support Group](https://t.me/AJPyroVerseGroup).",
parse_mode="markdown",
disable_web_page_preview=True)
return
get_msg = await b.get_messages(chat_id=Var.BIN_CHANNEL, message_ids=int(usr_cmd))
file_size = None
if get_msg.video:
file_size = f"{humanbytes(get_msg.video.file_size)}"
elif get_msg.document:
file_size = f"{humanbytes(get_msg.document.file_size)}"
elif get_msg.audio:
file_size = f"{humanbytes(get_msg.audio.file_size)}"
file_name = None
if get_msg.video:
file_name = f"{get_msg.video.file_name}"
elif get_msg.document:
file_name = f"{get_msg.document.file_name}"
elif get_msg.audio:
file_name = f"{get_msg.audio.file_name}"
stream_link = "https://{}/{}".format(Var.FQDN, get_msg.message_id) if Var.ON_HEROKU or Var.NO_PORT else \
"http://{}:{}/{}".format(Var.FQDN,
Var.PORT,
get_msg.message_id)
msg_text = "Sir\nYour Link Generated! \n\n📂 **File Name:** `{}`\n**File Size:** `{}`\n\n📥 **Download Link:** `{}`"
await m.reply_text(
text=msg_text.format(file_name, file_size, stream_link),
parse_mode="Markdown",
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("Download Now", url=stream_link)]])
)
@StreamBot.on_message(filters.command('help') & filters.private & ~filters.edited)
async def help_handler(bot, message):
if not await db.is_user_exist(message.from_user.id):
await db.add_user(message.from_user.id)
await bot.send_message(
Var.BIN_CHANNEL,
f"#NEW_USER: \n\nNew User [{message.from_user.first_name}](tg://user?id={message.from_user.id}) Started !!"
)
if Var.UPDATES_CHANNEL != "None":
try:
user = await bot.get_chat_member(Var.UPDATES_CHANNEL, message.chat.id)
if user.status == "kicked":
await bot.send_message(
chat_id=message.chat.id,
text="Sorry Sir, You are Banned to use me. Contact my [Support Group](https://t.me/AJPyroVerseGroup).",
parse_mode="markdown",
disable_web_page_preview=True
)
return
except UserNotParticipant:
await bot.send_message(
chat_id=message.chat.id,
text="**Please Join My Updates Channel to use me!**\n\nDue to Overload, Only my Channel Subscribers can use me!",
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Join Updates Channel", url=f"https://t.me/{Var.UPDATES_CHANNEL}")
]
]
),
parse_mode="markdown"
)
return
except Exception:
await bot.send_message(
chat_id=message.chat.id,
text="Something went Wrong. Contact my [Support Group](https://t.me/AJPyroVerseGroup).",
parse_mode="markdown",
disable_web_page_preview=True)
return
await message.reply_text(
text="Send me any File I will give you http Direct Download Link!\n\nI also Support Channels. Add me to your Channel as Admin and see magic with files!",
parse_mode="Markdown",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[InlineKeyboardButton("Support Group", url="https://t.me/AJPyroVerseGroup"), InlineKeyboardButton("My Channel", url="https://t.me/AJPyroVerse")],
[InlineKeyboardButton("Developer", url="https://t.me/harshitshrivastavbot")]
]
)
)
|
UTF-8
|
Python
| false
| false
| 8,494
|
py
| 1
|
start.py
| 1
| 0.51779
| 0.517083
| 0
| 184
| 45.130435
| 165
|
DiamaiD/Studium
| 16,398,185,175,948
|
5ef8fd2d43ee43b9c1633306dec589ad8089bbb9
|
059ffb0e6a296b20a381328ccbec3fdfc63433b6
|
/Semester_1/programm1/Klausurvorbereitung/probeklausur WS2019_2020/ex_keywords.py
|
87e4fcf215294ee59262fd9ee1ed4c20e2441909
|
[] |
no_license
|
https://github.com/DiamaiD/Studium
|
d6f589b7d76d031d74dae20e1304b6568a94cd8e
|
5981e63e020db65e93299ac197bc7dcbefa6540c
|
refs/heads/master
| 2020-09-11T09:04:26.855214
| 2020-07-29T17:59:33
| 2020-07-29T17:59:33
| 222,014,184
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
def keywords(c: str, s: str) -> list:
    """Return the passages enclosed between successive pairs of the delimiter c in s."""
    indexes = [i for i, char in enumerate(s) if char == c]
    passages = []
    # walk the delimiter positions pairwise; a trailing unmatched delimiter is
    # ignored instead of raising IndexError as the original loop did
    for i in range(0, len(indexes) - 1, 2):
        passages.append(s[indexes[i] + 1:indexes[i + 1]])
    return passages
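# Quick demonstration (sketch):
if __name__ == "__main__":
    print(keywords('"', 'say "hello" to the "world"'))  # ['hello', 'world']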
|
UTF-8
|
Python
| false
| false
| 306
|
py
| 187
|
ex_keywords.py
| 101
| 0.477124
| 0.457516
| 0
| 13
| 22.461538
| 57
|
yandexdataschool/everware
| 10,934,986,761,250
|
261461d6aabaadda090097f9b3fd7e96429ee31b
|
3ea75c8c4121f9da70e79ee82a5d38958762417c
|
/etc/local_config.py
|
41ef63a43c317c05521c109848de6de7fb5a9403
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/yandexdataschool/everware
|
6e117fa18e75b5c9ecbd664b1edea724f7ed6185
|
78c3a02acfcf3f3170981f2319d061eaf9c407aa
|
refs/heads/master
| 2020-03-19T07:49:56.579291
| 2018-06-18T18:30:26
| 2018-06-18T18:32:48
| 136,152,332
| 0
| 0
|
BSD-3-Clause
| true
| 2018-06-06T09:54:17
| 2018-06-05T09:20:05
| 2018-06-05T09:20:07
| 2018-06-06T09:54:16
| 485
| 0
| 0
| 0
|
Python
| false
| null |
# Use this config file to run everware locally on linux or other systems
# that do not need to use a VM to run docker containers
c = get_config()
load_subconfig('etc/base_config.py')
load_subconfig('etc/github_auth.py')
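# Further local overrides can follow the subconfigs, e.g. (assumption --
# standard JupyterHub traitlets):
# c.JupyterHub.ip = '127.0.0.1'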
|
UTF-8
|
Python
| false
| false
| 221
|
py
| 41
|
local_config.py
| 27
| 0.751131
| 0.751131
| 0
| 6
| 35.833333
| 72
|
Dineshsgit/dea-airflow
| 15,040,975,491,737
|
c0995ea7898f1c7a3deae19247692ebf7be9c586
|
11cbf195ade6bf4aa77e24d3057c1c387d812638
|
/dags/automated_reporting/utilities/copernicus_api.py
|
6b1c18944589e473ec7cca1f127f2c862fe57708
|
[] |
no_license
|
https://github.com/Dineshsgit/dea-airflow
|
9e1a81a3604f299a195d69ce231612818f53a8e2
|
4b29fb227d4b6e653c1c0a5ee6b4cd079e325493
|
refs/heads/master
| 2023-08-29T07:08:46.448263
| 2021-10-07T22:42:08
| 2021-10-07T22:42:08
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Utilities for copernicus api (scihub) queries
"""
import logging
import requests
from concurrent import futures
from datetime import timezone, timedelta, datetime as dt
from automated_reporting.utilities import helpers
from automated_reporting.aux_data import aoi
log = logging.getLogger("airflow.task")
def query(execution_date, days, copernicus_api_creds):
"""Query Copernicus/Scihub for a product type, date range and area of interest"""
execution_date = helpers.python_dt(execution_date)
producttype = "S2MSI1C"
aoi_polygon = aoi.AOI_POLYGON
# base Copernicus API url and query, needs query arguments inserted
cop_url = 'https://scihub.copernicus.eu/dhus/search?q=ingestiondate:[{} TO {}] AND \
producttype:{} AND footprint:"Intersects({})"&start={}&rows=100&format=json'
# gets dates in a format suitable for Copernicus API
cop_start_time = (execution_date - timedelta(days=days)).astimezone(
tz=timezone.utc
).replace(tzinfo=None).isoformat() + "Z"
cop_end_time = (
execution_date.astimezone(tz=timezone.utc).replace(tzinfo=None).isoformat()
+ "Z"
)
log.info(
"Querying Copernicus API between {} - {} for {}".format(
cop_start_time, cop_end_time, producttype
)
)
# Helper functions
def get(url):
"""
Perform a GET to copernicus api for paged inventory data
"""
return requests.get(
url,
auth=(copernicus_api_creds["user"], copernicus_api_creds["password"]),
)
def format_url(offset=0):
"""
Format url with query arguments
"""
return cop_url.format(
cop_start_time, cop_end_time, producttype, aoi_polygon, offset
)
def get_entry_val(entry, data_type, name):
"""
Extract a value from entry section of response
"""
for val in entry[data_type]:
if val["name"] == name:
return val["content"]
return None
expected_products = []
start_time = dt.now()
# make a first api call with a zero offset
resp = get(format_url(0))
if resp.ok:
# start a list of responses
responses = [resp]
# use count from first response to build a list of urls for
# multi-threaded download
count = int(resp.json()["feed"]["opensearch:totalResults"])
log.info("Downloading: {}".format(count))
urls = [format_url(offset) for offset in range(100, count, 100)]
# populate responses list with a multithreaded download
with futures.ThreadPoolExecutor(max_workers=20) as executor:
res = executor.map(get, urls)
responses += list(res)
# check responses and extract result in expected_products list
for resp in responses:
if resp.ok:
data = resp.json()["feed"]
if type(data["entry"]) == dict:
data["entry"] = [data["entry"]]
for entry in data["entry"]:
granule_id = get_entry_val(entry, "str", "granuleidentifier")
row = {
"uuid": entry["id"],
"granule_id": granule_id,
"region_id": get_entry_val(entry, "str", "tileid"),
"sensor": granule_id[:3].lower(),
"identifier": get_entry_val(entry, "str", "identifier"),
}
expected_products.append(row)
else:
raise Exception("Sentinel API Failed: {}".format(resp.status_code))
log.info("Downloaded: {}".format(len(expected_products)))
# check that the inventory list is the same length as the expected count
if count != len(expected_products):
raise Exception("Sentinel API Failed: products missing from download")
else:
raise Exception("Sentinel API Failed: {}".format(resp.status_code))
log.info(
"Copernicus API download completed in {} seconds".format(
(dt.now() - start_time).total_seconds()
)
)
return expected_products
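# Hedged usage sketch (hypothetical credentials; dt and timezone are imported above):
# creds = {"user": "scihub_user", "password": "scihub_password"}
# products = query(dt(2021, 10, 7, tzinfo=timezone.utc), days=1, copernicus_api_creds=creds)
# log.info("Expected products: %s", len(products))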
|
UTF-8
|
Python
| false
| false
| 4,202
|
py
| 70
|
copernicus_api.py
| 63
| 0.584246
| 0.580438
| 0
| 122
| 33.442623
| 88
|
ipalongengi/algorithm-practice
| 10,591,389,363,332
|
c413554ac4a624d728d60a053e61dc6a006892fd
|
cadef9ab90eddcc5c2e212635eee262fd9b5c7c5
|
/questions/say_it/test_solution.py
|
1d429e4ee0027e6be1a33834ccd856afa813c3b4
|
[] |
no_license
|
https://github.com/ipalongengi/algorithm-practice
|
83aeb5389eb82a2e8104cb2627fb31494bfe0b82
|
4a423e010ded51d8de38c809fa0ca113a91d836d
|
refs/heads/master
| 2019-07-09T05:54:38.420691
| 2019-03-25T00:14:31
| 2019-03-25T00:14:31
| 90,082,886
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import pytest
from solution import say_it
@pytest.mark.parametrize('n, output', [
(1, 1),
(6, 312211),
(10, 13211311123113112211)
])
def test_valid_inputs(n, output):
assert say_it(n) == output
@pytest.mark.parametrize('n, output', [
(0, 1),
(-25, 1),
])
def test_invalid_inputs(n, output):
assert say_it(n) == output
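# For reference, a minimal look-and-say implementation consistent with the
# cases above (a sketch only -- the real solution lives in solution.py):
#     from itertools import groupby
#     def say_it(n: int) -> int:
#         term = "1"
#         for _ in range(max(n, 1) - 1):
#             term = "".join(f"{len(list(g))}{d}" for d, g in groupby(term))
#         return int(term)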
|
UTF-8
|
Python
| false
| false
| 348
|
py
| 7
|
test_solution.py
| 4
| 0.617816
| 0.514368
| 0
| 18
| 18.388889
| 39
|
sofiane-fourati/Mistplay-Challange
| 15,625,091,056,087
|
7263aed29bc9f85cd04009745e9f6f0592640c69
|
951ca4a054d4febe32f959d9d420f3faa1df8163
|
/polls/forms.py
|
e0a3449dd3195556503cbdb0f7048c22213ffb04
|
[] |
no_license
|
https://github.com/sofiane-fourati/Mistplay-Challange
|
e6cc893ecea303c4c80ac165ac2ee7a883df5650
|
b8a899b5a85adb94e9b7a8ff3e17e1a109bcd105
|
refs/heads/master
| 2020-12-10T22:07:45.209949
| 2020-01-14T01:53:01
| 2020-01-14T01:53:01
| 233,725,525
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import forms
class FilesForm(forms.Form):
file = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': False}))
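# Hedged view-side usage sketch ("request" is a hypothetical Django request):
# form = FilesForm(request.POST, request.FILES)
# if form.is_valid():
#     uploaded_file = form.cleaned_data['file']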
|
UTF-8
|
Python
| false
| false
| 141
|
py
| 7
|
forms.py
| 2
| 0.77305
| 0.77305
| 0
| 4
| 33.75
| 83
|
epu-ntua/qualichain-jobcrawler
| 1,881,195,701,961
|
88bcc46d0b524dc0d6f0e3622eee7a8eab3201b2
|
01ae09eae33e3da27726c4eb0efc1b143358eae1
|
/jobcrawler/postgres_client/PostgresClient.py
|
c84c270c7c6acfa9657c08e253b9fbb11cb77034
|
[
"MIT"
] |
permissive
|
https://github.com/epu-ntua/qualichain-jobcrawler
|
d9d5146fbe3613347723c50b5106e23781a0ef59
|
16f236a1f38af8502e9d039814ef2f4193d85071
|
refs/heads/master
| 2022-12-16T11:38:10.051858
| 2020-02-27T08:35:50
| 2020-02-27T08:35:50
| 235,815,570
| 3
| 1
|
MIT
| false
| 2022-12-08T03:41:02
| 2020-01-23T14:47:23
| 2020-11-12T15:27:20
| 2022-12-08T03:41:01
| 96
| 3
| 1
| 7
|
Python
| false
| false
|
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker
from jobcrawler.settings import POSTGRES_USER, POSTGRES_PASSWORD, POSTGRES_HOST, POSTGRES_DB
from jobcrawler.postgres_client.models import Base, JobPost
class PostgresClient(object):
"""
    This is a Python object that handles a Postgres DB using SQLAlchemy
"""
def __init__(self):
self.engine = create_engine(
'postgresql+psycopg2://{}:{}@{}/{}'.format(POSTGRES_USER, POSTGRES_PASSWORD, POSTGRES_HOST, POSTGRES_DB)
)
self.meta = MetaData()
self.conn = self.engine.connect()
self.session = sessionmaker(bind=self.engine)()
def initialize_tables(self):
"""
This function is used to initialize Job Posting Tables
"""
# create Tables from Models
Base.metadata.create_all(self.engine)
print("JobPost table initialized successfully")
def add_job_post(self, **kwargs):
"""
This function is used to add job post to JobPost Model
Args:
**kwargs: provided keyword arguments
Examples:
>>> pg = PostgresClient()
>>> pg.add_job_post(**kwargs)
"""
new_job_post = JobPost(
title=kwargs["title"],
requirements=kwargs["requirements"],
job_url=kwargs["job_url"],
timestamp=kwargs["timestamp"],
full_html=kwargs["full_html"],
site=kwargs["site"],
language=kwargs["language"],
full_text=kwargs["full_text"]
)
self.session.add(new_job_post)
self.session.commit()
def check_if_record_exists(self, title, job_url):
"""
The following function is used to check if the extracted job exists in DB
Args:
title: job title
job_url: job url
Returns: True/False
"""
this_job_count = self.session.query(JobPost).filter(
JobPost.job_url == job_url,
JobPost.title == title
).count()
        return this_job_count > 0
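# Hedged usage sketch (assumes the POSTGRES_* settings point at a reachable DB):
# pg = PostgresClient()
# pg.initialize_tables()
# if not pg.check_if_record_exists("Data Engineer", "https://example.com/jobs/1"):
#     pg.add_job_post(title="Data Engineer", requirements="...", job_url="https://example.com/jobs/1",
#                     timestamp="2020-02-27T08:35:50", full_html="<html>...</html>",
#                     site="example.com", language="en", full_text="...")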
|
UTF-8
|
Python
| false
| false
| 2,170
|
py
| 18
|
PostgresClient.py
| 13
| 0.582488
| 0.582028
| 0
| 71
| 29.56338
| 116
|
KhawajaAhmad/Genomage-Platform
| 3,195,455,693,093
|
81dc7c53f602405af09499566429f000977c5be7
|
3acf67a9a2a0a33ffb7a4af586dc5ed5bfdb9769
|
/pages/create_pipeline.py
|
f8fe13028deb3af17195099beeab51df49edb055
|
[] |
no_license
|
https://github.com/KhawajaAhmad/Genomage-Platform
|
a7cd58ef3221e9c6c9ac092ad1d5cce82bd6b0ee
|
507ebd144a654dd7eaa7639fff5b5a7d0e3489ae
|
refs/heads/master
| 2023-09-01T05:49:01.718742
| 2021-11-02T16:04:36
| 2021-11-02T16:04:36
| 411,681,492
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from utils.import_utils import *
from app import app
from pages import sidebar, topbar
import pandas as pd
# =================================== Genomage =================================================
def cards(image, text, card_id):
return html.Div([
dbc.Card([
dbc.CardBody([
dbc.CardImg(src=image, top=True, className="pipeline_card_img"),
html.P(text, className="analysis_card_text"),
], className="pipeline_card_body"
),
], className="pipeline_card"
)
], id=card_id, n_clicks=0)
def radio_items(options, id):
return dcc.RadioItems(
options=options,
id=id,
labelStyle={'font-size': 13, 'display': 'flex', 'align-items': 'center',
'padding': 0,
'margin': 5},
style={'vertical-align': -4, 'display': 'inline-block', 'margin': 5},
inputStyle={"margin-right": 6}
)
def modal_content(modal_id):
if modal_id == "analysis_type_modal":
options = [{'label': 'WGS', 'value': 'WGS'},
{'label': 'WES', 'value': 'WES'}]
return html.Div([
radio_items(options, "analysis_type_radio")
])
elif modal_id == "data_format_modal":
options = [{'label': 'fastq', 'value': 'fastq'},
{'label': 'fastq.gz', 'value': 'fastq.gz'},
{'label': '.BAM', 'value': 'bam'}]
options2 = [{'label': '.fasta', 'value': 'fasta'},
{'label': '.fa', 'value': 'fa'},
{'label': '.fna', 'value': 'fna'},
{'label': '.bed file', 'value': 'bed'}]
return html.Div([
html.P("Input File:", className="modal_sub_headings"),
radio_items(options, "data_format1"),
dbc.Collapse(
dbc.Card(dbc.CardBody([
html.Div(
[
dbc.Checklist(
options=[
{"label": "mark duplicated and read group", "value": 1},
],
value=[],
id="bam_switch",
switch=True,
className="bam_switch"
),
]
)
]), style={'border': 'none'}
),
id="collapse_bam",
is_open=False,
),
html.P("Reference Genome", className="modal_sub_headings"),
radio_items(options2, "data_format2")
])
elif modal_id == "qc_modal":
options = [{'label': 'FASTQC', 'value': 'fastqc'}]
return html.Div([
radio_items(options, "qc_radio")
])
elif modal_id == "trimming_modal":
        options = [{'label': 'Trimmomatic', 'value': 'trimomatic'},
                   {'label': 'Fastp', 'value': 'fastp'}]
return html.Div([
radio_items(options, "trimming_radio")
])
elif modal_id == "trimming_modal1":
        options = [{'label': 'Trimmomatic', 'value': 'trimomatic'},
                   {'label': 'Fastp', 'value': 'fastp'}]
return html.Div([
radio_items(options, "trimming_radio1")
])
elif modal_id == "reference_genome_index_modal":
options = [{'label': 'SAM Tools', 'value': 'sam'},
{'label': 'BWA', 'value': 'bwa'},
{'label': 'Bowtie2', 'value': 'bowtie2'}]
options2 = [{'label': 'GRCH37/hg19', 'value': 'GRCH37/hg19'},
{'label': 'GRCH38/hg38', 'value': 'GRCH38/hg38'}]
return html.Div([
html.P("Set a Tool:", className="modal_sub_headings"),
radio_items(options, "reference_genome_tool_index_radio"),
html.P("Ref. genome version", className="modal_sub_headings"),
radio_items(options2, "reference_genome_dataformat_index_radio")
])
elif modal_id == "alignment_modal":
options = [{'label': 'BWA', 'value': 'bwa'},
{'label': 'Bowtie2', 'value': 'bowtie2'}]
return html.Div([
radio_items(options, "alignment_radio")
])
elif modal_id == "duplicates_modal":
options = [{'label': 'Picard Tools', 'value': 'picard'}]
return html.Div([
radio_items(options, "duplicates_radio")
])
elif modal_id == "duplicates_modal1":
options = [{'label': 'Picard Tools', 'value': 'picard'}]
return html.Div([
radio_items(options, "duplicates_radio1")
])
elif modal_id == "BQSR_modal":
options = [{'label': 'gatk BaseRecalibrator', 'value': 'gatk BaseRecalibrator'},
{'label': 'gatk ApplyBQSR', 'value': 'gatk ApplyBQSR'}]
return html.Div([
radio_items(options, "BQSR_radio")
])
elif modal_id == "BQSR_modal1":
options = [{'label': 'gatk BaseRecalibrator', 'value': 'gatk BaseRecalibrator'},
{'label': 'gatk ApplyBQSR', 'value': 'gatk ApplyBQSR'}]
return html.Div([
radio_items(options, "BQSR_radio1")
])
elif modal_id == "BQSR_modal2":
options = [{'label': 'gatk BaseRecalibrator', 'value': 'gatk BaseRecalibrator'},
{'label': 'gatk ApplyBQSR', 'value': 'gatk ApplyBQSR'}]
return html.Div([
radio_items(options, "BQSR_radio2")
])
elif modal_id == "variant_calling_modal":
options = [{'label': 'gatk Mutect2', 'value': 'gatk Mutect2'},
{'label': 'gatk HaplotypeCaller', 'value': 'gatk HaplotypeCaller'},
{'label': 'DeepVariant', 'value': 'deepvariant'}]
return html.Div([
radio_items(options, "variant_calling_radio")
])
elif modal_id == "variant_calling_modal1":
options = [{'label': 'gatk Mutect2', 'value': 'gatk Mutect2'},
{'label': 'gatk HaplotypeCaller', 'value': 'gatk HaplotypeCaller'},
{'label': 'DeepVariant', 'value': 'deepvariant'}]
return html.Div([
radio_items(options, "variant_calling_radio1")
])
elif modal_id == "variant_calling_modal2":
options = [{'label': 'gatk Mutect2', 'value': 'gatk Mutect2'},
{'label': 'gatk HaplotypeCaller', 'value': 'gatk HaplotypeCaller'},
{'label': 'DeepVariant', 'value': 'deepvariant'}]
return html.Div([
radio_items(options, "variant_calling_radio2")
])
elif modal_id == "variant_filtration_modal":
options = [{'label': 'bcftools', 'value': 'bcftools'},
{'label': 'gatk', 'value': 'gatk'}]
return html.Div([
radio_items(options, "variant_filtration_radio")
])
elif modal_id == "variant_filtration_modal1":
options = [{'label': 'bcftools', 'value': 'bcftools'},
{'label': 'gatk', 'value': 'gatk'}]
return html.Div([
radio_items(options, "variant_filtration_radio1")
])
elif modal_id == "variant_filtration_modal2":
options = [{'label': 'bcftools', 'value': 'bcftools'},
{'label': 'gatk', 'value': 'gatk'}]
return html.Div([
radio_items(options, "variant_filtration_radio2")
])
elif modal_id == "computing_power_modal":
return html.Div([
html.Div([
dcc.Slider(
className="computing_power_slider",
min=1,
max=3,
value=1,
step=None,
id="computing_power_radio",
marks={
1: {'label': 'low', 'style': {'color': '#004677', 'font-size': 14}},
2: {'label': 'medium', 'style': {'color': '#004677', 'font-size': 14}},
3: {'label': 'high', 'style': {'color': '#004677', 'font-size': 14}},
},
),
html.Div([
html.P("RAM: 10GB", className="modal_box_text"),
html.P("CPU: 7GHz", className="modal_box_text"),
html.P("Estimated Cost: $10", className="modal_box_text")
], className="modal_box"),
html.Div([
html.P("RAM: 10GB", className="modal_box_text"),
html.P("CPU: 7GHz", className="modal_box_text"),
html.P("Estimated Cost: $10", className="modal_box_text")
], className="modal_box"),
html.Div([
html.P("RAM: 10GB", className="modal_box_text"),
html.P("CPU: 7GHz", className="modal_box_text"),
html.P("Estimated Cost: $10", className="modal_box_text")
], className="modal_box"),
], style={'text-align': 'center', 'padding': "20px 0px"}),
])
elif modal_id == "computing_power_modal1":
return html.Div([
html.Div([
dcc.Slider(
className="computing_power_slider",
min=1,
max=3,
value=1,
step=None,
id="computing_power_radio1",
marks={
1: {'label': 'low', 'style': {'color': '#004677', 'font-size': 14}},
2: {'label': 'medium', 'style': {'color': '#004677', 'font-size': 14}},
3: {'label': 'high', 'style': {'color': '#004677', 'font-size': 14}},
},
),
html.Div([
html.P("RAM: 10GB", className="modal_box_text"),
html.P("CPU: 7GHz", className="modal_box_text"),
html.P("Estimated Cost: $10", className="modal_box_text")
], className="modal_box"),
html.Div([
html.P("RAM: 10GB", className="modal_box_text"),
html.P("CPU: 7GHz", className="modal_box_text"),
html.P("Estimated Cost: $10", className="modal_box_text")
], className="modal_box"),
html.Div([
html.P("RAM: 10GB", className="modal_box_text"),
html.P("CPU: 7GHz", className="modal_box_text"),
html.P("Estimated Cost: $10", className="modal_box_text")
], className="modal_box"),
], style={'text-align': 'center', 'padding': "20px 0px"}),
])
elif modal_id == "computing_power_modal2":
return html.Div([
html.Div([
dcc.Slider(
className="computing_power_slider",
min=1,
max=3,
step=None,
value=1,
id="computing_power_radio2",
marks={
1: {'label': 'low', 'style': {'color': '#004677', 'font-size': 14}},
2: {'label': 'medium', 'style': {'color': '#004677', 'font-size': 14}},
3: {'label': 'high', 'style': {'color': '#004677', 'font-size': 14}},
},
),
html.Div([
html.P("RAM: 10GB", className="modal_box_text"),
html.P("CPU: 7GHz", className="modal_box_text"),
html.P("Estimated Cost: $10", className="modal_box_text")
], className="modal_box"),
html.Div([
html.P("RAM: 10GB", className="modal_box_text"),
html.P("CPU: 7GHz", className="modal_box_text"),
html.P("Estimated Cost: $10", className="modal_box_text")
], className="modal_box"),
html.Div([
html.P("RAM: 10GB", className="modal_box_text"),
html.P("CPU: 7GHz", className="modal_box_text"),
html.P("Estimated Cost: $10", className="modal_box_text")
], className="modal_box"),
], style={'text-align': 'center', 'padding': "20px 0px"}),
])
def get_modal(modal_id, close_id, save_id, heading):
return html.Div([
dbc.Modal([
dbc.ModalBody([
html.P(heading, className="modal_headings"),
modal_content(modal_id)]),
dbc.ModalFooter([
dbc.Button("Close", id=close_id, color="light", n_clicks=0, className="modal_closing"),
dbc.Button("Save", id=save_id, color="success", n_clicks=0, className="modal_saving"),
]
),
],
id=modal_id,
is_open=False,
),
]
)
def get_modal2(modal_id, close_id, save_id, heading):
return html.Div([
dbc.Modal([
dbc.ModalBody([
html.P(heading, className="modal_headings"),
modal_content(modal_id)]),
dbc.ModalFooter([
dbc.Button("Close", id=close_id, color="light", n_clicks=0, className="modal_closing"),
dbc.Button("Save", id=save_id, color="success", n_clicks=0, className="modal_saving"),
]
),
],
id=modal_id,
is_open=False,
size='lg'
),
]
)
def create_layout():
return html.Div([
html.Div([
sidebar.create_layout(),
], className="screen_division_left"),
html.Div([
topbar.create_layout(),
html.Div([
html.Div([
html.Div([
html.P("Analysis", className="pipeline_title", id="pipeline_title"),
dbc.Button(html.Img(src="/assets/edit.png", className="pipeline_share_icon"),
className="pipeline_title_edit", id="pipeline_title_edit"),
dbc.Button(html.Img(src="/assets/share.png", className="pipeline_share_icon"),
className="pipeline_title_edit"),
dbc.Modal(
[
dbc.ModalBody([
html.P("Change Title", className="modal_headings"),
dbc.Input(id="title_input", placeholder="Enter New title", type="text",
className="title_modal_input"),
]),
dbc.ModalFooter(
dbc.Button("Close", id="pipeline_title_edit_close", color="light", n_clicks=0,
className="modal_closing"),
),
],
id="pipeline_title_edit_modal",
is_open=False,
),
dbc.Button("Save as draft", color="light", className="pipeline_save_as_draft")
]),
dbc.Row([
get_modal("analysis_type_modal", "analysis_type_close", "analysis_type_save",
'Select Analysis Type'),
get_modal("data_format_modal", "data_format_close", "data_format_save",
'Select Data Format'),
get_modal("qc_modal", "qc_close", "qc_save", "Quality Control"),
get_modal("trimming_modal", "trimming_close", "trimming_save",
"Read Manipulation/ Trimming"),
get_modal("trimming_modal1", "trimming_close1", "trimming_save1",
"Read Manipulation/ Trimming"),
get_modal("reference_genome_index_modal", "reference_genome_index_close",
"reference_genome_index_save", "Reference Genome Index"),
get_modal("alignment_modal", "alignment_close", "alignment_save", "Alignment"),
get_modal("duplicates_modal", "duplicates_close", "duplicates_save", "Mark Duplicates"),
get_modal("duplicates_modal1", "duplicates_close1", "duplicates_save1", "Mark Duplicates"),
get_modal("BQSR_modal", "BQSR_close", "BQSR_save", "BQSR"),
get_modal("BQSR_modal1", "BQSR_close1", "BQSR_save1", "BQSR"),
get_modal("BQSR_modal2", "BQSR_close2", "BQSR_save2", "BQSR"),
get_modal("variant_calling_modal", "variant_calling_close", "variant_calling_save",
"variant Calling"),
get_modal("variant_calling_modal1", "variant_calling_close1", "variant_calling_save1",
"variant Calling"),
get_modal("variant_calling_modal2", "variant_calling_close2", "variant_calling_save2",
"variant Calling"),
get_modal("variant_filtration_modal", "variant_filtration_close",
"variant_filtration_save", "variant filtration"),
get_modal("variant_filtration_modal1", "variant_filtration_close1",
"variant_filtration_save1", "variant filtration"),
get_modal("variant_filtration_modal2", "variant_filtration_close2",
"variant_filtration_save2", "variant filtration"),
get_modal2("computing_power_modal", "computing_power_close", "computing_power_save",
"computing Power"),
get_modal2("computing_power_modal1", "computing_power_close1", "computing_power_save1",
"computing Power"),
get_modal2("computing_power_modal2", "computing_power_close2", "computing_power_save2",
"computing Power"),
html.Div(html.Div(cards("assets/img1.png", "Select Analysis Type", 'analysis_type')),
className="visible_div"),
html.Div(id="visible_div1", className="visible_div"),
html.Div(id="visible_div2", className="visible_div"),
html.Div(id="visible_div3", className="visible_div"),
html.Div(id="visible_div4", className="visible_div"),
html.Div(id="visible_div5", className="visible_div"),
html.Div(id="visible_div6", className="visible_div"),
html.Div(id="visible_div7", className="visible_div"),
html.Div(id="visible_div8", className="visible_div"),
html.Div(id="visible_div9", className="visible_div"),
html.Div(id="visible_div10", className="visible_div"),
html.Div(id="visible_div11", className="visible_div"),
], no_gutters=True),
])
], className="pipeline_sub_main")
], className="screen_division_right")
], className="pipeline_main")
def toggle_modal(n1, n2, n3, is_open):
if n1 or n2 or n3:
return not is_open
return is_open
app.callback(
Output("analysis_type_modal", "is_open"),
[Input("analysis_type", "n_clicks"), Input("analysis_type_close", "n_clicks"),
Input("analysis_type_save", "n_clicks")],
[State("analysis_type_modal", "is_open")])(toggle_modal)
app.callback(
Output("data_format_modal", "is_open"),
[Input("data_format", "n_clicks"), Input("data_format_close", "n_clicks"),
Input("data_format_save", "n_clicks")],
[State("data_format_modal", "is_open")])(toggle_modal)
app.callback(
Output("qc_modal", "is_open"),
[Input("qc", "n_clicks"), Input("qc_close", "n_clicks"),
Input("qc_save", "n_clicks")],
[State("qc_modal", "is_open")])(toggle_modal)
app.callback(
Output("trimming_modal", "is_open"),
[Input("trimming", "n_clicks"), Input("trimming_close", "n_clicks"),
Input("trimming_save", "n_clicks")],
[State("trimming_modal", "is_open")])(toggle_modal)
app.callback(
Output("trimming_modal1", "is_open"),
[Input("trimming1", "n_clicks"), Input("trimming_close1", "n_clicks"),
Input("trimming_save1", "n_clicks")],
[State("trimming_modal1", "is_open")])(toggle_modal)
app.callback(
Output("reference_genome_index_modal", "is_open"),
[Input("reference_genome_index", "n_clicks"), Input("reference_genome_index_close", "n_clicks"),
Input("reference_genome_index_save", "n_clicks")],
[State("reference_genome_index_modal", "is_open")])(toggle_modal)
app.callback(
Output("alignment_modal", "is_open"),
[Input("alignment", "n_clicks"), Input("alignment_close", "n_clicks"),
Input("alignment_save", "n_clicks")],
[State("alignment_modal", "is_open")])(toggle_modal)
app.callback(
Output("duplicates_modal", "is_open"),
[Input("duplicates", "n_clicks"), Input("duplicates_close", "n_clicks"),
Input("duplicates_save", "n_clicks")],
[State("duplicates_modal", "is_open")])(toggle_modal)
app.callback(
Output("duplicates_modal1", "is_open"),
[Input("duplicates1", "n_clicks"), Input("duplicates_close1", "n_clicks"),
Input("duplicates_save1", "n_clicks")],
[State("duplicates_modal1", "is_open")])(toggle_modal)
app.callback(
Output("BQSR_modal", "is_open"),
[Input("BQSR", "n_clicks"), Input("BQSR_close", "n_clicks"),
Input("BQSR_save", "n_clicks")],
[State("BQSR_modal", "is_open")])(toggle_modal)
app.callback(
Output("BQSR_modal1", "is_open"),
[Input("BQSR1", "n_clicks"), Input("BQSR_close1", "n_clicks"),
Input("BQSR_save1", "n_clicks")],
[State("BQSR_modal1", "is_open")])(toggle_modal)
app.callback(
Output("BQSR_modal2", "is_open"),
[Input("BQSR2", "n_clicks"), Input("BQSR_close2", "n_clicks"),
Input("BQSR_save2", "n_clicks")],
[State("BQSR_modal2", "is_open")])(toggle_modal)
app.callback(
Output("variant_calling_modal", "is_open"),
[Input("variant_calling", "n_clicks"), Input("variant_calling_close", "n_clicks"),
Input("variant_calling_save", "n_clicks")],
[State("variant_calling_modal", "is_open")])(toggle_modal)
app.callback(
Output("variant_calling_modal1", "is_open"),
[Input("variant_calling1", "n_clicks"), Input("variant_calling_close1", "n_clicks"),
Input("variant_calling_save1", "n_clicks")],
[State("variant_calling_modal1", "is_open")])(toggle_modal)
app.callback(
Output("variant_calling_modal2", "is_open"),
[Input("variant_calling2", "n_clicks"), Input("variant_calling_close2", "n_clicks"),
Input("variant_calling_save2", "n_clicks")],
[State("variant_calling_modal2", "is_open")])(toggle_modal)
app.callback(
Output("variant_filtration_modal", "is_open"),
[Input("variant_filtration", "n_clicks"), Input("variant_filtration_close", "n_clicks"),
Input("variant_filtration_save", "n_clicks")],
[State("variant_filtration_modal", "is_open")])(toggle_modal)
app.callback(
Output("variant_filtration_modal1", "is_open"),
[Input("variant_filtration1", "n_clicks"), Input("variant_filtration_close1", "n_clicks"),
Input("variant_filtration_save1", "n_clicks")],
[State("variant_filtration_modal1", "is_open")])(toggle_modal)
app.callback(
Output("variant_filtration_modal2", "is_open"),
[Input("variant_filtration2", "n_clicks"), Input("variant_filtration_close2", "n_clicks"),
Input("variant_filtration_save2", "n_clicks")],
[State("variant_filtration_modal2", "is_open")])(toggle_modal)
app.callback(
Output("computing_power_modal", "is_open"),
[Input("computing_power", "n_clicks"), Input("computing_power_close", "n_clicks"),
Input("computing_power_save", "n_clicks")],
[State("computing_power_modal", "is_open")])(toggle_modal)
app.callback(
Output("computing_power_modal1", "is_open"),
[Input("computing_power1", "n_clicks"), Input("computing_power_close1", "n_clicks"),
Input("computing_power_save1", "n_clicks")],
[State("computing_power_modal1", "is_open")])(toggle_modal)
app.callback(
Output("computing_power_modal2", "is_open"),
[Input("computing_power2", "n_clicks"), Input("computing_power_close2", "n_clicks"),
Input("computing_power_save2", "n_clicks")],
[State("computing_power_modal2", "is_open")])(toggle_modal)
@app.callback(
Output("collapse_bam", "is_open"),
[Input("data_format1", "value")],
[State("collapse_bam", "is_open")],
)
def toggle_collapse(n, is_open):
if n == "bam":
return not is_open
elif n == "fastq" or n == "fastq.gz":
return is_open
@app.callback(Output("visible_div1", "children"),
Input("analysis_type_save", "n_clicks"),
State("analysis_type_radio", "value"))
def invisible_div(n1, v1):
if n1 and v1:
return html.Div(cards("assets/img2.png", "Select Data Format", 'data_format'))
@app.callback([Output("visible_div2", "children"), Output('data-format-store', 'data')],
[Input("data_format_save", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value")],
[State("data_format1", "value"),
State("data_format2", "value")], prevent_initial_call=True)
def invisible_div(n1, value, s, v1, v2):
n_switches = len(s)
if value is not None:
data_format_list = [value]
data_format_df = pd.DataFrame({'value': data_format_list})
if value == "fastq" or value == "fastq.gz":
if n1 and v1 and v2:
return html.Div(cards("assets/img3.png", "Quality Control", 'qc')), \
data_format_df.to_json(date_format='iso', orient='split')
elif value == "bam":
if n_switches == 1:
if n1 and v1 and v2:
return html.Div(cards("assets/img8.png", "BQSR", 'BQSR')), \
data_format_df.to_json(date_format='iso', orient='split')
elif n_switches != 1:
if n1 and v1 and v2:
return html.Div(cards("assets/img4.png", "Read Manipulation/ Trimming", 'trimming')), \
data_format_df.to_json(date_format='iso', orient='split')
return html.Div(), None
@app.callback(Output("visible_div3", "children"),
[Input("qc_save", "n_clicks"), Input("BQSR_save", "n_clicks"), Input("trimming_save", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value")],
[State("qc_radio", "value"),
State("BQSR_radio", "value"),
State("trimming_radio", "value")])
def invisible_div(n1, n2, n3, value, s, v1, v2, v3):
n_switches = len(s)
if value == "fastq" or value == "fastq.gz":
if n1 and v1:
return html.Div(cards("assets/img4.png", "Read Manipulation/ Trimming", 'trimming1'))
elif value == "bam":
if n_switches == 1:
if n2 and v2:
return html.Div(cards("assets/img9.png", "Variant Calling", 'variant_calling'))
elif n_switches != 1:
if n3 and v3:
return html.Div(cards("assets/img7.png", "Mark Duplicates", 'duplicates'))
@app.callback(Output("visible_div4", "children"),
[Input("trimming_save1", "n_clicks"), Input("variant_calling_save", "n_clicks"),
Input("duplicates_save", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value")],
[State("trimming_radio1", "value"),
State("variant_calling_radio", "value"),
State("duplicates_radio", "value")])
def invisible_div(n1, n2, n3, value, s, v1, v2, v3):
n_switches = len(s)
if value == "fastq" or value == "fastq.gz":
if n1 and v1:
return html.Div(cards("assets/img5.png", "Reference Genome Index", 'reference_genome_index'))
elif value == "bam":
if n_switches == 1:
if n2 and v2:
return html.Div(cards("assets/img10.png", "Variant Filtration", 'variant_filtration'))
elif n_switches != 1:
if n3 and v3:
return html.Div(cards("assets/img8.png", "BQSR", 'BQSR1'))
@app.callback(Output("visible_div5", "children"),
[Input("reference_genome_index_save", "n_clicks"), Input("variant_filtration_save", "n_clicks"),
Input("BQSR_save1", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value")],
[State("reference_genome_tool_index_radio", "value"),
State("reference_genome_dataformat_index_radio", "value"),
State("variant_filtration_radio", "value"),
State("BQSR_radio1", "value")])
def invisible_div(n1, n2, n3, value, s, v1, v2, v3, v4):
n_switches = len(s)
if value == "fastq" or value == "fastq.gz":
if n1 and v1 and v2:
return html.Div(cards("assets/img6.png", "Alignment", 'alignment'))
elif value == "bam":
if n_switches == 1:
if n2 and v3:
return html.Div(cards("assets/img11.png", "Computing Power", 'computing_power'))
elif n_switches != 1:
if n3 and v4:
return html.Div(cards("assets/img9.png", "Variant Calling", 'variant_calling1'))
@app.callback([Output("visible_div6", "children"), Output('mapping-store', 'data')],
[Input("alignment_save", "n_clicks"), Input("computing_power_save", "n_clicks"),
Input("variant_calling_save1", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value")],
[State("alignment_radio", "value"),
State("computing_power_radio", "value"),
State("variant_calling_radio1", "value")])
def invisible_div(n1, n2, n3, value, s, v1, v2, v3):
n_switches = len(s)
if value == "fastq" or value == "fastq.gz":
mapping_df = pd.DataFrame({'value': [v1]})
if n1 and v1:
return html.Div(cards("assets/img7.png", "Mark Duplicates", 'duplicates1')), \
mapping_df.to_json(date_format='iso', orient='split')
elif value == "bam":
if n_switches == 1:
if n2 and v2:
return html.Div(
dbc.Button("Finish Setup", color="success", className="pipeline_finish_button",
href="/get_data")), None
elif n_switches != 1:
if n3 and v3:
return html.Div(cards("assets/img10.png", "Variant Filtration", 'variant_filtration1')), None
return html.Div(), None
@app.callback(Output("visible_div7", "children"),
[Input("duplicates_save1", "n_clicks"),
Input("variant_filtration_save1", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value")],
[State("duplicates_radio1", "value"),
State("variant_filtration_radio1", "value")])
def invisible_div(n1, n2, value, s, v1, v2):
n_switches = len(s)
if value == "fastq" or value == "fastq.gz":
if n1 and v1:
return html.Div(cards("assets/img8.png", "BQSR", 'BQSR2'))
elif value == "bam":
if n_switches == 1:
return
elif n_switches != 1:
if n2 and v2:
return html.Div(cards("assets/img11.png", "Computing Power", 'computing_power1'))
@app.callback(Output("visible_div8", "children"),
[Input("BQSR_save2", "n_clicks"),
Input("computing_power_save1", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value")],
[State("BQSR_radio2", "value"),
State("computing_power_radio1", "value")])
def invisible_div(n1, n2, value, s, v1, v2):
n_switches = len(s)
if value == "fastq" or value == "fastq.gz":
if n1 and v1:
return html.Div(cards("assets/img9.png", "Variant Calling", 'variant_calling2'))
elif value == "bam":
if n_switches == 1:
return
elif n_switches != 1:
if n2 and v2:
return html.Div(
dbc.Button("Finish Setup", color="success", className="pipeline_finish_button", href="/get_data"))
@app.callback(Output("visible_div9", "children"),
[Input("variant_calling_save2", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value")],
State("variant_calling_radio2", "value"))
def invisible_div(n1, value, s, v1):
n_switches = len(s)
if value == "fastq" or value == "fastq.gz":
if n1 and v1:
return html.Div(cards("assets/img10.png", "Variant Filtration", 'variant_filtration2'))
elif value == "bam":
if n_switches == 1:
return
elif n_switches != 1:
return
@app.callback(Output("visible_div10", "children"),
[Input("variant_filtration_save2", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value")],
State("variant_filtration_radio2", "value"))
def invisible_div(n1, value, s, v1):
n_switches = len(s)
if value == "fastq" or value == "fastq.gz":
if n1 and v1:
return html.Div(cards("assets/img11.png", "Computing Power", 'computing_power2'))
elif value == "bam":
if n_switches == 1:
return
elif n_switches != 1:
return
@app.callback(Output("visible_div11", "children"),
[Input("computing_power_save2", "n_clicks"),
Input("data_format1", "value"), Input("bam_switch", "value"),
Input("computing_power_radio2", "value")])
def invisible_div(n1, value, s, v1):
n_switches = len(s)
if value == "fastq" or value == "fastq.gz":
if n1 and v1:
return html.Div(
dbc.Button("Finish Setup", color="success", className="pipeline_finish_button", href="/get_data"))
elif value == "bam":
if n_switches == 1:
return
elif n_switches != 1:
return
@app.callback(
Output("pipeline_title_edit_modal", "is_open"),
[Input("pipeline_title_edit", "n_clicks"), Input("pipeline_title_edit_close", "n_clicks")],
[State("pipeline_title_edit_modal", "is_open")],
)
def toggle_modal(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(Output("pipeline_title", "children"), [Input("title_input", "value")])
def output_text(value):
if value is None:
return "Analysis"
return value
|
UTF-8
|
Python
| false
| false
| 35,639
|
py
| 38
|
create_pipeline.py
| 18
| 0.510312
| 0.495861
| 0
| 784
| 44.457908
| 118
|
Govind-Jangid/cams_biometrics
| 1,571,958,066,437
|
25a5903c53ac10870381e415bba1dae7660b9da4
|
5540f838b7d6437eb740cd3ce8b988168307d9e8
|
/setup.py
|
17b7fe1296a8e9b20ac92f5e123550e5d170b04b
|
[
"MIT"
] |
permissive
|
https://github.com/Govind-Jangid/cams_biometrics
|
9be994834ea320ace5f60105e44bcff6daac6cd0
|
a969902c7ad4aa5512807807ed662f16cd402716
|
refs/heads/master
| 2022-03-03T12:13:17.852911
| 2018-11-05T06:46:46
| 2018-11-05T06:46:46
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re, ast
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in cams_biometrics/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('cams_biometrics/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='cams_biometrics',
version=version,
description='API to post attendance from CAMS Biometrics',
author='earthians',
author_email='info@earthianslive.com',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
|
UTF-8
|
Python
| false
| false
| 719
|
py
| 6
|
setup.py
| 3
| 0.698192
| 0.694019
| 0
| 25
| 27.76
| 70
|
PCLC7Z2/self-sample
| 7,533,372,658,751
|
b5f499061558e762ccd99917281ac374daffba45
|
47c10ad5bd158de019d0c0200cf9e8d9a57eb94b
|
/options.py
|
a91d784815af85969ad248e96c0fb4cfd3ace250
|
[] |
no_license
|
https://github.com/PCLC7Z2/self-sample
|
1ee5721b80eb5adfbc320bcced6cf3795e0432f5
|
ad119c6fbef8fcf03e9fce4c1c2dbe61a85e2689
|
refs/heads/master
| 2023-07-08T20:29:46.156012
| 2021-08-15T13:43:57
| 2021-08-15T13:43:57
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import argparse
from pathlib import Path
import os
import util
def get_parser(name='Self-Sampling') -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description=name)
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--iterations', default=100000, type=int)
parser.add_argument('--export-interval', default=1000, type=int)
parser.add_argument('--D1', default=5000, type=int)
parser.add_argument('--D2', default=5000, type=int)
parser.add_argument('--max-points', default=-1, type=int)
parser.add_argument('--save-path', type=Path, required=True)
parser.add_argument('--pc', type=str)
parser.add_argument('--batch-size', type=int, default=4)
parser.add_argument('--bn', action='store_true')
parser.add_argument('--stn', action='store_true')
parser.add_argument('--name', type=str, default='')
parser.add_argument('--init-var', default=-1.0, type=float)
parser.add_argument('--sampling-mode', default='uniform', type=str)
parser.add_argument('--p1', default=0.9, type=float)
parser.add_argument('--p2', default=-1.0, type=float)
parser.add_argument('--k', type=int, default=20)
parser.add_argument('--percentile', type=float, default=-1.0)
parser.add_argument('--ang-wt', type=float, default=0.1)
parser.add_argument('--force-normal-estimation', action='store_true')
parser.add_argument('--kmeans', action='store_true')
parser.add_argument('--mse', action='store_true')
parser.add_argument('--curvature-cache', type=str, default='')
return parser
def parse_args(parser: argparse.ArgumentParser, inference=False):
args = parser.parse_args()
if args.p2 == -1.0:
args.p2 = 1 - args.p1
if not os.path.exists(args.save_path):
Path.mkdir(args.save_path, exist_ok=True, parents=True)
if not inference:
Path.mkdir(args.save_path / 'exports', exist_ok=True, parents=True)
Path.mkdir(args.save_path / 'targets', exist_ok=True, parents=True)
Path.mkdir(args.save_path / 'sources', exist_ok=True, parents=True)
Path.mkdir(args.save_path / 'generators', exist_ok=True, parents=True)
with open(args.save_path / ('inference_args.txt' if inference else 'args.txt'), 'w+') as file:
file.write(util.args_to_str(args))
return args
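# Typical entry-point usage (sketch; --save-path is the one required flag above):
# args = parse_args(get_parser())
# print(args.save_path, args.iterations)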
|
UTF-8
|
Python
| false
| false
| 2,562
|
py
| 18
|
options.py
| 15
| 0.65847
| 0.638954
| 0
| 57
| 43.947368
| 98
|
crystallistic/wallbreakers-hw
| 6,957,847,047,840
|
dd5d4b71c8fdfe7dd854234f9dc4e3de355eedc5
|
21c9fcd90fbfa368b60d747cedf9d5fa0eb60d32
|
/week2/290.py
|
2310a254a33fbed478f2a123fb5f8cf4fbf343e1
|
[] |
no_license
|
https://github.com/crystallistic/wallbreakers-hw
|
0a2cff6d08b18b7705fc93565020fdf0d510c44e
|
e6e66e396d42662bba5c3042b4cf337a2506fa83
|
refs/heads/master
| 2020-06-08T06:00:35.744870
| 2019-08-05T05:07:31
| 2019-08-05T05:07:31
| 193,172,596
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
class Solution(object):
def wordPattern(self, pattern, str):
"""
:type pattern: str
:type str: str
:rtype: bool
"""
words = str.split()
if len(pattern) != len(words): return False
seenValues = set()
lookup = dict()
        for c, w in zip(pattern, words):
            if (c in lookup and lookup[c] != w) or (c not in lookup and w in seenValues):
                return False
            else:
                lookup[c] = w
                seenValues.add(w)
return True
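# Quick demonstration (sketch):
if __name__ == "__main__":
    sol = Solution()
    print(sol.wordPattern("abba", "dog cat cat dog"))   # True
    print(sol.wordPattern("abba", "dog cat cat fish"))  # False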
|
UTF-8
|
Python
| false
| false
| 553
|
py
| 66
|
290.py
| 65
| 0.484629
| 0.484629
| 0
| 18
| 29.777778
| 89
|
amd840/harajBeta
| 13,640,816,144,128
|
1d5fa9c563a933e349d73dac72ecc79d993bff4d
|
12837a0f5bb21a456e0a16d7a4553d3a9e77db18
|
/polls/migrations/0006_auto_20200802_0921.py
|
aae14510805112828e23eafbb0a8d75fbb25f5ae
|
[] |
no_license
|
https://github.com/amd840/harajBeta
|
0e4f503294e2e38db85e517867466290fb1d3e50
|
d9036438055e3f3f54b2ccfd79769eea70450d25
|
refs/heads/main
| 2023-05-01T14:42:49.920376
| 2021-05-20T20:39:14
| 2021-05-20T20:39:14
| 341,306,450
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 3.0.8 on 2020-08-02 09:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('polls', '0005_product_product_seller'),
]
operations = [
migrations.RemoveField(
model_name='cart',
name='product_seller',
),
migrations.RemoveField(
model_name='orders',
name='product_buyer',
),
migrations.AddField(
model_name='cart',
name='product_name',
field=models.TextField(max_length=200, null=True),
),
migrations.AddField(
model_name='cart',
name='seller',
field=models.TextField(max_length=200, null=True),
),
migrations.AddField(
model_name='orders',
name='buyer',
field=models.TextField(max_length=200, null=True),
),
migrations.AddField(
model_name='user',
name='cart',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='polls.Cart'),
),
migrations.AddField(
model_name='user',
name='order',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='polls.Orders'),
),
migrations.AlterField(
model_name='cart',
name='product_qentity',
field=models.IntegerField(default=1),
),
migrations.AlterField(
model_name='cart',
name='state',
field=models.IntegerField(default=1),
),
]
|
UTF-8
|
Python
| false
| false
| 1,700
|
py
| 49
|
0006_auto_20200802_0921.py
| 26
| 0.542353
| 0.524706
| 0
| 57
| 28.824561
| 111
|
karoonakar/ML
| 10,050,223,475,981
|
efead633d2514d2882019c27fcfc64ecd2a2c177
|
0f78cb7079b45e4f1dc8734c6f2c24f6720e4a1e
|
/Notebook/exception_classifier.py
|
3ee6d0b4e293108be0baa03b4af0ebfb3ee481ca
|
[] |
no_license
|
https://github.com/karoonakar/ML
|
53cb274e98e026a9676f7148851db3d0c935657d
|
a669e593494b8afac50a15fd9e7b2265d4aba9db
|
refs/heads/master
| 2022-10-30T08:42:22.964185
| 2018-08-27T20:16:20
| 2018-08-27T20:16:20
| 145,786,869
| 0
| 1
| null | false
| 2022-10-07T03:03:54
| 2018-08-23T02:06:35
| 2018-08-27T20:17:14
| 2018-08-27T20:17:13
| 8,427
| 0
| 1
| 1
|
Python
| false
| false
|
import pandas as pd
from sklearn import tree
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
path = 'data/exception_data.csv'
dataFrame = pd.read_csv(path)
df_master=dataFrame[["Module", "Language","Exception","CausedBy1","CausedBy2"]]
df_lable=dataFrame[["Severity"]]
df_message=dataFrame[["Message"]]
df_causedBy1Msg=dataFrame[["CausedBy1Msg"]]
df_causedBy2Msg=dataFrame[["CausedBy2Msg"]]
vect_msg = CountVectorizer(ngram_range=(2, 3))
msg_train_dtm = vect_msg.fit_transform(df_message.Message)
message_trigramed_frame= pd.DataFrame(msg_train_dtm.toarray(),columns=vect_msg.get_feature_names())
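# Note: scikit-learn >= 1.0 renamed get_feature_names() to get_feature_names_out();
# the calls in this script target the older API.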
vect_causedby1_msg = CountVectorizer(ngram_range=(2, 3))
msg_causedby1_train_dtm = vect_causedby1_msg.fit_transform(df_causedBy1Msg.CausedBy1Msg)
causedby1_msg_trigramed_frame=pd.DataFrame(msg_causedby1_train_dtm.toarray(),columns=vect_causedby1_msg.get_feature_names())
vect_causedby2_msg = CountVectorizer(ngram_range=(2, 3))
msg_causedby2_train_dtm = vect_causedby2_msg.fit_transform(df_causedBy2Msg.CausedBy2Msg)
causedby2_msg_trigramed_frame=pd.DataFrame(msg_causedby2_train_dtm.toarray(),columns=vect_causedby2_msg.get_feature_names())
df1=pd.merge(df_master, message_trigramed_frame, left_index=True, right_index=True)
df2=pd.merge(df1, causedby1_msg_trigramed_frame, left_index=True, right_index=True)
df3=pd.merge(df2, causedby2_msg_trigramed_frame, left_index=True, right_index=True)
print(df3)
print(df3.shape)
print(df_lable.shape)
#Decision Tree
dtClassifier = tree.DecisionTreeClassifier()
dtClassifier.fit(df3, df_lable)
#Testing
exception_df = pd.DataFrame({'Module':[11],'Language':[100],'Exception':[10019],'CausedBy1' : [0],'CausedBy2' : [0],'Message' : ['could not execute native bulk manipulation query at'], 'CausedBy1Msg' : ['NoCausedBy oneMsg'], 'CausedBy2Msg' : ['NoCausedBy twoMsg']})
df_master_test=exception_df[["Module", "Language","Exception","CausedBy1","CausedBy2"]]
df_message_test=exception_df[["Message"]]
df_causedBy1Msg_test=exception_df[["CausedBy1Msg"]]
df_causedBy2Msg_test=exception_df[["CausedBy2Msg"]]
msg_test_dtm = vect_msg.transform(df_message_test.Message)
msg_test_causedby1_dtm = vect_causedby1_msg.transform(df_causedBy1Msg_test.CausedBy1Msg)
msg_test_causedby2_dtm = vect_causedby2_msg.transform(df_causedBy2Msg_test.CausedBy2Msg)
test_msg_trigramed_frame= pd.DataFrame(msg_test_dtm.toarray(),columns=vect_msg.get_feature_names())
test_causedby1_msg_trigramed_frame=pd.DataFrame(msg_test_causedby1_dtm.toarray(),columns=vect_causedby1_msg.get_feature_names())
test_causedby2_msg_trigramed_frame=pd.DataFrame(msg_test_causedby2_dtm.toarray(),columns=vect_causedby2_msg.get_feature_names())
test_df1=pd.merge(df_master_test, test_msg_trigramed_frame, left_index=True, right_index=True)
test_df2=pd.merge(test_df1, test_causedby1_msg_trigramed_frame, left_index=True, right_index=True)
test_df3=pd.merge(test_df2, test_causedby2_msg_trigramed_frame, left_index=True, right_index=True)
print(test_df3)
print(dtClassifier.predict(test_df3))
|
UTF-8
|
Python
| false
| false
| 3,058
|
py
| 15
|
exception_classifier.py
| 10
| 0.77894
| 0.751799
| 0
| 62
| 48.322581
| 265
|
paulhzq/cs61a
| 2,989,297,277,719
|
2f673f8886012d4bd5c08b5ebe9139cd7388c2c6
|
108034973f9046a7603d5fe3f26c59b20a7e68da
|
/homework/hw12/tests/find.py
|
e6dc78ba21e592d75abdea7a11cd26190efdae02
|
[] |
no_license
|
https://github.com/paulhzq/cs61a
|
b1b1387cefbaaf1823c02d535891db7d085f3b04
|
9eee13df9ad113591dc55d106561951cea34abc5
|
refs/heads/master
| 2020-05-23T08:16:14.193086
| 2017-01-15T02:06:18
| 2017-01-15T02:06:18
| 70,255,875
| 8
| 8
| null | null | null | null | null | null | null | null | null | null | null | null | null |
test = {
'name': 'find',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
scm> (find m even?)
2
scm> (find m (lambda (x) (= x 3)))
False
scm> (find m (lambda (x) (= x 1)))
1
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
scm> (load 'hw12)
scm> (define m (cons-stream 1 (cons-stream 2 nil)))
""",
'teardown': '',
'type': 'scheme'
}
]
}
|
UTF-8
|
Python
| false
| false
| 551
|
py
| 64
|
find.py
| 60
| 0.344828
| 0.328494
| 0
| 29
| 18
| 57
|
awesome-liuxiao/leetcodesolution
| 2,800,318,694,290
|
489ef4720ebf4017465747cf45b531f0ab1fdcae
|
e2468c60810764971f2dae2b959650b553042810
|
/796_rotateStr.py
|
e827d3c541a6852bc1ab7fff30319c7fe8cc82e5
|
[] |
no_license
|
https://github.com/awesome-liuxiao/leetcodesolution
|
9a01b6f36266149ae7fe00625785d1ada41f190a
|
3637cd1347b5153daeeb855ebc44cfea5649fc90
|
refs/heads/master
| 2023-06-08T13:42:14.653688
| 2023-06-01T08:39:35
| 2023-06-01T08:39:35
| 213,380,224
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
class Solution:
def rotateString(self, s: str, goal: str) -> bool:
if s == goal:
return True
sLen = len(s)
def rotateOnce(tmpS: str, sLen: int) -> str:
if sLen == 1:
return tmpS
tail = tmpS[0]
tmpL = list(tmpS)
for i in range(sLen-1):
tmpL[i] = tmpL[i+1]
tmpL[sLen-1] = tail
tmpS = ''.join(tmpL)
return tmpS
for i in range(sLen):
s = rotateOnce(s, sLen)
# print("cur s: "+ s + ", goal: " + goal)
if s == goal:
return True
return False
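# Note: an equivalent one-line check is the classic rotation trick (offered as
# an alternative, not the author's approach):
#     return len(s) == len(goal) and goal in s + s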
X = Solution()
s = "abcde"
goal = "cdeab"
print(X.rotateString(s, goal))
s = "abcde"
goal = "abced"
print(X.rotateString(s, goal))
s = "abc"
goal = "cba"
print(X.rotateString(s, goal))
s = "ab"
goal = "ba"
print(X.rotateString(s, goal))
s = "a"
goal = "b"
print(X.rotateString(s, goal))
|
UTF-8
|
Python
| false
| false
| 969
|
py
| 176
|
796_rotateStr.py
| 173
| 0.474716
| 0.469556
| 0
| 44
| 21.045455
| 54
|
yifaan/CIT-590
| 2,619,930,095,578
|
c9fd19f90c1dcc3cb4148b2729d052969c3f89b9
|
5e1ee67ddad12cc44b1e5270b2b2d2e0df686bd7
|
/homework5/makeWebsite_tests.py
|
6a0e0ffde76e4847ae244462fd3017194ef3f1ea
|
[] |
no_license
|
https://github.com/yifaan/CIT-590
|
7b3d60f0cb5abbfabcf5ba7d360f89c09ce07999
|
69d6a793d99cbf0a196cc1dc464a1be0ab434d89
|
refs/heads/master
| 2020-06-14T16:03:16.078494
| 2015-04-08T23:10:23
| 2015-04-08T23:10:23
| 29,832,003
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from makeWebsite import *
import unittest
class makeWebtestcase(unittest.TestCase):
def setUp(self):
self.resume = 'resume.txt'
self.newresume = 'resume_unittest.txt'
def testGetName(self):
self.assertEqual(GetName(self.resume), 'Yifan Yang')
self.assertRaises(NameError, GetName, self.newresume)
def testGetEmail(self):
self.assertEqual(GetEmail(self.resume), 'Yifany@seas.upenn.edu')
self.assertRaises(NameError, GetEmail, self.newresume)
def testGetCourse(self):
course = ['Programming Languages and Techniques',
'Feedback Control', 'Advanced Robotics']
self.assertEqual(GetCourse(self.resume), course)
def testGetProjects(self):
projects = [
'Robockey - A robot hockey competition of MEAM 510 Mechatronics class']
self.assertEqual(GetProjects(self.resume), projects)
def testGetEducation(self):
Education = ['University of Pennsylvania, Philadelphia, PA, USA - Master of Science in Robotics',
'University of Birmingham, Birmingham, UK - Bachelor of Engineering in Mechanical Engineering',
'Huazhong University of Science and Technology, Wuhan, China - Bachelor of Engineering in Mechanical Engineering']
self.assertEqual(GetEducation(self.resume), Education)
def testIntro(self):
self.assertEqual(intro('Yifan Yang', 'yifany@seas.upenn.edu'),
'<h1 >\nYifan Yang\n</h1>\n<p>\nyifany@seas.upenn.edu\n</p>')
def testeduhtml(self):
self.assertEqual(Eduhtml(['University']),'<h2>Education</h2>\n<ul>\n<li>\nUniversity\n</li>\n</ul>')
def testProjecthtml(self):
self.assertEqual(Projecthtml(['Proj']),'<h2>\nProjects\n</h2>\n<ul>\n<li>\n<p>\nProj\n</p>\n</li>\n</ul>')
def testCoursehtml(self):
self.assertEqual(Courseshtml(['a','b']),'<h3>\nCourses\n</h3>\n<span>\na, b\n</span>')
if __name__ == '__main__':
    unittest.main()
|
UTF-8
|
Python
| false
| false
| 1,979
|
py
| 28
|
makeWebsite_tests.py
| 21
| 0.651844
| 0.646286
| 0
| 49
| 39.387755
| 137
|
flatironinstitute/binderhub
| 1,812,476,226,212
|
127ef50cdc51ed7e67e3dff21cd9175a08363a7a
|
49ad3bc3c96128e8f8b23d8b5aed06e7fbf03fa9
|
/binderhub/repoproviders.py
|
5269877aec55f56a6ea2408e343201014bd9dfb6
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
https://github.com/flatironinstitute/binderhub
|
27ef1b58cf1a17b8ed254a683cd92102dc1b323d
|
e7da1b6dc9036fbb774043c321e847744abd6ffa
|
refs/heads/main
| 2023-06-12T00:45:18.645048
| 2023-06-03T23:15:18
| 2023-06-03T23:15:18
| 175,685,187
| 1
| 1
|
BSD-3-Clause
| true
| 2022-11-09T01:58:37
| 2019-03-14T19:19:38
| 2022-01-10T17:17:18
| 2022-11-09T01:58:36
| 7,570
| 1
| 2
| 0
|
Python
| false
| false
|
"""
Classes for Repo providers.
Subclass the base class, ``RepoProvider``, to support different version
control services and providers.
.. note:: When adding a new repo provider, add it to the allowed values for
repo providers in event-schemas/launch.json.
"""
import asyncio
import json
import os
import re
import time
import urllib.parse
from datetime import datetime, timedelta, timezone
from urllib.parse import urlparse
from stat import S_ISDIR, S_ISREG, S_IROTH, S_IXOTH
import string
import yaml
import escapism
from prometheus_client import Gauge
from tornado.httpclient import AsyncHTTPClient, HTTPError, HTTPRequest
from tornado.httputil import url_concat
from traitlets import Bool, Dict, List, Set, Unicode, default
from traitlets.config import LoggingConfigurable
from kubernetes import client
from jupyterhub.traitlets import ByteSpecification
from .utils import Cache
GITHUB_RATE_LIMIT = Gauge(
"binderhub_github_rate_limit_remaining", "GitHub rate limit remaining"
)
SHA1_PATTERN = re.compile(r"[0-9a-f]{40}")
GIT_SSH_PATTERN = re.compile(r"([\w\-]+@[\w\-\.]+):(.+)", re.IGNORECASE)
def tokenize_spec(spec):
"""Tokenize a GitHub-style spec into parts, error if spec invalid."""
spec_parts = spec.split("/", 2) # allow ref to contain "/"
if len(spec_parts) != 3:
msg = f'Spec is not of the form "user/repo/ref", provided: "{spec}".'
if len(spec_parts) == 2 and spec_parts[-1] not in {"main", "master", "HEAD"}:
msg += f' Did you mean "{spec}/HEAD"?'
raise ValueError(msg)
return spec_parts
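# Illustration (added note): the maxsplit of 2 keeps any extra slashes inside
# the ref component, e.g.
#   tokenize_spec("jupyterhub/binderhub/feat/my-branch")
#   -> ["jupyterhub", "binderhub", "feat/my-branch"]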
def strip_suffix(text, suffix):
if text.endswith(suffix):
text = text[: -(len(suffix))]
return text
class RepoProvider(LoggingConfigurable):
"""Base class for a repo provider"""
name = Unicode(
help="""
Descriptive human readable name of this repo provider.
"""
)
spec = Unicode(
help="""
The spec for this builder to parse
"""
)
banned_specs = List(
help="""
List of specs to blacklist building.
Should be a list of regexes (not regex objects) that match specs which should be blacklisted
""",
config=True,
)
high_quota_specs = List(
help="""
List of specs to assign a higher quota limit.
Should be a list of regexes (not regex objects) that match specs which should have a higher quota
""",
config=True,
)
spec_config = List(
help="""
List of dictionaries that define per-repository configuration.
Each item in the list is a dictionary with two keys:
pattern : string
defines a regex pattern (not a regex object) that matches specs.
config : dict
a dictionary of "config_name: config_value" pairs that will be
applied to any repository that matches `pattern`
""",
config=True,
)
unresolved_ref = Unicode()
git_credentials = Unicode(
"",
help="""
Credentials (if any) to pass to git when cloning.
""",
config=True,
)
def is_banned(self):
"""
Return true if the given spec has been banned
"""
for banned in self.banned_specs:
# Ignore case, because most git providers do not
# count DS-100/textbook as different from ds-100/textbook
if re.match(banned, self.spec, re.IGNORECASE):
return True
return False
def has_higher_quota(self):
"""
Return true if the given spec has a higher quota
"""
for higher_quota in self.high_quota_specs:
# Ignore case, because most git providers do not
# count DS-100/textbook as different from ds-100/textbook
if re.match(higher_quota, self.spec, re.IGNORECASE):
return True
return False
def repo_config(self, settings):
"""
Return configuration for this repository.
"""
repo_config = {}
# Defaults and simple overrides
if self.has_higher_quota():
repo_config["quota"] = settings.get("per_repo_quota_higher")
else:
repo_config["quota"] = settings.get("per_repo_quota")
# Spec regex-based configuration
for item in self.spec_config:
pattern = item.get("pattern", None)
config = item.get("config", None)
if not isinstance(pattern, str):
raise ValueError(
"Spec-pattern configuration expected "
"a regex pattern string, not "
f"type {type(pattern)}"
)
if not isinstance(config, dict):
raise ValueError(
"Spec-pattern configuration expected "
"a specification configuration dict, not "
f"type {type(config)}"
)
# Ignore case, because most git providers do not
# count DS-100/textbook as different from ds-100/textbook
if re.match(pattern, self.spec, re.IGNORECASE):
repo_config.update(config)
return repo_config
async def get_resolved_ref(self):
raise NotImplementedError("Must be overridden in child class")
async def get_resolved_spec(self):
"""Return the spec with resolved ref."""
raise NotImplementedError("Must be overridden in child class")
def get_repo_url(self):
"""Return the git clone-able repo URL"""
raise NotImplementedError("Must be overridden in the child class")
async def get_resolved_ref_url(self):
"""Return the URL of repository at this commit in history"""
raise NotImplementedError("Must be overridden in child class")
def get_build_slug(self):
"""Return a unique build slug"""
raise NotImplementedError("Must be overriden in the child class")
def get_launch_options(self):
return
def check_hub_user(self, user):
return True
@staticmethod
def is_valid_sha1(sha1):
return bool(SHA1_PATTERN.match(sha1))
class FakeProvider(RepoProvider):
"""Fake provider for local testing of the UI"""
labels = {
"text": "Fake Provider",
"tag_text": "Fake Ref",
"ref_prop_disabled": True,
"label_prop_disabled": True,
}
async def get_resolved_ref(self):
return "1a2b3c4d5e6f"
async def get_resolved_spec(self):
return "fake/repo/1a2b3c4d5e6f"
def get_repo_url(self):
return "https://example.com/fake/repo.git"
async def get_resolved_ref_url(self):
return "https://example.com/fake/repo/tree/1a2b3c4d5e6f"
def get_build_slug(self):
return "{user}-{repo}".format(user="Rick", repo="Morty")
class ZenodoProvider(RepoProvider):
"""Provide contents of a Zenodo record
Users must provide a spec consisting of the Zenodo DOI.
"""
name = Unicode("Zenodo")
display_name = "Zenodo DOI"
labels = {
"text": "Zenodo DOI (10.5281/zenodo.3242074)",
"tag_text": "Git ref (branch, tag, or commit)",
"ref_prop_disabled": True,
"label_prop_disabled": True,
}
async def get_resolved_ref(self):
client = AsyncHTTPClient()
req = HTTPRequest(f"https://doi.org/{self.spec}", user_agent="BinderHub")
r = await client.fetch(req)
self.record_id = r.effective_url.rsplit("/", maxsplit=1)[1]
return self.record_id
async def get_resolved_spec(self):
if not hasattr(self, "record_id"):
self.record_id = await self.get_resolved_ref()
# zenodo registers a DOI which represents all versions of a software package
# and it always resolves to latest version
# for that case, we have to replace the version number in DOIs with
# the specific (resolved) version (record_id)
resolved_spec = self.spec.split("zenodo")[0] + "zenodo." + self.record_id
return resolved_spec
def get_repo_url(self):
# While called repo URL, the return value of this function is passed
# as argument to repo2docker, hence we return the spec as is.
return self.spec
async def get_resolved_ref_url(self):
resolved_spec = await self.get_resolved_spec()
return f"https://doi.org/{resolved_spec}"
def get_build_slug(self):
return f"zenodo-{self.record_id}"
class FigshareProvider(RepoProvider):
"""Provide contents of a Figshare article
Users must provide a spec consisting of the Figshare DOI.
"""
name = Unicode("Figshare")
display_name = "Figshare DOI"
url_regex = re.compile(r"(.*)/articles/([^/]+)/([^/]+)/(\d+)(/)?(\d+)?")
labels = {
"text": "Figshare DOI (10.6084/m9.figshare.9782777.v1)",
"tag_text": "Git ref (branch, tag, or commit)",
"ref_prop_disabled": True,
"label_prop_disabled": True,
}
async def get_resolved_ref(self):
client = AsyncHTTPClient()
req = HTTPRequest(f"https://doi.org/{self.spec}", user_agent="BinderHub")
r = await client.fetch(req)
match = self.url_regex.match(r.effective_url)
article_id = match.groups()[3]
article_version = match.groups()[5]
if not article_version:
article_version = "1"
self.record_id = f"{article_id}.v{article_version}"
return self.record_id
async def get_resolved_spec(self):
if not hasattr(self, "record_id"):
self.record_id = await self.get_resolved_ref()
# spec without version is accepted as version 1 - check get_resolved_ref method
# for that case, we have to replace the version number in DOIs with
# the specific (resolved) version (record_id)
resolved_spec = self.spec.split("figshare")[0] + "figshare." + self.record_id
return resolved_spec
def get_repo_url(self):
# While called repo URL, the return value of this function is passed
# as argument to repo2docker, hence we return the spec as is.
return self.spec
async def get_resolved_ref_url(self):
resolved_spec = await self.get_resolved_spec()
return f"https://doi.org/{resolved_spec}"
def get_build_slug(self):
return f"figshare-{self.record_id}"
class DataverseProvider(RepoProvider):
name = Unicode("Dataverse")
display_name = "Dataverse DOI"
labels = {
"text": "Dataverse DOI (10.7910/DVN/TJCLKP)",
"tag_text": "Git ref (branch, tag, or commit)",
"ref_prop_disabled": True,
"label_prop_disabled": True,
}
async def get_resolved_ref(self):
client = AsyncHTTPClient()
req = HTTPRequest(f"https://doi.org/{self.spec}", user_agent="BinderHub")
r = await client.fetch(req)
search_url = urllib.parse.urlunparse(
urllib.parse.urlparse(r.effective_url)._replace(
path="/api/datasets/:persistentId"
)
)
req = HTTPRequest(search_url, user_agent="BinderHub")
r = await client.fetch(req)
resp = json.loads(r.body)
assert resp["status"] == "OK"
self.identifier = resp["data"]["identifier"]
self.record_id = "{datasetId}.v{major}.{minor}".format(
datasetId=resp["data"]["id"],
major=resp["data"]["latestVersion"]["versionNumber"],
minor=resp["data"]["latestVersion"]["versionMinorNumber"],
)
# NOTE: data.protocol should be potentially prepended here
# {protocol}:{authority}/{identifier}
self.resolved_spec = "{authority}/{identifier}".format(
authority=resp["data"]["authority"],
identifier=resp["data"]["identifier"],
)
self.resolved_ref_url = resp["data"]["persistentUrl"]
return self.record_id
async def get_resolved_spec(self):
if not hasattr(self, "resolved_spec"):
await self.get_resolved_ref()
return self.resolved_spec
async def get_resolved_ref_url(self):
if not hasattr(self, "resolved_ref_url"):
await self.get_resolved_ref()
return self.resolved_ref_url
def get_repo_url(self):
# While called repo URL, the return value of this function is passed
# as argument to repo2docker, hence we return the spec as is.
return self.spec
def get_build_slug(self):
return "dataverse-" + escapism.escape(self.identifier, escape_char="-").lower()
class HydroshareProvider(RepoProvider):
"""Provide contents of a Hydroshare resource
Users must provide a spec consisting of the Hydroshare resource id.
"""
name = Unicode("Hydroshare")
display_name = "Hydroshare resource"
url_regex = re.compile(r".*([0-9a-f]{32}).*")
labels = {
"text": "Hydroshare resource id or URL",
"tag_text": "Git ref (branch, tag, or commit)",
"ref_prop_disabled": True,
"label_prop_disabled": True,
}
def _parse_resource_id(self, spec):
match = self.url_regex.match(spec)
if not match:
raise ValueError("The specified Hydroshare resource id was not recognized.")
resource_id = match.groups()[0]
return resource_id
async def get_resolved_ref(self):
client = AsyncHTTPClient()
self.resource_id = self._parse_resource_id(self.spec)
req = HTTPRequest(
f"https://www.hydroshare.org/hsapi/resource/{self.resource_id}/scimeta/elements",
user_agent="BinderHub",
)
r = await client.fetch(req)
def parse_date(json_body):
json_response = json.loads(json_body)
date = next(
item for item in json_response["dates"] if item["type"] == "modified"
)["start_date"]
# Hydroshare timestamp always returns the same timezone, so strip it
date = date.split(".")[0]
parsed_date = datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
epoch = parsed_date.replace(tzinfo=timezone(timedelta(0))).timestamp()
# truncate the timestamp
return str(int(epoch))
# date last updated is only good for the day... probably need something finer eventually
self.record_id = f"{self.resource_id}.v{parse_date(r.body)}"
return self.record_id
async def get_resolved_spec(self):
# Hydroshare does not provide a history, resolves to repo url
return self.get_repo_url()
async def get_resolved_ref_url(self):
# Hydroshare does not provide a history, resolves to repo url
return self.get_repo_url()
def get_repo_url(self):
self.resource_id = self._parse_resource_id(self.spec)
return f"https://www.hydroshare.org/resource/{self.resource_id}"
def get_build_slug(self):
return f"hydroshare-{self.record_id}"
class GitRepoProvider(RepoProvider):
"""Bare bones git repo provider.
Users must provide a spec of the following form.
<url-escaped-namespace>/<unresolved_ref>
<url-escaped-namespace>/<resolved_ref>
eg:
https%3A%2F%2Fgithub.com%2Fbinder-examples%2Fconda/main
https%3A%2F%2Fgithub.com%2Fbinder-examples%2Fconda/034931911e853252322f2309f1246a4f1076fd7d
This provider is typically used if you are deploying binderhub yourself and you require access to repositories that
are not in one of the supported providers.
"""
name = Unicode("Git")
display_name = "Git repository"
labels = {
"text": "Arbitrary git repository URL (http://git.example.com/repo)",
"tag_text": "Git ref (branch, tag, or commit)",
"ref_prop_disabled": False,
"label_prop_disabled": False,
}
allowed_protocols = Set(
Unicode(),
default_value={
"http",
"https",
"git",
"ssh",
},
config=True,
help="""Specify allowed git protocols. Default: http[s], git, ssh.""",
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.escaped_url, unresolved_ref = self.spec.split("/", 1)
self.repo = urllib.parse.unquote(self.escaped_url)
# handle `git@github.com:path` git ssh url, map to standard url format
ssh_match = GIT_SSH_PATTERN.match(self.repo)
if ssh_match:
user_host, path = ssh_match.groups()
self.repo = f"ssh://{user_host}/{path}"
proto = urlparse(self.repo).scheme
if proto not in self.allowed_protocols:
raise ValueError(
f"Unsupported git url {self.repo}, protocol {proto} not in {', '.join(self.allowed_protocols)}"
)
self.unresolved_ref = urllib.parse.unquote(unresolved_ref)
if not self.unresolved_ref:
raise ValueError(
"`unresolved_ref` must be specified in the url for the basic git provider"
)
async def get_resolved_ref(self):
if hasattr(self, "resolved_ref"):
return self.resolved_ref
if self.is_valid_sha1(self.unresolved_ref):
# The ref already was a valid SHA hash
self.resolved_ref = self.unresolved_ref
else:
# The ref is a head/tag and we resolve it using `git ls-remote`
command = ["git", "ls-remote", "--", self.repo, self.unresolved_ref]
proc = await asyncio.create_subprocess_exec(
*command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await proc.communicate()
retcode = await proc.wait()
if retcode:
raise RuntimeError(
f"Unable to run git ls-remote to get the `resolved_ref`: {stderr.decode()}"
)
if not stdout:
return None
resolved_ref = stdout.decode().split(None, 1)[0]
if not self.is_valid_sha1(resolved_ref):
raise ValueError(
f"resolved_ref {resolved_ref} is not a valid sha1 hexadecimal hash"
)
self.resolved_ref = resolved_ref
return self.resolved_ref
async def get_resolved_spec(self):
if not hasattr(self, "resolved_ref"):
self.resolved_ref = await self.get_resolved_ref()
return f"{self.escaped_url}/{self.resolved_ref}"
def get_repo_url(self):
return self.repo
async def get_resolved_ref_url(self):
# not possible to construct ref url of unknown git provider
return self.get_repo_url()
def get_build_slug(self):
return self.repo
class GitLabRepoProvider(RepoProvider):
"""GitLab provider.
GitLab allows nested namespaces (eg. root/project/component/repo) thus we need to urlescape the namespace of this
repo. Users must provide a spec that matches the following form.
<url-escaped-namespace>/<unresolved_ref>
eg:
group%2Fproject%2Frepo/main
"""
name = Unicode("GitLab")
display_name = "GitLab.com"
hostname = Unicode(
"gitlab.com",
config=True,
help="""The host of the GitLab instance
For personal GitLab servers.
""",
)
access_token = Unicode(
config=True,
help="""GitLab OAuth2 access token for authentication with the GitLab API
For use with client_secret.
Loaded from GITLAB_ACCESS_TOKEN env by default.
""",
)
@default("access_token")
def _access_token_default(self):
return os.getenv("GITLAB_ACCESS_TOKEN", "")
private_token = Unicode(
config=True,
help="""GitLab private token for authentication with the GitLab API
Loaded from GITLAB_PRIVATE_TOKEN env by default.
""",
)
@default("private_token")
def _private_token_default(self):
return os.getenv("GITLAB_PRIVATE_TOKEN", "")
auth = Dict(
help="""Auth parameters for the GitLab API access
Populated from access_token, private_token
"""
)
@default("auth")
def _default_auth(self):
auth = {}
for key in ("access_token", "private_token"):
value = getattr(self, key)
if value:
auth[key] = value
return auth
@default("git_credentials")
def _default_git_credentials(self):
if self.private_token:
return rf"username=binderhub\npassword={self.private_token}"
return ""
labels = {
"text": "GitLab.com repository or URL",
"tag_text": "Git ref (branch, tag, or commit)",
"ref_prop_disabled": False,
"label_prop_disabled": False,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.quoted_namespace, unresolved_ref = self.spec.split("/", 1)
self.namespace = urllib.parse.unquote(self.quoted_namespace)
self.unresolved_ref = urllib.parse.unquote(unresolved_ref)
if not self.unresolved_ref:
raise ValueError("An unresolved ref is required")
async def get_resolved_ref(self):
if hasattr(self, "resolved_ref"):
return self.resolved_ref
namespace = urllib.parse.quote(self.namespace, safe="")
client = AsyncHTTPClient()
api_url = "https://{hostname}/api/v4/projects/{namespace}/repository/commits/{ref}".format(
hostname=self.hostname,
namespace=namespace,
ref=urllib.parse.quote(self.unresolved_ref, safe=""),
)
self.log.debug("Fetching %s", api_url)
if self.auth:
# Add auth params. After logging!
api_url = url_concat(api_url, self.auth)
try:
resp = await client.fetch(api_url, user_agent="BinderHub")
except HTTPError as e:
if e.code == 404:
return None
else:
raise
ref_info = json.loads(resp.body.decode("utf-8"))
self.resolved_ref = ref_info["id"]
return self.resolved_ref
async def get_resolved_spec(self):
if not hasattr(self, "resolved_ref"):
self.resolved_ref = await self.get_resolved_ref()
return f"{self.quoted_namespace}/{self.resolved_ref}"
def get_build_slug(self):
# escape the name and replace dashes with something else.
return "-".join(p.replace("-", "_-") for p in self.namespace.split("/"))
def get_repo_url(self):
return f"https://{self.hostname}/{self.namespace}.git"
async def get_resolved_ref_url(self):
if not hasattr(self, "resolved_ref"):
self.resolved_ref = await self.get_resolved_ref()
return f"https://{self.hostname}/{self.namespace}/tree/{self.resolved_ref}"
class GitHubRepoProvider(RepoProvider):
"""Repo provider for the GitHub service"""
name = Unicode("GitHub")
display_name = "GitHub"
# shared cache for resolved refs
cache = Cache(1024)
# separate cache with max age for 404 results
# 404s don't have ETags, so we want them to expire at some point
# to avoid caching a 404 forever since e.g. a missing repo or branch
# may be created later
cache_404 = Cache(1024, max_age=300)
hostname = Unicode(
"github.com",
config=True,
help="""The GitHub hostname to use
Only necessary if not github.com,
e.g. GitHub Enterprise.
""",
)
api_base_path = Unicode(
"https://api.{hostname}",
config=True,
help="""The base path of the GitHub API
Only necessary if not github.com,
e.g. GitHub Enterprise.
Can use {hostname} for substitution,
e.g. 'https://{hostname}/api/v3'
""",
)
client_id = Unicode(
config=True,
help="""GitHub client id for authentication with the GitHub API
For use with client_secret.
Loaded from GITHUB_CLIENT_ID env by default.
""",
)
@default("client_id")
def _client_id_default(self):
return os.getenv("GITHUB_CLIENT_ID", "")
client_secret = Unicode(
config=True,
help="""GitHub client secret for authentication with the GitHub API
For use with client_id.
Loaded from GITHUB_CLIENT_SECRET env by default.
""",
)
@default("client_secret")
def _client_secret_default(self):
return os.getenv("GITHUB_CLIENT_SECRET", "")
access_token = Unicode(
config=True,
help="""GitHub access token for authentication with the GitHub API
Loaded from GITHUB_ACCESS_TOKEN env by default.
""",
)
@default("access_token")
def _access_token_default(self):
return os.getenv("GITHUB_ACCESS_TOKEN", "")
@default("git_credentials")
def _default_git_credentials(self):
if self.access_token:
# Based on https://github.com/blog/1270-easier-builds-and-deployments-using-git-over-https-and-oauth
            # If client_id is specified, assume access_token is a personal access token.
            # Otherwise, assume an oauth basic token.
if self.client_id:
return r"username={client_id}\npassword={token}".format(
client_id=self.client_id, token=self.access_token
)
else:
return rf"username={self.access_token}\npassword=x-oauth-basic"
return ""
labels = {
"text": "GitHub repository name or URL",
"tag_text": "Git ref (branch, tag, or commit)",
"ref_prop_disabled": False,
"label_prop_disabled": False,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user, self.repo, self.unresolved_ref = tokenize_spec(self.spec)
self.repo = strip_suffix(self.repo, ".git")
def get_repo_url(self):
return f"https://{self.hostname}/{self.user}/{self.repo}"
async def get_resolved_ref_url(self):
if not hasattr(self, "resolved_ref"):
self.resolved_ref = await self.get_resolved_ref()
return (
f"https://{self.hostname}/{self.user}/{self.repo}/tree/{self.resolved_ref}"
)
async def github_api_request(self, api_url, etag=None):
client = AsyncHTTPClient()
request_kwargs = {}
if self.client_id and self.client_secret:
request_kwargs.update(
dict(auth_username=self.client_id, auth_password=self.client_secret)
)
headers = {}
# based on: https://developer.github.com/v3/#oauth2-token-sent-in-a-header
if self.access_token:
headers["Authorization"] = f"token {self.access_token}"
if etag:
headers["If-None-Match"] = etag
req = HTTPRequest(
api_url, headers=headers, user_agent="BinderHub", **request_kwargs
)
try:
resp = await client.fetch(req)
except HTTPError as e:
if e.code == 304:
resp = e.response
elif (
e.code == 403
and e.response
and "x-ratelimit-remaining" in e.response.headers
and e.response.headers.get("x-ratelimit-remaining") == "0"
):
rate_limit = e.response.headers["x-ratelimit-limit"]
reset_timestamp = int(e.response.headers["x-ratelimit-reset"])
reset_seconds = int(reset_timestamp - time.time())
self.log.error(
"GitHub Rate limit ({limit}) exceeded. Reset in {delta}.".format(
limit=rate_limit,
delta=timedelta(seconds=reset_seconds),
)
)
# round expiry up to nearest 5 minutes
minutes_until_reset = 5 * (1 + (reset_seconds // 60 // 5))
raise ValueError(
f"GitHub rate limit exceeded. Try again in {minutes_until_reset} minutes."
)
# Status 422 is returned by the API when we try and resolve a non
# existent reference
elif e.code in (404, 422):
return None
else:
raise
if "x-ratelimit-remaining" in resp.headers:
# record and log github rate limit
remaining = int(resp.headers["x-ratelimit-remaining"])
rate_limit = int(resp.headers["x-ratelimit-limit"])
reset_timestamp = int(resp.headers["x-ratelimit-reset"])
# record with prometheus
GITHUB_RATE_LIMIT.set(remaining)
# log at different levels, depending on remaining fraction
fraction = remaining / rate_limit
if fraction < 0.2:
log = self.log.warning
elif fraction < 0.5:
log = self.log.info
else:
log = self.log.debug
            # str(timedelta) looks like '0:00:32'
delta = timedelta(seconds=int(reset_timestamp - time.time()))
log(
"GitHub rate limit remaining {remaining}/{limit}. Reset in {delta}.".format(
remaining=remaining,
limit=rate_limit,
delta=delta,
)
)
return resp
async def get_resolved_ref(self):
if hasattr(self, "resolved_ref"):
return self.resolved_ref
api_url = "{api_base_path}/repos/{user}/{repo}/commits/{ref}".format(
api_base_path=self.api_base_path.format(hostname=self.hostname),
user=self.user,
repo=self.repo,
ref=self.unresolved_ref,
)
self.log.debug("Fetching %s", api_url)
cached = self.cache.get(api_url)
if cached:
etag = cached["etag"]
self.log.debug("Cache hit for %s: %s", api_url, etag)
else:
cache_404 = self.cache_404.get(api_url)
if cache_404:
self.log.debug("Cache hit for 404 on %s", api_url)
return None
etag = None
resp = await self.github_api_request(api_url, etag=etag)
if resp is None:
self.log.debug("Caching 404 on %s", api_url)
self.cache_404.set(api_url, True)
return None
if resp.code == 304:
self.log.info("Using cached ref for %s: %s", api_url, cached["sha"])
self.resolved_ref = cached["sha"]
# refresh cache entry
self.cache.move_to_end(api_url)
return self.resolved_ref
elif cached:
self.log.debug("Cache outdated for %s", api_url)
ref_info = json.loads(resp.body.decode("utf-8"))
if "sha" not in ref_info:
# TODO: Figure out if we should raise an exception instead?
self.log.warning("No sha for %s in %s", api_url, ref_info)
self.resolved_ref = None
return None
# store resolved ref and cache for later
self.resolved_ref = ref_info["sha"]
self.cache.set(
api_url,
{
"etag": resp.headers.get("ETag"),
"sha": self.resolved_ref,
},
)
return self.resolved_ref
async def get_resolved_spec(self):
if not hasattr(self, "resolved_ref"):
self.resolved_ref = await self.get_resolved_ref()
return f"{self.user}/{self.repo}/{self.resolved_ref}"
def get_build_slug(self):
return f"{self.user}-{self.repo}"
class GistRepoProvider(GitHubRepoProvider):
"""GitHub gist provider.
Users must provide a spec that matches the following form (similar to github)
[https://gist.github.com/]<username>/<gist-id>[/<ref>]
The ref is optional, valid values are
- a full sha1 of a ref in the history
- HEAD for the latest ref (also allow 'master', 'main' as aliases for HEAD)
If HEAD or no ref is specified the latest revision will be used.
"""
name = Unicode("Gist")
display_name = "Gist"
hostname = Unicode("gist.github.com")
allow_secret_gist = Bool(
default_value=False,
config=True,
help="Flag for allowing usages of secret Gists. The default behavior is to disallow secret gists.",
)
labels = {
"text": "Gist ID (username/gistId) or URL",
"tag_text": "Git commit SHA",
"ref_prop_disabled": False,
"label_prop_disabled": False,
}
def __init__(self, *args, **kwargs):
        # We don't need to initialize entirely the same way as GitHub
super(RepoProvider, self).__init__(*args, **kwargs)
parts = self.spec.split("/")
self.user, self.gist_id, *_ = parts
if len(parts) > 2:
self.unresolved_ref = parts[2]
else:
self.unresolved_ref = ""
def get_repo_url(self):
return f"https://{self.hostname}/{self.user}/{self.gist_id}.git"
async def get_resolved_ref_url(self):
if not hasattr(self, "resolved_ref"):
self.resolved_ref = await self.get_resolved_ref()
return f"https://{self.hostname}/{self.user}/{self.gist_id}/{self.resolved_ref}"
async def get_resolved_ref(self):
if hasattr(self, "resolved_ref"):
return self.resolved_ref
api_url = f"https://api.github.com/gists/{self.gist_id}"
self.log.debug("Fetching %s", api_url)
resp = await self.github_api_request(api_url)
if resp is None:
return None
ref_info = json.loads(resp.body.decode("utf-8"))
if (not self.allow_secret_gist) and (not ref_info["public"]):
raise ValueError(
"You seem to want to use a secret Gist, but do not have permission to do so. "
"To enable secret Gist support, set (or have an administrator set) "
"'GistRepoProvider.allow_secret_gist = True'"
)
all_versions = [e["version"] for e in ref_info["history"]]
if self.unresolved_ref in {"", "HEAD", "master", "main"}:
self.resolved_ref = all_versions[0]
else:
if self.unresolved_ref not in all_versions:
return None
else:
self.resolved_ref = self.unresolved_ref
return self.resolved_ref
async def get_resolved_spec(self):
if not hasattr(self, "resolved_ref"):
self.resolved_ref = await self.get_resolved_ref()
return f"{self.user}/{self.gist_id}/{self.resolved_ref}"
def get_build_slug(self):
return self.gist_id
# A (v)formatter that removes used keys from its arguments
class ConsumingFormatter(string.Formatter):
def check_unused_args(self, used_args, args, kwargs):
ints = set()
for key in used_args:
if isinstance(key, int):
ints.add(key)
else:
try:
del kwargs[key]
except KeyError:
pass
for key in sorted(ints, reverse=True):
try:
args.pop(key)
except IndexError:
pass
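# Illustration (added note, values hypothetical): fields that the format
# string consumes are removed from the argument list, e.g.
#   spec = ['alice', 'projA', 'main']
#   ConsumingFormatter().vformat('/curated/{0}', spec, {})  # -> '/curated/alice'
#   # spec is now ['projA', 'main'], left over for the directory walk below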
class LocalDirRepoProvider(RepoProvider):
"""Local host directory provider.
This provider just passes the given local host directory "repo" directly to repo2docker, mounted as a host_path volume.
"""
name = Unicode('LocalDir')
display_name = "Local directory"
labels = {
"text": "Local directory root",
"tag_text": "Local directory path",
"ref_prop_disabled": True,
"label_prop_disabled": True,
}
allowed_paths = List(
config = True,
help="""
Prefixes for paths that are allowed to be used as repos.
By default, none are allowed. Set to ['/'] to allow all.
""")
required_marker = Unicode(
config = True,
help="""
If set, a file by this name must exist in any "repo" directory.
""")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
path = self.spec
if not path.startswith('/'):
path = '/' + path
path = os.path.normpath(path)
self.allowed_index = next(i for (i, p) in enumerate(self.allowed_paths) if path.startswith(p))
if not os.path.exists(os.path.join(path, self.required_marker)):
raise ValueError('path not allowed')
self.path = path
def get_repo_url(self):
return self.path
def get_build_slug(self):
return '{0}/{1}'.format(self.allowed_index, self.path[len(self.allowed_paths[self.allowed_index]):])
async def get_resolved_ref(self):
s = os.lstat(self.path)
if not S_ISDIR(s.st_mode):
raise NotADirectoryError(self.path)
m = os.stat(os.path.join(self.path, self.required_marker))
# ctime should really be recursive somehow...
return format(max(s.st_ctime_ns, m.st_ctime_ns), 'x')
async def get_resolved_spec(self):
return self.path
async def get_resolved_ref_url(self):
return self.path
class CuratedRepoProvider(RepoProvider):
"""Curated meta-repo provider.
This provider uses a meta-config that contains the specifications for other repositories.
This can be used as a level of indirection to restrict what can be launched.
"""
name = Unicode('Curated')
display_name = "Curated environment"
labels = {
"text": "Owner",
"tag_text": "Project",
"tag_placeholder": "Project name (may contain slashes)",
"ref_prop_disabled": False,
"label_prop_disabled": False,
}
config_path = Unicode(
config=True,
help="""
Path to the configuration.
{N} is expanded to the Nth component of the repo specification.
Remaining unused components are then looked up in the indicated directory or yaml file.""")
dir_config = Unicode(
config=True,
help="""
        If set, a directory containing this yaml config file (or empty) may be used as a LocalDirRepoProvider repo2docker directory.
""")
providers = Dict(#BinderHub.repo_providers.default_value,
{
'gh': GitHubRepoProvider,
'gist': GistRepoProvider,
'git': GitRepoProvider,
'gl': GitLabRepoProvider,
'dir': LocalDirRepoProvider
},
config=True,
help="""Repo Providers to register""")
allowed_mounts = Set(
config = True,
help="""
Prefixes for paths that are allowed to be mounted.
By default, none are allowed. Set to ['/'] to allow all.
""")
default_options = Dict(
config = True,
help="""
Default launch options to pass to the spawner.
""")
allowed_options = Set(
config = True,
help="""
Launch options that may be overridden by the configuration yaml.
""")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
safe_chars = frozenset(string.ascii_letters + string.digits + '-_/')
if not all(c in safe_chars for c in self.spec):
raise ValueError('Invalid characters in spec')
spec = list(filter(None, self.spec.split('/')))
self.specuser = spec[0]
path = ConsumingFormatter().vformat(self.config_path, spec, {})
try:
while True:
stat = os.lstat(path)
if S_ISDIR(stat.st_mode):
if spec:
# recurse into directory
h = spec.pop(0)
path = os.path.join(path, h)
continue
elif self.dir_config:
# treat as repo2docker dir
with open(os.path.join(path, self.dir_config)) as f:
params = yaml.safe_load(f)
if params is None:
params = {}
params.setdefault('provider', 'dir')
params.setdefault('spec', path)
if 'mounts' not in params:
params['mounts'] = {}
for ent in os.scandir(path):
if not ent.name.startswith('.') and ent.is_symlink() and ent.is_dir():
targ = os.readlink(ent.path)
if self.check_mount(targ, stat):
params['mounts'][ent.name] = targ
elif S_ISREG(stat.st_mode):
# load yaml file
with open(path) as f:
params = yaml.safe_load(f)
for p in spec:
params = params[p]
break
except OSError as e:
self.log.info("Looking up %s: %s", self.spec, e)
self.params = {}
self.provider = None
return
self.params = params
provider = self.providers[params.get('provider', 'gh')]
try:
spec = params['spec']
except KeyError:
spec = params['repo'] + '/' + params.get('branch', 'master')
self.provider = provider(config = self.config, spec = spec)
self.mounts = params.get('mounts', {})
for (mount, path) in self.mounts.items():
if not self.check_mount(path, stat):
raise PermissionError(path)
def check_mount(self, path, stat):
if not (os.path.isabs(path) and any(path.startswith(a) for a in self.allowed_mounts)):
return False
s = os.lstat(path)
if not S_ISDIR(s.st_mode):
return False
need = S_IROTH | S_IXOTH
if s.st_uid != stat.st_uid and s.st_gid != stat.st_gid or s.st_mode & need != need:
return False
return True
def get_repo_url(self):
if not self.provider:
return None
return self.provider.get_repo_url()
async def get_resolved_ref(self):
if not self.provider:
return None
return await self.provider.get_resolved_ref()
async def get_resolved_spec(self):
if not self.provider:
return None
return await self.provider.get_resolved_spec()
async def get_resolved_ref_url(self):
if not self.provider:
return None
return await self.provider.get_resolved_ref_url()
def get_build_slug(self):
return self.provider.get_build_slug()
def mount_path(self, path):
if not path.startswith('/'):
path = '/home/jovyan/' + path
return path
def check_limit(self, opts, lim, typ, maximum):
try:
val = opts[lim+'_'+typ]
except KeyError:
return
if lim == 'mem':
val = ByteSpecification.validate(ByteSpecification, None, val)
if not (0 < val <= maximum):
raise ValueError('Invalid %s limit'%(lim))
return val
def check_limits(self, opts, lim):
maximum = { # TODO: make configurable
'cpu': 16,
'mem': 274877906944 # '256G'
}
l = self.check_limit(opts, lim, 'limit', maximum[lim])
self.check_limit(opts, lim, 'guarantee', l or maximum[lim])
def get_launch_options(self):
options = {'volumes': [], 'volume_mounts': [],
'extra_labels': {'specuser': self.specuser}
}
options.update(self.default_options)
for idx, (mount, path) in enumerate(self.mounts.items(), 1):
name = 'mount%d'%idx
options['volumes'].append(client.V1Volume(name=name, host_path=client.V1HostPathVolumeSource(path=path, type='Directory')).to_dict())
options['volume_mounts'].append(client.V1VolumeMount(name=name, mount_path=self.mount_path(mount), read_only=True).to_dict())
for k in self.allowed_options:
try:
options[k] = self.params[k]
except KeyError:
pass
# Do some sanity checks:
for lim in ['cpu', 'mem']:
self.check_limits(options, lim)
return options
def check_hub_user(self, user):
if user['admin']:
return True
users = self.params.get('users')
if type(users) is str:
users = users.split()
if users and not user['name'] in users:
return False
return super().check_hub_user(user)
|
UTF-8
|
Python
| false
| false
| 44,813
|
py
| 6
|
repoproviders.py
| 2
| 0.578761
| 0.572646
| 0
| 1,339
| 32.467513
| 145
|
seanigami/QuickYOLO
| 13,486,197,340,174
|
2f9fdf20119fd7b768b179dc047287a064fa1d08
|
90d6f90d35d0cf1778bd8282e78abdfcaaf4e4c9
|
/export_larq_model.py
|
a97383eb1907b6dc2b3fc77729f5cc80a1778a7c
|
[
"MIT"
] |
permissive
|
https://github.com/seanigami/QuickYOLO
|
adfb78df2e4305105fe4698794a84e4fde6ff0cf
|
77d8c544ca564b1c0e26815ed5fe99dd855dfd23
|
refs/heads/main
| 2023-04-07T09:06:36.412880
| 2021-04-13T03:39:22
| 2021-04-13T03:39:22
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import larq_compute_engine as lce
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
from yolov3.dataset import Dataset
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights, postprocess_boxes, nms_no_gather
from yolov3.configs import *
import shutil
import json
import time
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')")
# Custom Keras layer, for easy exporting
class PostProcess(tf.keras.layers.Layer):
def __init__(self, iou_threshold, score_threshold, **kwargs):
self.iou_threshold = iou_threshold
self.score_threshold = score_threshold
super(PostProcess, self).__init__(**kwargs)
def post_prediction_process(self,
pred_boxes):
flattened_boxes = tf.reshape(pred_boxes, (-1, tf.shape(pred_boxes)[-1]))
boxes = postprocess_boxes(flattened_boxes, score_threshold=self.score_threshold)
selected_indices = nms_no_gather(boxes, iou_threshold=self.iou_threshold)
boxes, box_scores, box_classes = tf.split(boxes, (4, 1, 1), axis=-1)
box_scores = tf.squeeze(box_scores, axis=-1)
box_classes = tf.cast(box_classes, dtype=tf.int32)
box_classes = tf.squeeze(box_classes, axis=-1)
return boxes, box_scores, box_classes, selected_indices
def call(self, y_pred):
return self.post_prediction_process(y_pred)
if __name__ == '__main__':
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_TYPE == "yolov2":
Darknet_weights = YOLO_V2_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}") # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(f"./checkpoints/{TRAIN_MODEL_NAME}", tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
post_processed_output = PostProcess(TEST_IOU_THRESHOLD, TEST_SCORE_THRESHOLD)(yolo.output)
yolo = tf.keras.models.Model(yolo.input, post_processed_output)
flatbuffer_bytes = lce.convert_keras_model(yolo)
# export
exported_model_path = f'checkpoints/{TRAIN_MODEL_NAME}.tflite'
with open(exported_model_path, "wb") as flatbuffer_file:
flatbuffer_file.write(flatbuffer_bytes)
print(f'exported to: {exported_model_path}')
|
UTF-8
|
Python
| false
| false
| 3,259
|
py
| 14
|
export_larq_model.py
| 11
| 0.677202
| 0.668917
| 0
| 74
| 43.054054
| 115
|
GNUCS-SYJ/AlgorithmStudy
| 4,372,276,710,521
|
e8ddb9581dd82c295e759afc83308753874784e0
|
88f65e4f83ea5fbfc7b2ce92d1a0372769c6013a
|
/suho/withpython/Graph/Q45.py
|
902e33b55edfb68e019c9046728d35ca8124cd18
|
[] |
no_license
|
https://github.com/GNUCS-SYJ/AlgorithmStudy
|
e8bf85d5f77567ec17ba7925104feb2ef29fb9a2
|
7f17aca5163b83644c605993d73ef90e82fc5045
|
refs/heads/main
| 2023-04-23T02:44:22.214145
| 2022-06-11T06:39:55
| 2022-06-11T06:39:55
| 332,159,173
| 0
| 6
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from collections import deque
import sys
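# Added note (not in the original file): this is Kahn's topological sort over
# a tournament graph. Last year's ranking gives an edge from every
# higher-placed team to every lower-placed one, and this year's results flip
# individual edges, so exactly one edge exists between every pair. A team
# popped with in-degree 0 therefore beats all remaining teams, which is why
# every in-degree can be decremented unconditionally below. The ranking is
# unique only if the queue never holds more than one team; an empty queue
# mid-run means the results are contradictory ("IMPOSSIBLE").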
t = int(input())
for _ in range(t):
n = int(input())
in_degree = [0] * (n+1)
graph = [[0] * (n+1) for _ in range(n+1)]
last_year = list(map(int, sys.stdin.readline().rstrip().split()))
for i in range(n):
for j in range(i+1, n):
graph[last_year[i]][last_year[j]] = 1
in_degree[last_year[j]] += 1
m = int(input())
for _ in range(m):
a, b = map(int, input().split())
if graph[a][b] == 1:
graph[a][b] = 0
graph[b][a] = 1
in_degree[a] += 1
in_degree[b] -= 1
else:
graph[a][b] = 1
graph[b][a] = 0
in_degree[a] -= 1
in_degree[b] += 1
q = deque()
cur_year = []
impossible = False
many_cases = False
for i in range(1, n+1):
if in_degree[i] == 0:
q.append(i)
for i in range(n):
if len(q) == 0:
impossible = True
break
if len(q) >= 2:
many_cases = True
break
team = q.popleft()
cur_year.append(team)
for i in range(1, n+1):
graph[team][i] = 0
in_degree[i] -= 1
if in_degree[i] == 0:
q.append(i)
if impossible:
print("IMPOSSIBLE")
elif many_cases:
print("?")
else:
for i in range(n):
print(cur_year[i], end=' ')
print()
|
UTF-8
|
Python
| false
| false
| 1,481
|
py
| 756
|
Q45.py
| 412
| 0.430115
| 0.411884
| 0
| 66
| 21.439394
| 69
|
evanjamesjackson/spotify_recommender
| 18,717,467,515,367
|
00e70a2897dd1cf275e5fcfbfddb19fdaa62bff1
|
a04ff8e343b4d3b2ef3193722dfbaf197df42a04
|
/lastipy/spotify/token.py
|
bd3f39b233d622b015923b9e63a6d0d6c3627943
|
[] |
no_license
|
https://github.com/evanjamesjackson/spotify_recommender
|
4a36ca6d87faacaaefccec876a29f5b390414264
|
e011977ff2d13d85243dc69052b3f972dc84abee
|
refs/heads/master
| 2020-05-09T19:18:08.674279
| 2020-05-08T20:06:16
| 2020-05-08T20:06:16
| 181,373,367
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import spotipy.oauth2 as oauth2
import webbrowser
import os
from lastipy import definitions
REDIRECT_URI = 'https://www.example.com/callback/'
#TODO test
def get_token(username, client_id_key, client_secret_key):
'''Returns a Spotify token for the given user. If a cached token file exists (with the format .cache-<username>),
it is returned; otherwise, the given user will be prompted to authorize the app.
    This function was modified from util.py in spotipy in order to expose the cache path'''
# These are the only scopes required by this app so no need to parameterize this
scope = 'playlist-modify-public user-library-read user-library-modify user-follow-read'
sp_oauth = oauth2.SpotifyOAuth(client_id_key,
client_secret_key,
REDIRECT_URI,
scope=scope,
cache_path=os.path.join(definitions.ROOT_DIR, '.cache-' + username))
token_info = sp_oauth.get_cached_token()
if not token_info:
print('''
User authentication requires interaction with your
web browser. Once you enter your credentials and
give authorization, you will be redirected to
            a url. Paste that url here to
            complete the authorization.
''')
auth_url = sp_oauth.get_authorize_url()
try:
webbrowser.open(auth_url)
print("Opened %s in your browser" % auth_url)
except:
print("Please navigate here: %s" % auth_url)
print()
print()
response = input("Enter the URL you were redirected to: ")
print()
print()
code = sp_oauth.parse_response_code(response)
token_info = sp_oauth.get_access_token(code)
if token_info:
return token_info['access_token']
else:
return None
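# Hypothetical usage (placeholder credentials, added as an example):
#   token = get_token('alice', 'my-client-id', 'my-client-secret')
# The first run opens a browser for authorization; later runs reuse the
# .cache-alice file stored under definitions.ROOT_DIR until it expires.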
|
UTF-8
|
Python
| false
| false
| 1,947
|
py
| 33
|
token.py
| 31
| 0.605547
| 0.604006
| 0
| 54
| 35.055556
| 118
|
jsqwe5656/MyPythonNote
| 15,702,400,440,337
|
37b49a9efd53b703c7441867ca352e36da77fa94
|
fa46150df2ed96a9177288929047f03a184b5c43
|
/py_process/demo_dieloop.py
|
79f6338bbcb63385f6f449307e30d94b211f678c
|
[] |
no_license
|
https://github.com/jsqwe5656/MyPythonNote
|
e588202312f7e7929a5606266418b99f83f0ab3b
|
7434c84f6fda4c2e13e00907f26f995d8e06251a
|
refs/heads/master
| 2021-01-19T03:59:33.084181
| 2017-03-09T09:29:35
| 2017-03-09T09:29:35
| 84,422,061
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding: utf-8 -*-
import threading,multiprocessing
def loop():
x = 10
while True:
x = x+1
print(x)
for i in range(multiprocessing.cpu_count()):
t = threading.Thread(target=loop)
t.start()
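# Added note (not in the original file): because of CPython's GIL, these
# pure-Python busy loops execute one bytecode stream at a time, so even with
# one thread per core the process stays near 100% of a single core; use
# multiprocessing.Process instead of threading.Thread for real multi-core load.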
|
UTF-8
|
Python
| false
| false
| 236
|
py
| 61
|
demo_dieloop.py
| 56
| 0.567797
| 0.550847
| 0
| 14
| 15.928571
| 44
|
DaTimsta/euler
| 163,208,770,984
|
23315e0a7dd772a27ac7b233445359176bb34ad2
|
ec4573f8b36c60a8175c63833f3015c697aa06a6
|
/097.py
|
2c3cb2053a692dc1bd84fb4b52b230359aa7429f
|
[] |
no_license
|
https://github.com/DaTimsta/euler
|
366702ac6567cc5adf41c6e273f2e8f8b6672c3c
|
95a1508f2b0b17f62a5915658273098002f2e476
|
refs/heads/master
| 2018-01-07T23:36:27.847761
| 2017-11-18T16:20:14
| 2017-11-18T16:20:14
| 49,143,865
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# https://projecteuler.net/problem=97
from euler import num_digits
from time import time
T = time()
s = 28433
for i in range(0, 7830457):
s *= 2
if num_digits(s) == 11:
s %= (10**10)
print(s + 1)
print('Time elapsed:', time() - T)
# notice that python can do it easily (and faster) just by inputting the formula
# print((28433 * (2 ** 7830457) + 1) % (10**10))
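# Added note: Python's three-argument pow does modular exponentiation, which
# never materializes the roughly 2.4-million-digit intermediate value:
# print((28433 * pow(2, 7830457, 10**10) + 1) % (10**10))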
|
UTF-8
|
Python
| false
| false
| 380
|
py
| 88
|
097.py
| 87
| 0.621053
| 0.513158
| 0
| 15
| 24.266667
| 80
|
Sibyx/mdns
| 14,199,161,911,255
|
e712b2ecd0382526b43d0563552b5ed7b5d6e92e
|
39148a9cfd32be3f074810cdecbfe5f7342b0fc0
|
/core/models/taxonomic_class.py
|
c156ee90933f861fd43e993cd4ff758bd32d4ff9
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/Sibyx/mdns
|
71d4e9c49e6106940d2bcb7b77c94f7d470b9788
|
6c65364ef04b413a734dd495fba85569e2648d73
|
refs/heads/master
| 2023-01-29T10:36:18.112606
| 2021-03-05T01:35:19
| 2021-03-05T01:35:19
| 170,988,517
| 1
| 0
|
Apache-2.0
| false
| 2023-01-07T04:29:56
| 2019-02-16T09:59:15
| 2021-03-05T01:35:21
| 2023-01-07T04:29:56
| 3,688
| 0
| 0
| 28
|
Python
| false
| false
|
from django.db import models
from core.models.base import BaseModel
from core.models.taxonomic_phylum import TaxonomicPhylum
class TaxonomicClass(BaseModel):
class Meta:
app_label = 'core'
default_permissions = ()
db_table = 'taxonomic_classes'
taxonomic_phylum = models.ForeignKey(TaxonomicPhylum, on_delete=models.CASCADE)
name = models.CharField(max_length=45, unique=True)
def __str__(self):
return self.name
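# Hedged usage sketch (field names assumed from the imports above, not part
# of the original module):
#   phylum = TaxonomicPhylum.objects.get(name='Chordata')
#   TaxonomicClass.objects.create(taxonomic_phylum=phylum, name='Mammalia')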
|
UTF-8
|
Python
| false
| false
| 466
|
py
| 76
|
taxonomic_class.py
| 53
| 0.703863
| 0.699571
| 0
| 17
| 26.411765
| 83
|
GutuAlexei/Introducere_Afi-are_Calcule
| 14,877,766,735,687
|
2702a9cb206c748e854a5f75564bef585fcac3a2
|
e7228b2e25236cad4b5bc1a623f81fe1be2405d4
|
/problema_7_AG.py
|
89ac5cf87d5ebda5606b7f528f632498db0f45c7
|
[] |
no_license
|
https://github.com/GutuAlexei/Introducere_Afi-are_Calcule
|
061efbd8fcc79cb9bc3fea8487f6a368263aef87
|
c5b4380ee05e70ac6ff7f80cfb9a9410049a01b2
|
refs/heads/master
| 2022-12-24T19:00:34.477254
| 2020-10-03T22:39:09
| 2020-10-03T22:39:09
| 299,917,219
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
a=int(input('enter age='))
Gr=2*a+8
I=5*a+80
print('ideal weight =', Gr, 'kg')
print('ideal height =', I, 'cm')
|
UTF-8
|
Python
| false
| false
| 126
|
py
| 10
|
problema_7_AG.py
| 10
| 0.603175
| 0.563492
| 0
| 5
| 23.6
| 37
|
alexazf/mipt-python3
| 13,494,787,247,451
|
fa0f3bdd7e890faa7d8298012fe078e1abea73f9
|
4458cf1abe59f89c43cab50b13b315f4fa49cc0d
|
/Lesson-2/task_17.py
|
8b5af28c5c85a285ee812c6a579d04b9b39c6c57
|
[] |
no_license
|
https://github.com/alexazf/mipt-python3
|
04782a82a722501c645ab30340187fcb271f25ef
|
822a03cc2c8cafcaad176b16b572060cc3e53ae4
|
refs/heads/master
| 2018-10-29T02:37:19.552317
| 2018-10-09T17:14:33
| 2018-10-09T17:14:33
| 144,759,635
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
from pyrob.api import *
from move.index import *
@task
def task_8_27():
move(move_up, cell_is_filled, False)
move_left()
if not cell_is_filled():
move_right(2)
if __name__ == '__main__':
run_tasks()
|
UTF-8
|
Python
| false
| false
| 248
|
py
| 82
|
task_17.py
| 78
| 0.580645
| 0.560484
| 0
| 17
| 13.588235
| 40
|
Kreloc/python
| 17,025,250,369,633
|
6515cf6553c4aeda6f56c88d39174bd7665aaf8a
|
e8ec8e30cbbeb9b1a27c7093509e655d4070bc43
|
/getDinnerMenuForWeek.py
|
f32bb7744d2369ad0bd3a24016e1d5e57d050a3e
|
[] |
no_license
|
https://github.com/Kreloc/python
|
b627f412470f638c305b0eacb30333420571a1eb
|
93e9c532fc79caf7cce220b2440773d73236724e
|
refs/heads/master
| 2021-01-19T04:39:21.269420
| 2019-04-05T23:37:48
| 2019-04-05T23:37:48
| 63,201,440
| 1
| 4
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#! python3
# getDinnerSelection - Gets dinner selection for the week from list in script
import random
#import os
#Dictionary of dinners and their ingredients
dinner_ingredients = {
'breakfast burritos': 'Eggs, tortillas(Burrito size), maple sausage, cheese, sour cream',
'chicken enchiladas': 'Boneless skinless chicken, cheese, black olives (chopped), green onions, enchilada sauce mix, flour tortillas, tomato paste',
'fajitas': 'Chicken/Steak meat, large onion, orange pepper, yellow pepper, refried beans, tortillas, shredded cheese, sour cream, salsa',
'kalua pork': 'Pork butt roast, Hawaiian sea salt, Liquid smoke flavoring',
'lasagna': 'Ground beef, tomato sauce, tomato paste, garlic clove, Ricotta cheese, lasagna noodles, eggs, Italian seasonings, shredded cheese, shredded parmesan cheese',
'mac & cheese': 'Macaroni noodles, butter, seasoned dry crumbs, flour, salt, milk, Velveeta, shredded cheddar cheese',
'manicotti': 'Ground beef, eggs, tomato sauce, tomato paste, garlic clove, Ricotta cheese, lasagna noodles, eggs, Italian seasonings, shredded cheese, shredded parmesan cheese',
'meatloaf': 'Ground beef, eggs, ketchup, bread crumbs, salt, pepper, Italian seasoning',
'pulled pork': 'Pork shoulder, hamburger buns, leafy green lettuce, Root beer, chili sauce, garlic cloves, root beer concentrate, tomato slices, hot pepper sauce, salt, pepper, cooking oil',
    'red beans with ham hocks': 'Red beans, ham hocks, rice',
'ribs': 'Pork spare ribs, Bbq sauce',
'roasted chicken & veggies': 'Chicken thighs with skin, oil, salt, pepper, dill, italian seasoning, carrots, zucchini',
'salmon chowder': 'Salmon, butter, onion, celery, garlic, potatoes, carrots, chicken broth, salt, butter, dried dill weed, evaporated milk, creamed corn, shredded cheddar cheese',
'Shit on Rice': 'Ground beef, rice, cream of mushroom soup',
'Hamburger Pie': 'Ground beef, green beans, corn, shredded cheese, tomato soup, onion, mashed potatoes',
'Sloppy Joes': 'Ground beef, hamburger buns, tomato paste, sloppy joe mix',
'Chicken drumsticks': 'Chicken thighs/drumsticks',
'Teriyaki chicken burgers': 'Boneless skinless chicken, hamburger buns, Swiss cheese - sliced, pineapple - ring slices, teriyaki sauce',
'Chicken and Noodles': 'Chicken, onion, egg noodles',
'Chicken and Green Beans': 'Boneless skinless chicken, green beans, cream of chicken soup',
'Chicken pot pie': 'Boneless skinless chicken, mixed vegetables, pie crust, onion',
    'Bbq chicken': 'Chicken drumsticks/thighs, Bbq sauce',
'Pork chops': 'Pork chops, milk, cream of mushroom soup',
'Ham - spiral': 'Honey spiral ham',
'Hot dogs': 'Hot dogs, hot dog buns, ketchup, mustard',
'Chili dogs': 'Chili, hot dogs, hot dog buns, cheese, jalapenos',
'Frito Pie': 'Fritos, chili, corn, diced tomatoes, jalapenos',
'Taco salad': 'Ground turkey, tortillas, shredded cheese, olives, beans, sour cream',
'Nachos': 'Ground turkey/leftover tacos, shredded cheese, jalapenos, tortilla chips',
'Rib eye steak': 'Rib eye steak, raspberry chipotle sauce',
'Chicken fried steak': 'Round steak, crackers, eggs, shortening',
    'Tri tip sandwich': 'Tri tip steak, mushrooms, italian bread, mozzarella cheese',
'Soup and grilled cheese sandwiches': 'sliced cheese, tomato soup, bread, butter',
'Pizza rolls': 'Pizza meats (Pepperoni, sausage, etc.), egg roll wrappers, shredded cheese, pizza sauce',
'Teriyaki meat with rice and broccoli': 'Mock beef/Chicken tender/pork loin, rice, broccoli, teriyaki sauce',
'Pot roast and vegetables': 'Pot roast, potatoes, beef broth',
'Beef stew': 'Pot roast meat, beef broth, carrots, Worchester sauce, flour, onion, potatoes',
'Breakfast for dinner': 'Eggs, bacon, bread, butter',
    'Oven fried chicken': 'Chicken drumsticks/thighs, eggs, cornmeal, milk',
'Spaghetti': 'Ground beef, spaghetti rigatoni, mushrooms, tomato paste, tomato sauce, italian seasoning, black pepper, minced garlic, italian bread',
'Taco Dip': 'Leftover taco stuff (meat and beans), shredded cheese, sour cream',
'Beer battered halibut': 'Halibut, batter, beer, shortening',
'Trout': 'Trout, shortening, cornmeal',
'Blackened salmon': 'Salmon, butter, blackened seasonings',
'Tacos': 'Ground turkey, taco seasoning, tortillas, shredded cheese, lettuce, olives, avocado, sour cream',
'Shrimp scampi': 'Shrimp, butter, garlic, noodles',
'White bean chicken chili': 'White beans, chicken meat, olive oil, onion (chopped), chicken stock, salsa verde, ground cumin, coriander, two jalapenos, salt, ground white pepper, white corn',
'Chicken wings': 'Chicken wings'
}
#Define dinner options
dinner_options = [x for x in dinner_ingredients.keys()]
all_ingredients = []
dinner_list = []
#leftover_day_set = False
random_leftover_number = random.randint(4, 6)
for i in range(0, 7):
if i == random_leftover_number:
dinner_list.append('Leftovers')
continue
dinner_choice = random.choice(dinner_options)
dinner_list.append(dinner_choice)
#Remove dinner selection from list before making next selection
dinner_options.remove(dinner_choice)
#Change dinner choices if Nachos or Taco Dip was picked but Tacos are
#not a meal that week
if 'Tacos' not in dinner_list:
for f in ('Nachos', 'Taco Dip'):
if f in dinner_list:
dinner_list.remove(f)
dinner_choice = random.choice(dinner_options)
dinner_list.append(dinner_choice)
dinner_options.remove(dinner_choice)
#Loop thru dinnerList, assign to days, and get ingredients needed
#print week menu plan
dinner_menu = []
print('Dinners this week')
days_of_week = (
"Monday: ",
"Tuesday: ",
"Wednesday: ",
"Thursday: ",
"Friday: ",
"Saturday: ",
"Sunday: ")
for i, dinner_choice in enumerate(dinner_list):
try:
        day = days_of_week[i]  # enumerate is 0-based, so no offset is needed
except IndexError:
day = "Anyday"
print(day + dinner_choice)
dinner_menu.append((day + dinner_choice))
if dinner_choice != 'Leftovers':
found_ingredients = dinner_ingredients[dinner_choice]
all_ingredients.append(dinner_choice + ':')
all_ingredients.append(found_ingredients)
print('\nIngredients:')
print('\n'.join(all_ingredients))
#TODO: Format as shopping list with number of each item
#TODO: Output text file for each week
#dinner_file = open('dinner_week_one.txt', 'w')
#dinner_file.write('\n'.join(dinner_list))
#dinner_file.close()
#TODO: Parse text file and remove dinner options found from the past
#two weeks
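#A minimal sketch for the shopping-list TODO above (kept commented out like
#the other sketches; assumes the comma-separated ingredient strings built
#into all_ingredients, and is not part of the original script):
#from collections import Counter
#shopping_list = Counter(
#    item.strip().lower()
#    for line in all_ingredients
#    if not line.endswith(':')
#    for item in line.split(','))
#for item, count in sorted(shopping_list.items()):
#    print(count, 'x', item)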
|
UTF-8
|
Python
| false
| false
| 6,419
|
py
| 19
|
getDinnerMenuForWeek.py
| 8
| 0.728307
| 0.727372
| 0
| 114
| 55.307018
| 191
|
paul91125/abc
| 2,680,059,622,648
|
298983ed0535518f6de3c656a876d42b679261ca
|
ca539d2edb215e2394019b8efcdb040198ee9b95
|
/list.py
|
a59b4ed5e886e6e3147e2def5a91e54ba9ccff2f
|
[] |
no_license
|
https://github.com/paul91125/abc
|
4019b8eb836abe70faa6a2d3359dbcfbaeddc3dd
|
e3f7298ab5f1545f9ac2de528e16197329682649
|
refs/heads/master
| 2021-01-20T06:54:10.135928
| 2017-05-04T01:47:11
| 2017-05-04T01:47:11
| 89,942,620
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
class slist:
class _node:
def __init__(self,info):
self._data=info
self._next=None
def __init__(self):
self._head=None
self._tail=None
self._length=0
def is_empty(self):
return self._length==0
def count(self):
return self._length
def get_first(self):
if not self.is_empty():
return self._head._data
def add_to_head(self,ele):
new_node=self._node(ele)
if not self.is_empty():
new_node._next=self._head
self._head=new_node
else:
self._head=self._tail=new_node
        self._length+=1
def get_last(self):
if not self.is_empty():
return self._tail._data
def add_to_tail(self,ele):
new_node=self._node(ele)
if not self.is_empty():
self._tail._next=new_node
self._tail=new_node
else:
self._head=self._tail=new_node
        self._length+=1
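# Minimal usage sketch (added example, not in the original file):
#   lst = slist()
#   lst.add_to_head(2)
#   lst.add_to_tail(3)
#   print(lst.get_first(), lst.get_last(), lst.count())   # 2 3 2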
|
UTF-8
|
Python
| false
| false
| 795
|
py
| 4
|
list.py
| 4
| 0.641509
| 0.636478
| 0
| 43
| 17.488372
| 33
|
louking/loutilities
| 1,597,727,885,810
|
d26013f788b4ca556dec9b1722177c9c93c2f07b
|
85764904e918310f9e4a209f64570dcdcf099818
|
/loutilities/user/__init__.py
|
700c855acee9f0587253145d7d3055f798dfff01
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/louking/loutilities
|
05bb20994ae06d2e68989cd6a779c350a9a430ad
|
aaf7410849d0167001cd5f06ab0dae6563e58ec7
|
refs/heads/master
| 2023-07-24T18:32:36.128102
| 2023-07-15T10:02:43
| 2023-07-15T10:02:43
| 5,824,315
| 2
| 2
| null | false
| 2023-05-10T09:59:37
| 2012-09-15T21:29:29
| 2023-01-18T18:28:06
| 2023-05-10T09:59:36
| 540
| 2
| 2
| 31
|
Python
| false
| false
|
'''
user - package supports user management for xtilities products
=========================================================================================
'''
# pypi
from flask import Flask, g
from flask_security import Security, SQLAlchemyUserDatastore, LoginForm, ForgotPasswordForm
# homegrown
from loutilities.configparser import getitems
from loutilities.user.model import User, Role
# hold application here
app = None
user_datastore = None
# TODO: should these messages be localized? See https://flask-security-too.readthedocs.io/en/stable/customizing.html#localization
user_messages = {
'ACCOUNT_NOT_PERMITTED' : 'Account not permitted for this application'
}
# login_form for application management
class UserLoginForm(LoginForm):
def validate(self, **kwargs):
# if some error was detected from standard validate(), we're done
if not super().validate(**kwargs):
return False
# if all ok otherwise, check roles to verify user allowed for this application
## collect applications
apps = set()
for thisrole in self.user.roles:
apps |= set(thisrole.applications)
## disallow login if this app isn't in one of user's roles
if g.loutility not in apps:
self.email.errors.append(user_messages['ACCOUNT_NOT_PERMITTED'])
return False
return True
# forgot_password for application management
class UserForgotPasswordForm(ForgotPasswordForm):
def validate(self, **kwargs):
# if some error was detected from standard validate(), we're done
if not super().validate(**kwargs):
return False
# if all ok otherwise, check roles to verify user allowed for this application
## collect applications
apps = set()
for thisrole in self.user.roles:
apps |= set(thisrole.applications)
## disallow password reset if this app isn't in one of user's roles
if g.loutility not in apps:
self.email.errors.append(user_messages['ACCOUNT_NOT_PERMITTED'])
return False
return True
# extend flask_security.Security to support application verification
class UserSecurity(Security):
def __init__(self, app=None, datastore=None, register_blueprint=True, **kwargs):
'''
replaces flask_security.Security
add login_form=UserLoginForm if caller hasn't already supplied
:param kwargs:
'''
        if 'login_form' not in kwargs:
            kwargs['login_form'] = UserLoginForm
        if 'forgot_password_form' not in kwargs:
            kwargs['forgot_password_form'] = UserForgotPasswordForm
return super().__init__(app, datastore, register_blueprint, **kwargs)
# used only for database initialization
# TODO: future use for loutilities.com landing page
def create_app(config_obj, configfiles=None):
'''
apply configuration object, then configuration files
'''
global app
app = Flask('loutilities')
app.config.from_object(config_obj)
if configfiles:
# backwards compatibility
if type(configfiles) == str:
configfiles = [configfiles]
for configfile in configfiles:
appconfig = getitems(configfile, 'app')
app.config.update(appconfig)
from .model import db
db.init_app(app)
global user_datastore
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
# need to force app context else get
# RuntimeError: Working outside of application context.
# RuntimeError: Attempted to generate a URL without the application context being pushed.
# see http://kronosapiens.github.io/blog/2014/08/14/understanding-contexts-in-flask.html
with app.app_context():
# set up scoped session
from sqlalchemy.orm import scoped_session, sessionmaker
# the following code causes binds not to work, because the session is artificially
# set to the base database engine via bind parameter
# db.session = scoped_session(sessionmaker(autocommit=False,
# autoflush=False,
# bind=db.engine))
# db.query = db.session.query_property()
return app
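# Illustrative only (added; not part of this module): one way create_app
# might be called; the config class and file path here are hypothetical.
#
#     class DevConfig:
#         SQLALCHEMY_DATABASE_URI = 'sqlite://'
#         SECRET_KEY = 'not-a-real-secret'
#
#     app = create_app(DevConfig, configfiles=['/etc/myapp/myapp.cfg'])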
|
UTF-8
|
Python
| false
| false
| 4,326
|
py
| 95
|
__init__.py
| 69
| 0.650948
| 0.649098
| 0
| 114
| 36.95614
| 129
|
k-gerst/NET-SS3-Dissertation
| 16,011,638,093,244
|
2065bd1173d57a5c937428fd96dd0c17e65594d2
|
b490d8b7ef30658cce73bac77d8cfb81fa1d383b
|
/src/egoNetClass.py
|
4feeb17ae30ce74f203d8c1727658e5ce9de8b9c
|
[] |
no_license
|
https://github.com/k-gerst/NET-SS3-Dissertation
|
90f4f9366c64ac80d43ac5d54636aa6396aa6743
|
2b72ec2d5749402b73a5f33688e7c0d0e3f15b47
|
refs/heads/master
| 2021-09-09T22:25:27.622827
| 2018-03-20T01:17:40
| 2018-03-20T01:17:40
| 125,916,132
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import pandas as pd
import re # provides regular expression matching operations (https://docs.python.org/2/library/re.html#module-re)
import os
import networkx as nx
import matplotlib.pyplot as plt
def anon_alters(subid, node_list):
"""
Takes list of node names as input and returns an anonymized list of node IDs.
Node IDs use the following convention: 'SubID-NodeID'
"""
anon_list = [str(subid) + '-' + str(n).zfill(2) for n in list(range(1, len(node_list) + 1))]
mapper = dict(zip(node_list, anon_list))
return mapper
class egoNet(object):
"""
Custom class object that handles the pre-processing steps of the ego network data. Results in a networkx object.
Attributes:
Methods:
"""
def __init__(self, subid, path, weight=2, drop_parents=False):
"""
Returns a networkx object.
"""
self.subid = subid
self.path = path
# Retrieve weighted alter-alter adjacency matrix
file_wt_mat = [f for f in os.listdir(path) if
re.search(re.escape(str(self.subid)) + r'[\s-]*\d{2,4}[\s-]*\d{1,2}[\s-]*\d{1,4}_weighted_matrix',
f)]
wt_mat = pd.read_csv(path + file_wt_mat[0], index_col=0)
wt_mat['source'] = wt_mat.index.values
del wt_mat.index.name
# Generate source-target (long) matrix from weighted alter-alter adjacency matrix.
edges = pd.melt(wt_mat, id_vars='source', var_name='target', value_name='weight')
# Retrieve alter attribute matrix
file_attrib_mat = [f for f in os.listdir(path) if re.search(
re.escape(str(self.subid)) + r'[\s-]*\d{2,4}[\s-]*\d{1,2}[\s-]*\d{1,4}_alter_summary', f)]
nodes = pd.read_csv(path + file_attrib_mat[0])
# Generate anonymous alter ID key-value dictionary
mapper = anon_alters(subid=subid, node_list=wt_mat['source'])
# self.mapper = mapper
# Replace alter names with anonymous IDs
edges.replace(mapper, inplace=True)
nodes.replace(mapper, inplace=True)
nodes.set_index('Alter_Name', inplace=True)
nodes.rename(
columns=dict(zip(nodes.columns, [re.sub(r'[\s]?(Alter_)?', '', x) for x in nodes.columns])),
inplace=True)
relationship_map = {1: 'parent',
2: 'spouse',
3: 'significant other',
4: 'child',
5: 'sibling',
6: 'other relative',
7: 'friend',
8: 'co-worker',
9: 'other'}
nodes['Relationship'].replace(relationship_map, inplace=True)
if drop_parents:
parent_rows = nodes[nodes['Relationship'] == 'parent'].index
nodes = nodes.drop(parent_rows)
edges = edges.loc[~((edges['source'].isin(parent_rows)) | (edges['target'].isin(parent_rows)))]
# Create edge matrix conditional on weight of edge (default = 2)
self.edgeList = edges[edges['weight'] > weight]
# Create node
self.nodeList = nodes
G_foo = nx.Graph()
G_foo.add_nodes_from(nodes.index)
G_foo.add_edges_from([tuple(x) for x in self.edgeList[['target', 'source']].values])
self.G = G_foo
# self.G = nx.from_pandas_dataframe(edges, 'source', 'target', 'weight')
nx.set_node_attributes(self.G, nodes.to_dict(orient='index'))
def circular_draw(self):
nx.draw(self.G, pos=nx.circular_layout(self.G), labels=self.nodeList['Relationship'],
node_color=self.nodeList['AlcDrinkWithPerson'],
cmap=plt.cm.plasma, alpha=0.8)
plt.show()
def info(self):
print(self.subid)
print(nx.info(self.G))
def variable_recode(node_list):
"""
Returns relabeled features following item conventions.
"""
relationship_map = {1: 'parent',
2: 'spouse',
3: 'significant other',
4: 'child',
5: 'sibling',
6: 'other relative',
7: 'friend',
8: 'co-worker',
9: 'other'}
    node_list['Relationship'] = node_list['Relationship'].replace(relationship_map)
    return node_list
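# A hedged usage sketch (added for illustration): the subject id and data
# directory below are hypothetical, and the CSVs in that directory must
# match the filename patterns searched for in egoNet.__init__.
#
#     net = egoNet(subid=101, path='data/', weight=2, drop_parents=True)
#     net.info()
#     net.circular_draw()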
|
UTF-8
|
Python
| false
| false
| 4,443
|
py
| 9
|
egoNetClass.py
| 6
| 0.538375
| 0.529147
| 0
| 116
| 37.301724
| 121
|
nkelton/Project-Litter-Bug-Front-End
| 2,121,713,876,962
|
614a70203be4f4761326bfa1bc9691cdf95f9afe
|
a89133c42a75fea4003211cc6339a31264a1ec18
|
/script/urls.py
|
6c5c0923811a78ff8239b01789c938c07060ba80
|
[
"MIT"
] |
permissive
|
https://github.com/nkelton/Project-Litter-Bug-Front-End
|
cdbfe6ee30fa50d7e15dea60a538c8f5d531a089
|
366f1777091cac84c464204bfb39e1f54fa004f2
|
refs/heads/master
| 2022-12-09T16:06:12.658283
| 2020-11-10T21:06:02
| 2020-11-10T21:06:02
| 193,614,189
| 0
| 0
|
MIT
| false
| 2022-12-08T01:22:26
| 2019-06-25T01:54:35
| 2020-11-10T21:06:06
| 2022-12-08T01:22:25
| 658
| 0
| 0
| 6
|
C
| false
| false
|
from rest_framework.urlpatterns import format_suffix_patterns
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'script/$', views.ScriptList.as_view()),
    url(r'script/(?P<pk>\d+)/$', views.ScriptDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
UTF-8
|
Python
| false
| false
| 299
|
py
| 42
|
urls.py
| 26
| 0.719064
| 0.719064
| 0
| 12
| 23.916667
| 62
|
harishankar18/SmartDisplay
| 17,575,006,204,998
|
9e522824028e73fe1a4af5ab4d2e4ef4fd8b38fc
|
df3dd1c22324293db47c86b6574a872ec9652e6d
|
/web_interface.py
|
af4cc799a8bb409c78b6a35f58478d180ccff0b6
|
[] |
no_license
|
https://github.com/harishankar18/SmartDisplay
|
ec458562c5ab4297f62d922b081330dd8ec7c910
|
bd0e4dfd193ecf21af491d8f304d0e479d604439
|
refs/heads/master
| 2020-04-14T11:44:45.687433
| 2019-01-02T09:50:06
| 2019-01-02T09:50:06
| 119,332,834
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask
from MessageQueue import MultiMessageQueue
from ConfigReader import ConfigReader
from Yammer import YammerHandle
from SmartLogger import SmartLogger
from WeatherReport import WeatherReport
from UploadHandler import UploadHandler
class FPDisplay:
"""
Main class for FPDisplay application which is
responsible for following tasks.
1. Reading all the configuration.
2. starting the monitoring threads.
"""
def __init__(self, config_directory_path = "Resources/config"):
self.app = Flask(__name__)
self.cfg = ConfigReader(config_directory_path)
self.cfg.read_config()
self.logger = SmartLogger(self.cfg).get_smart_logger()
self.message_queue = MultiMessageQueue(self.logger, self.cfg)
self.upload_handler = UploadHandler(self.app, self.cfg, self.message_queue)
self.yammer = YammerHandle(self.cfg, self.message_queue, self.logger)
#self.weather_report = WeatherReport (self.cfg, self.message_queue, self.logger)
def handle_request(self):
@self.app.route('/<device>')
def fp_display(device):
print("Received request for the device", device)
message = self.message_queue.dequeue(device)
if message is not None:
return message.render_page(device)
else:
return "<h1> Dummy return </h1>"
@self.app.route('/upload')
def upload_file():
return self.upload_handler.render_upload_template()
@self.app.route('/uploader', methods=['GET', 'POST'])
def save_file():
return self.upload_handler.save_file()
def start(self):
self.app.run(host='0.0.0.0', port=5000, threaded=True)
def start_monitoring_threads(self):
self.yammer.start()
#self.weather_report.start()
if __name__ == "__main__":
fd = FPDisplay()
fd.handle_request()
fd.start_monitoring_threads()
fd.start()
|
UTF-8
|
Python
| false
| false
| 2,064
|
py
| 13
|
web_interface.py
| 12
| 0.621609
| 0.615795
| 0
| 59
| 32.983051
| 88
|
Amitabitbul/Bit2c.co.il.API.Python
| 3,521,873,207,360
|
53ac5f8cdc9e4202beb3ef112b312e8044969fa0
|
23895f47a9fd6581f690b5bc9a4fd1abc2f99e06
|
/Balance.py
|
a2e9f647d1906d7d9b8a8480e9115cf8730487d5
|
[] |
no_license
|
https://github.com/Amitabitbul/Bit2c.co.il.API.Python
|
c8e07f98b7d65d63503bd570b26c9aeac6665d2b
|
b57fe5c4f94ca2733eebe49d06079f9eb6f3ab3c
|
refs/heads/master
| 2021-01-18T23:36:30.997176
| 2018-01-07T19:08:10
| 2018-01-07T19:08:10
| 15,962,470
| 1
| 6
| null | false
| 2018-01-07T19:08:11
| 2014-01-16T08:41:18
| 2017-12-06T15:30:50
| 2018-01-07T19:08:10
| 174
| 1
| 6
| 1
|
Python
| false
| null |
class Balance:
def __init__(self,NIS,LTC,BTC):
# # Types
self.BalanceBTC = BTC # decimal
self.BalanceNIS = NIS # decimal
self.BalanceLTC = LTC # decimal
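# Usage sketch (added; not in the original file). Note the constructor
# argument order is (NIS, LTC, BTC):
if __name__ == '__main__':
    b = Balance(NIS=1000, LTC=2.5, BTC=0.1)
    print(b.BalanceNIS, b.BalanceLTC, b.BalanceBTC)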
|
UTF-8
|
Python
| false
| false
| 217
|
py
| 21
|
Balance.py
| 19
| 0.502304
| 0.502304
| 0
| 7
| 30.142857
| 41
|
bgruening/ngsutils
| 3,917,010,220,248
|
1f1ed1259dc99162d65c7fc9e95b425866d0f957
|
94bd032bc21bfd24e6dcbcfe642331f58829e574
|
/ngsutils/gtf/t/test_junctions.py
|
829573989fd9375f501363497c29204dc8eccee7
|
[
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI"
] |
permissive
|
https://github.com/bgruening/ngsutils
|
4c1d935eb0ff337de996ce9d71b8e79ebf2faee7
|
417e90dc1918fb553dd84990f2c54bd8cea8f44d
|
refs/heads/master
| 2021-01-21T20:33:45.678884
| 2019-06-25T20:48:45
| 2019-06-25T20:48:45
| 45,920,499
| 0
| 0
|
BSD-3-Clause
| true
| 2019-07-16T10:09:01
| 2015-11-10T15:21:30
| 2015-11-10T15:21:32
| 2019-07-16T10:08:58
| 5,658
| 0
| 0
| 0
|
Python
| false
| false
|
#!/usr/bin/env python
'''
Tests for gtfutils / junctions
'''
import os
import unittest
import StringIO
import ngsutils.gtf.junctions
from ngsutils.gtf import GTF
# >test1
# 1 2 3 4 5 6 7 8 9 100
# 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
# aaaaaaaaaCCCCCCCATGCtttttttttGCGCTTTGATCcccccccccCTGAGGGGGGGGGGGGGATCGgggggggggACTgggggggTCGAGGGGGGG
# exons:
# 10,20
# 30,40
# 50,70
# 90,100
# opt: 80-82
fa = os.path.join(os.path.dirname(__file__), 'test-junc.fa')
class GTFJunctionsTest(unittest.TestCase):
def testJunctionsSimple(self):
gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
valid = '''\
>test1:16-20,29-33
ATGCGCGC
>test1:16-20,49-53
ATGCCTGA
>test1:16-20,89-93
ATGCTCGA
>test1:36-40,49-53
GATCCTGA
>test1:36-40,89-93
GATCTCGA
>test1:66-70,89-93
ATCGTCGA
'''
out = StringIO.StringIO('')
ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, out=out, quiet=True)
self.assertEqual(out.getvalue(), valid)
def testJunctionsMultiExon(self):
gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|80|82|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
valid = '''\
>test1:36-40,49-53
GATCCTGA
>test1:36-40,79-82,89-93
GATCACTTCGA
>test1:36-40,89-93
GATCTCGA
>test1:66-70,79-82,89-93
ATCGACTTCGA
>test1:66-70,89-93
ATCGTCGA
'''
out = StringIO.StringIO('')
ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, out=out, quiet=True)
self.assertEqual(out.getvalue(), valid)
def testJunctionsIsoforms(self):
gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
valid = '''\
>test1:16-20,29-33
ATGCGCGC
>test1:16-20,49-53
ATGCCTGA
>test1:16-20,89-93
ATGCTCGA
>test1:36-40,49-53
GATCCTGA
>test1:36-40,89-93
GATCTCGA
>test1:66-70,89-93
ATCGTCGA
'''
out = StringIO.StringIO('')
ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, out=out, quiet=True)
self.assertEqual(out.getvalue(), valid)
def testJunctionsIsoformsKnown(self):
gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
valid = '''\
>test1:16-20,29-33
ATGCGCGC
>test1:36-40,89-93
GATCTCGA
>test1:16-20,49-53
ATGCCTGA
>test1:66-70,89-93
ATCGTCGA
'''
out = StringIO.StringIO('')
ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, known=True, out=out, quiet=True)
self.assertEqual(out.getvalue(), valid)
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false
| false
| 4,359
|
py
| 126
|
test_junctions.py
| 121
| 0.663455
| 0.543014
| 0
| 136
| 31.051471
| 115
|
liqing9399/data_struct
| 17,549,236,401,397
|
08d5a33aa12be84247845b629fd8808078168900
|
d8f973a12bde4d0e000d66e61d46200cb1d8a8a1
|
/feature_test/tpybind11/t.py
|
e735d20a38644a3d71c2f2703034efe06073ba3d
|
[] |
no_license
|
https://github.com/liqing9399/data_struct
|
2103d184ed4d95e0c8a2babfdbaf06ab412b0cd2
|
d2cfe7a8d3af51e578d6bd8befc31323415b3776
|
refs/heads/master
| 2023-09-05T23:34:22.506292
| 2023-08-21T09:49:37
| 2023-08-21T09:49:37
| 157,897,147
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
####################################################
#filename : t.py
#author : litao
#e-mail : 362085095@qq.com
#create time : 2023-05-23 17:43:04
#last modified : 2023-05-23 17:43:04
####################################################
#!/usr/bin/env python
import emp
emp.add(1,1)
emp.mul(2,3)
p = emp.pet("pag")
n = p.getName()
print(n)
print(p)
|
UTF-8
|
Python
| false
| false
| 372
|
py
| 50
|
t.py
| 40
| 0.438172
| 0.327957
| 0
| 16
| 22.25
| 52
|
bpeterso2000/jsonjoin
| 11,493,332,521,238
|
16d6f9e39fb9f224457b02dd4f27f1f334d64ec5
|
07b017b57cd5a5c17ac6773334209a12cef6e442
|
/jsonjoin/core.py
|
3273c9bdd99b1b2d275143202de10295d20c9a9b
|
[
"MIT"
] |
permissive
|
https://github.com/bpeterso2000/jsonjoin
|
68fcef8d164aac3feccb6c2828218bcd0607ce77
|
4a72d2660a641d904deab4d65934fa11be49beaa
|
refs/heads/master
| 2021-01-19T11:49:17.683228
| 2017-04-19T00:04:43
| 2017-04-19T00:04:43
| 87,998,574
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Core code.
INNER JOIN LEFT JOIN RIGHT JOIN OUTER JOIN SYMMETRIC JOIN
+---+--+---+ +---+--+---+ +---+--+---+ +---+--+---+ +---+--+---+
| :**: | |***:**: | | :**:***| |***:**:***| |***| |***|
| L :**: R | |*L*:**: R | | L :**:*R*| |*L*:**:*R*| |*L*| |*R*|
| :**: | |***:**: | | :**:***| |***:**:***| |***| |***|
+---+--+---+ +---+--+---+ +---+--+---+ +---+--+---+ +---+--+---+
"""
from jsoncut.core import get_rootkey, select_key
from jsoncut.tokenizer import parse_keystr
from jsoncut.sequencer import Items
JOIN_FUNCTS = {
'inner': lambda a, b: a & b,
'left': lambda a, b: a,
'right': lambda a, b: b,
'outer': lambda a, b: a | b,
'symmetric': lambda a, b: a ^ b
}
def join_data(d, keys, type_='inner'):
"""Join the two data sequences using the specified join type.
Args:
data (Item, Item): a 2d tuple of data sequences.
        keys (str, str): a 2d tuple of primary key lists used for the join.
jointype (str): 'inner', 'left', 'right', 'outer' or 'symmetric'.
"""
values = tuple({select_key(k, *keys[i][0]): k for k in j.items}
for i, j in enumerate(d))
keys = JOIN_FUNCTS[type_](set(values[0]), set(values[1]))
if type_ == 'right':
values = tuple(reversed(values))
d[0].items = [{**values[0].get(i, {}), **values[1].get(i, {})}
for i in keys]
return d[0].value
def join_(left, right, key, rgtkey=None, root=None, rgtroot=None,
jointype='inner', fullscan=False, quotechar='"'):
"""The hub/core of JSON join.
Args:
data (obj, obj): a 2d tuple of JSON encodable objects.
keys (str, str): a 2d tuple of primary keys used for join (JSON keys.)
rootkeys (str, str): set the root of the object (JSON Key.)
jointype (str): 'Inner', 'Left', 'Right', 'Outer' or 'Symmetric'.
listkeys (bool): enumerated, sorted list all unique JSON Keys.
inspect (bool): sorted list of all unique JSON Keys.
fullpath (bool): used with get*; include the full key name path.
fullscan (bool): don't skip previously visited JSON Keys.
        quotechar (str): the quote character used around JSON Keys.
"""
quote = quotechar
data = [left, right]
keys = [key, key] if not rgtkey else [key, rgtkey]
roots = ([root, root] if not rgtroot else [root, rgtroot])
for i, d in enumerate(data):
if roots[i]:
keylist = parse_keystr(roots[i], data[i], quote, None, fullscan)
data[i] = get_rootkey(data[i], *keylist[0])
data[i] = Items(d)
keys[i] = parse_keystr(keys[i], data[i].items, quote, None, fullscan)
return join_data(data, keys, jointype)
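# A hedged sketch of a join_ call (added for illustration): the records and
# the key name are hypothetical, and this assumes a jsoncut key string may
# simply name a top-level key such as "id".
#
#     left = [{'id': 1, 'a': 'x'}, {'id': 2, 'a': 'y'}]
#     right = [{'id': 2, 'b': 'z'}]
#     merged = join_(left, right, key='id', jointype='inner')
#     # -> [{'id': 2, 'a': 'y', 'b': 'z'}]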
|
UTF-8
|
Python
| false
| false
| 2,742
|
py
| 6
|
core.py
| 4
| 0.525894
| 0.521517
| 0
| 67
| 39.925373
| 78
|
xeroborn/Intro-to-Programming-Quiz-Project
| 18,708,877,543,333
|
07de6c1788ef312c176b16a7819308452fb8d8d8
|
2dfcbb819f1d6fc0693b2de69595c04dab19566b
|
/quizv3.py
|
f17a64fb7458efcf47a596f39e88c4ec76c0ed80
|
[] |
no_license
|
https://github.com/xeroborn/Intro-to-Programming-Quiz-Project
|
3ee23fdd5e3cd38360a62c5d305a16fb7976a1aa
|
7cdac6f4b15f7a668263b6fc0ec5dc00946fbcf5
|
refs/heads/master
| 2020-06-16T06:13:44.525188
| 2017-03-28T14:49:25
| 2017-03-28T14:49:25
| 75,239,138
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#Quiz Game Project
#Michael Phillips
easy_questions = ["A ________ is a pointer to an object.", "_________, or procedures, contain code that is likely to be reused.",
"______ are passed to a function.", "_______ are returned from a function."]
med_questions = ["A ___ loop will iterate through through an entire list object.", "A _____ loop will run only when certain conditions are met.",
"3. A ____ is used to hold any number of objects, and can be accesed by using an index.", "_________ is the process of finding and fixing errors in code."]
hard_questions = ["The ___ operator is another way to pass parameters to a function.", "__________ are passed into a function.",
"____ cases are used to verify code is working as intended.", "Proper ___________ is required for python code to compile."]
easy_answers = ["variable", "functions", "inputs", "outputs"]
med_answers = ["for", "while", "list", "debugging"]
hard_answers = ["dot", "parameters", "test", "indentation"]
easy_sentences = ["A VARIABLE is a pointer to an object.", "FUNCTIONS, or procedures, contain code that is likely to be reused.",
"INPUTS are passed to a function.", "OUTPUTS are returned from a function."]
med_sentences = ["A FOR loop will iterate through through an entire list object.", "A WHILE loop will run only when certain conditions are met.",
"A LIST is used to hold any number of objects, and can be accesed by using an index.", "DEBUGGING is the process of finding and fixing errors in code."]
hard_sentences = ["The DOT operator is another way to pass parameters to a function.", "PARAMETERS are passed into a function.",
"TEST cases are used to verify code is working as intended.", "Proper INDENTATION is required for python code to compile."]
#Main game flow, including introduction, leads to user choice on # of guesses, level select, and the question/input function
def game_flow():
print ""
print "Hello, welcome to an Introduction to Programming Quiz on Key Topics!"
print ""
print "Type in the correct response when prompted to advance further in the quiz."
guesses_left = num_guesses()
diff_level = level_select()
print ""
print "Type in the correct response when prompted to advance further in the quiz."
if diff_level == 1:
ask_question(easy_questions, easy_answers, easy_sentences, guesses_left)
if diff_level == 2:
ask_question(med_questions, med_answers, med_sentences, guesses_left)
if diff_level == 3:
ask_question(hard_questions, hard_answers, hard_sentences, guesses_left)
#User chooses how many guesses they will have before they lose.
def num_guesses():
print ""
print "How many guesses do you think you need before the game will end?"
while True:
try:
chances = int(raw_input("Enter a number between 1 and 5: "))
if 1 <= chances <= 5:
return chances
print "Please enter a number between 1 and 5."
except ValueError:
print ""
print "Please enter a number between 1 and 5."
print ""
#User picks their difficulty level
def level_select():
print ""
print "Level Select: Enter 1 for EASY difficulty, 2 for MEDIUM, or 3 for HARD."
while True:
try:
level_input = int(raw_input("Enter 1, 2, or 3: "))
if level_input == 1:
print "You chose Easy!"
return level_input
if level_input == 2:
print "You chose Medium!"
return level_input
if level_input == 3:
print "You chose Hard!"
return level_input
print ""
print "Please enter a number between 1 and 3."
except ValueError:
print ""
print "Please enter a number between 1 and 3."
#Checks whether or not the user is out of guesses based on the value they chose.
def check_guesses(guesses_left):
total_guesses = guesses_left
if total_guesses > 1:
print ""
print "You have " + str(guesses_left) + " guesses left."
elif total_guesses == 1:
print ""
print "You have " + str(guesses_left) + " guess left."
elif total_guesses == 0:
print ""
print "You lost. Type 1 to play again, or 2 to quit the game."
print ""
end_game()
return
#Asks the user a quiz question, checks the answer.
def ask_question(list_of_questions, list_of_answers, sentence_list, guesses_left):
total_questions = len(list_of_questions)
question_index = 0
while question_index < total_questions:
check_guesses(guesses_left)
print ""
print list_of_questions[question_index]
print ""
user_answer = (raw_input("Type in the answer: ").lower())
if list_of_answers[question_index] in user_answer:
print_right_answer(sentence_list, question_index)
print "Great job!"
question_index += 1
else:
guesses_left -= 1
print ""
print "Congrats! You won! To return to level select enter '1'. To quit the game, type '2'"
print ""
end_game()
#Prints the complete quiz question if the user enters the answer.
def print_right_answer(sentence_list, question_index):
print ""
print sentence_list[question_index]
print ""
return
def end_game():
    '''User can choose whether to restart the game or to quit the program'''
while True:
try:
game_end_input = int(raw_input("1 to play again, 2 to quit: "))
if game_end_input == 1:
game_flow()
if game_end_input == 2:
exit()
print "Please enter 1 or 2."
except ValueError:
print ""
print "Please enter 1 or 2."
print ""
game_flow()
|
UTF-8
|
Python
| false
| false
| 5,281
|
py
| 2
|
quizv3.py
| 1
| 0.689263
| 0.681121
| 0
| 141
| 36.460993
| 155
|
dan22m/friendly_telegram
| 7,524,782,722,077
|
7a887ecef88e20a926874a1dde2302260b4c8ff0
|
482a314311240e2ca3b5326b908d532d0f41b2dd
|
/BackupManUA.py
|
e26298554001f0f41b16805defd16340d12b4301
|
[] |
no_license
|
https://github.com/dan22m/friendly_telegram
|
da3fcba5a542ced34e73614bec4c0c336edcf621
|
dbbffb2301829b4bf2b762ff760c5c20a37450ad
|
refs/heads/main
| 2023-08-19T08:20:26.600876
| 2021-09-27T12:58:01
| 2021-09-27T12:58:01
| 410,839,927
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Coded by D4n13l3k00 #
# t.me/D4n13l3k00 #
# This code under AGPL-3.0 #
# translated to UA from t.me/Daniel_Maklein
import ast
import io
from .. import loader, utils
@loader.tds
class BackupManMod(loader.Module):
"""BackupMan"""
strings = {'name': 'BackupManUA'}
async def client_ready(self, client, db):
self._db = db
@loader.owner
async def restmcmd(self, m):
"Установить все модули из *.bkm файла"
reply = await m.get_reply_message()
if not reply or not reply.file or reply.file.name.split('.')[-1] != "bkm":
return await m.edit("<b>[BackupMan]</b> Reply to <code>*.bkm</code> file")
modules = self._db.get(
"friendly-telegram.modules.loader", "loaded_modules", [])
txt = io.BytesIO(await reply.download_media(bytes))
valid, already_loaded = 0, 0
for i in txt.read().decode('utf-8').split("\n"):
if i not in modules:
valid += 1
modules.append(i)
else:
already_loaded += 1
self._db.set("friendly-telegram.modules.loader",
"loaded_modules", modules)
await m.edit(f"<b>[BackupMan]</b>\n\n<i>Загружено модулів:</i> <code>{valid}</code>\n<i>Загружено раніше:</i> <code>{already_loaded}</code>\n\n" + ("<b>> Юзербот автоматично перезагрузиться</b>" if valid != 0 else "<b>> Нічого не загружено</b>"))
if valid != 0:
await self.allmodules.commands["restart"](await m.respond("_"))
@loader.owner
async def backmcmd(self, m):
"Зробити бекап модулів в *.bkm файл"
modules = self._db.get(
"friendly-telegram.modules.loader", "loaded_modules", [])
txt = io.BytesIO("\n".join(modules).encode('utf-8'))
txt.name = "BackupMan-{}.bkm".format(str((await m.client.get_me()).id))
await m.client.send_file(m.to_id, txt, caption=f"<b>[BackupMan]</b> <i>Бекап модулів</i>\n<i>Модулів:</i> <code>{len(modules)}</code>\n<i>Для загрузки бекапа використовуй модуль модуль:</i>\n<code>.dlmod https://raw.githubusercontent.com/dan22m/friendly_telegram/main/BackupManUA.py</code>")
await m.delete()
@loader.owner
async def restncmd(self, m):
"Встановити всі замітки з *.bkn файлу\n<f> - Заміняти вуже існуючі замітки"
args: list or None = utils.get_args_raw(m)
force = False
if "f" in args.lower():
force = True
reply = await m.get_reply_message()
if not reply or not reply.file or reply.file.name.split('.')[-1] != "bkn":
return await m.edit("<b>[BackupMan]</b> Reply to <code>*.bkn</code> file")
notes = self._db.get("friendly-telegram.modules.notes", "notes", {})
txt = io.BytesIO(await reply.download_media(bytes))
valid, already_loaded = 0, 0
for k, v in ast.literal_eval(txt.read().decode('utf-8')).items():
if k not in notes or force:
notes[k] = v
valid += 1
else:
already_loaded += 1
self._db.set("friendly-telegram.modules.notes", "notes", notes)
await m.edit(f"<b>[BackupMan]</b>\n\n<i>Загружено/замінено заміток:</i> <code>{valid}</code>\n<i>Загружені раніше:</i> <code>{already_loaded}</code>")
@loader.owner
async def backncmd(self, m):
"Зробити бекап заміток в *.bkn файл"
modules = self._db.get("friendly-telegram.modules.notes", "notes", {})
txt = io.BytesIO(str(modules).encode('utf-8'))
txt.name = "BackupMan-{}.bkn".format(str((await m.client.get_me()).id))
await m.client.send_file(m.to_id, txt, caption=f"<b>[BackupMan]</b> <i>Бекап заміток</i>\n<i>Заміток:</i> <code>{len(modules)}</code>\n<i>Для загрузки бекапу використовуй модуль:</i>\n<code>.dlmod https://raw.githubusercontent.com/dan22m/friendly_telegram/main/BackupManUA.py</code>")
await m.delete()
|
UTF-8
|
Python
| false
| false
| 4,377
|
py
| 3
|
BackupManUA.py
| 3
| 0.58352
| 0.575056
| 0
| 80
| 48.2125
| 299
|
Ioana-Ionel/server-client
| 7,138,235,686,567
|
e34182505b727046e9b65a2167dac7c13b96301d
|
750c9b91d708f0b8932fc6b74150e72fd55fb07d
|
/server.py
|
85a8f97ec7a216d8ad75533f78ca9be6c3ac80ac
|
[] |
no_license
|
https://github.com/Ioana-Ionel/server-client
|
5ab6613543e513b1d41ea26ef35f9b0327408e62
|
eb3738472bfc8af4ac5c6c14c38958e389e3bf88
|
refs/heads/master
| 2020-04-05T13:48:36.492163
| 2018-11-09T19:47:21
| 2018-11-09T19:47:21
| 156,910,515
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import socket
# from __future__ import print_function
def prime_numbers(a):
    # a prime has no divisor between 2 and a // 2; numbers below 2
    # (including negatives) are not prime
    if a < 2:
        return False
    for d in range(2, a // 2 + 1):
        if a % d == 0:
            return False
    return True
def server_program():
host = socket.gethostname()
port = 5000
# initiate connection
server_socket=socket.socket()
# bind host address and port together
server_socket.bind((host, port))
# set how many clients the server can have at the same time
server_socket.listen(3)
conn, address = server_socket.accept()
print ('Connection from ', str(address))
while True:
# the data received from the client
data = conn.recv(1025).decode()
if not data:
break
print ('from connected user: ' + str(data))
data = int(data)
print('the number is ' + str(data) + '. Is it prime? ' + str(prime_numbers(data)))
if prime_numbers(data) is True:
data = 'The number is prime'
else:
data = 'The number is not prime'
# send data to the client
conn.send(data.encode())
conn.close()
if __name__ == '__main__':
server_program()
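# A matching client sketch (added for illustration; not part of this file).
# It assumes the server above is running on the same host on port 5000.
#
#     import socket
#     client = socket.socket()
#     client.connect((socket.gethostname(), 5000))
#     client.send('17'.encode())
#     print(client.recv(1024).decode())  # expect: The number is prime
#     client.close()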
|
UTF-8
|
Python
| false
| false
| 1,187
|
py
| 2
|
server.py
| 2
| 0.579612
| 0.567818
| 0
| 42
| 27.285714
| 91
|
grey-swan/furniture
| 6,828,998,030,604
|
6c782c28d6dc466a2bf9a3ed26d0a0373461e0cd
|
f59019a50bdf13324b46853215745c97c7e149b7
|
/product/models.py
|
3f7161b43a746c8767913a065426ed5786f79c04
|
[] |
no_license
|
https://github.com/grey-swan/furniture
|
70cd608c9dac2c32244bb18982764bc5194d40ea
|
96efde9053cffa0da4a129de4601f3679f91d8fa
|
refs/heads/master
| 2022-12-08T16:34:59.179270
| 2019-10-07T09:02:37
| 2019-10-07T09:02:37
| 201,612,028
| 0
| 0
| null | false
| 2022-07-05T21:34:07
| 2019-08-10T09:58:54
| 2019-10-07T09:02:45
| 2022-07-05T21:34:03
| 1,077
| 0
| 0
| 9
|
Python
| false
| false
|
from django.db import models
from django.contrib.auth.models import User
# class Category(models.Model):
# """商品分类"""
#
# name = models.CharField(max_length=32, verbose_name='名称')
# alias = models.CharField(max_length=32, verbose_name='别名')
#
# def __str__(self):
# return self.name
#
#
# class Product(models.Model):
# """商品"""
#
# title = models.CharField(max_length=64, verbose_name='标题')
# subtitle = models.CharField(max_length=128, verbose_name='副标题')
# price = models.IntegerField(verbose_name='价格')
# content = models.TextField(verbose_name='详情')
# updated = models.DateTimeField(auto_now=True, verbose_name='更新时间')
# created = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
#
# category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='product', verbose_name='分类')
# user = models.ForeignKey(User, related_name='product', on_delete=models.CASCADE, verbose_name='创建人')
#
# def __str__(self):
# return self.title
#
#
# class Case(models.Model):
# """案例"""
#
# title = models.CharField(max_length=64, verbose_name='标题')
# subtitle = models.CharField(max_length=128, verbose_name='副标题')
# price = models.IntegerField(verbose_name='价格')
# content = models.TextField(verbose_name='详情')
# updated = models.DateTimeField(auto_now=True, verbose_name='更新时间')
# created = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
#
# user = models.ForeignKey(User, related_name='case', on_delete=models.CASCADE, verbose_name='创建人')
#
# def __str__(self):
# return self.title
#
#
# class Dw(models.Model):
# """全屋软装搭配"""
#
#
# class Designer(models.Model):
# """设计师"""
#
# name = models.CharField(max_length=32, verbose_name='姓名')
# position = models.CharField(max_length=16, verbose_name='职称')
# avatar = models.URLField(verbose_name='头像')
# desc = models.TextField(verbose_name='个人简介')
#
#
# class Banner(models.Model):
# """轮播列表"""
#
# # type_list = ((0, '首页轮播1'), (1, '首页轮播2'), (2, '商品轮播'))
#
# title = models.CharField(max_length=64, verbose_name='标题')
# url = models.URLField(verbose_name='图片地址')
# sort_order = models.IntegerField(verbose_name='排序')
# # types = models.IntegerField(verbose_name='类型', choices=type_list)
#
# product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='banner',
# verbose_name='商品')
#
#
# class Cart(models.Model):
# """搭配间-只针对product"""
#
# product = models.ForeignKey(Product, related_name='cart', on_delete=models.CASCADE, verbose_name='所选商品')
# user = models.ForeignKey(User, related_name='cart', on_delete=models.CASCADE, verbose_name='创建人')
#
#
# class Order(models.Model):
# """订单"""
#
# status_list = ((0, '未付款'), (1, '已完成'), (2, '已取消'))
# type_list = ((0, '全屋软装搭配-设计'), (1, '获取报价-家具'), (2, '我想这样搭-案例'), (3, '预约设计师-设计师'))
#
# order_id = models.IntegerField(verbose_name='订单号')
# status = models.IntegerField(verbose_name='订单状态', choices=status_list)
# types = models.IntegerField(verbose_name='类型', choices=type_list)
#
# name = models.CharField(max_length=16, verbose_name='客户姓名')
# address = models.CharField(max_length=64, verbose_name='收货地址')
# phone = models.CharField(max_length=16, verbose_name='联系电话')
# community = models.CharField(max_length=64, verbose_name='小区', null=True, blank=True)
#
# updated = models.DateTimeField(auto_now=True, verbose_name='修改时间')
# created = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
#
# user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='order', verbose_name='创建人')
#
#
# class OrderProduct(models.Model):
# """家具订单详情"""
#
# category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='order_product',
# verbose_name='商品分类')
# product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='order_product',
# verbose_name='所选商品')
# order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name='order_product',
# verbose_name='所属订单')
#
#
# class OrderCase(models.Model):
# """案例订单详情"""
#
# case = models.ForeignKey(Case, on_delete=models.CASCADE, related_name='order_case',
# verbose_name='所选案例')
# order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name='order_case',
# verbose_name='所属订单')
#
#
# class OrderDesigner(models.Model):
# """用户选择的设计师订单详情"""
#
# designer = models.ForeignKey(Designer, on_delete=models.CASCADE, related_name='order_designer',
# verbose_name='所选设计师')
# order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name='order_designer',
# verbose_name='所属订单')
#
#
# class OrderDw(models.Model):
# """全屋软装订单详情"""
|
UTF-8
|
Python
| false
| false
| 5,505
|
py
| 25
|
models.py
| 21
| 0.625125
| 0.617136
| 0
| 133
| 36.639098
| 113
|
bdaskalov/pyhash
| 3,667,902,101,404
|
37c374e992d0f733dd71f8adae5e7f4b64e2803d
|
a5468219e8fa3b8063ffd4d544b0f920ff674028
|
/pyhash.py
|
61a04e19495715d5a346f2de9d5c356d81ea751b
|
[] |
no_license
|
https://github.com/bdaskalov/pyhash
|
948644f6588524407506ddd32fd52e2db50c5751
|
8de688af2aa1924c57b0f7e3bbffb5399e274be2
|
refs/heads/master
| 2020-03-21T08:34:11.340658
| 2018-06-26T15:10:03
| 2018-06-26T15:10:03
| 138,351,491
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import pyhash_ext
import collections.abc
class SparseHashMap(collections.abc.MutableMapping):
def __init__(self, key_type='L', value_type='O'):
self.store = pyhash_ext.IIMap(key_type, value_type)
def __getitem__(self, key):
return self.store[key]
def __setitem__(self, key, value):
self.store[key] = value
def __delitem__(self, key):
del self.store[key]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
|
UTF-8
|
Python
| false
| false
| 509
|
py
| 4
|
pyhash.py
| 4
| 0.603143
| 0.603143
| 0
| 21
| 23.285714
| 59
|
etinaude/130A1
| 3,599,182,633,938
|
f53b0cf567143ab83632ca88fa7e8ccbc6e44390
|
396c9ecbd8abef9dfb0b70554bd82c93bee4d8d1
|
/A1GUI.py
|
c3d3d22db3e0a2156b5ecf377753e85db377d9d2
|
[
"MIT"
] |
permissive
|
https://github.com/etinaude/130A1
|
b89d7a11af276989a73ea793ff1d8edda47942a0
|
5026ef80f2a23aaf53dfbb0e5d744dfd613aa708
|
refs/heads/master
| 2022-02-17T11:19:21.085220
| 2019-09-11T11:22:50
| 2019-09-11T11:22:50
| 207,794,442
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import tkinter as tk
from tkinter import ttk
from A1 import A1
from Histogram import Histogram
from Table import Table
import sys
data2=[]
def redirector(inputStr):
textbox.insert(tk.END, inputStr)
def load_file():
data = A1(entry_file.get())
print(data.get_statistics())
def click():
pass #modify this
if comboData.current() == 0:
# get sum
my_unit = 100
elif comboData.current() == 1:
# get freq
my_unit = 1
else:
#get average
my_unit = 10
if comboOutput.current() == 0:
#create table
pass
elif comboOutput.current() == 1:
#create histogram (horizontal)
pass
else:
#create histogram (vertical)
pass
app = tk.Tk()
app.title("COMPSCI130 A1")
app.geometry('900x500')
label1 = tk.Label(app, text = "File:")
label1.grid(column=0, row=0)
entry_file = tk.Entry(app)
entry_file.grid(column=1, row=0)
entry_file.insert(tk.END, 'trace40.txt')
data = A1(entry_file.get())
load_file_button = ttk.Button(app, text="Load", command=load_file)
load_file_button.grid(column=2, row=0)
entry_ip = tk.Entry(app)
entry_ip.grid(column=3, row=0)
entry_ip.insert(tk.END, '192.168.0.24')
label2 = tk.Label(app, text = "Choose type:")
label2.grid(column=4, row=0)
comboData = ttk.Combobox(app, values=["Sum","Freq", "Average"])
comboData.grid(column=5, row=0)
comboData.current(0)
label3 = tk.Label(app, text = "Choose output:")
label3.grid(column=6, row=0)
comboOutput = ttk.Combobox(app, values=["Table", "H Histogram", "V Histogram"])
comboOutput.grid(column=7, row=0)
comboOutput.current(1)
action = ttk.Button(app, text="Run", command=click)
action.grid(column=8,row=0)
textbox = tk.Text(app)
textbox.grid(column=0, row=1, columnspan=9,padx=2, pady=2)
sys.stdout.write = redirector
app.mainloop()
|
UTF-8
|
Python
| false
| false
| 1,819
|
py
| 7
|
A1GUI.py
| 7
| 0.664651
| 0.627817
| 0
| 77
| 21.597403
| 80
|