Dataset schema (column: type, with observed length/value range; "⌀" marks columns containing nulls):

repo_name: string (7–111 chars) | __id__: int64 (16.6k to 19,705B) | blob_id: string (40 chars) | directory_id: string (40 chars) | path: string (5–151 chars) | content_id: string (40 chars) | detected_licenses: list | license_type: string (2 classes) | repo_url: string (26–130 chars) | snapshot_id: string (40 chars) | revision_id: string (40 chars) | branch_name: string (4–42 chars) | visit_date: timestamp[ns] | revision_date: timestamp[ns] | committer_date: timestamp[ns] | github_id: int64 (14.6k–687M, ⌀) | star_events_count: int64 (0–209k) | fork_events_count: int64 (0–110k) | gha_license_id: string (12 classes) | gha_fork: bool (2 classes) | gha_event_created_at: timestamp[ns] | gha_created_at: timestamp[ns] | gha_updated_at: timestamp[ns] | gha_pushed_at: timestamp[ns] | gha_size: int64 (0–10.2M, ⌀) | gha_stargazers_count: int32 (0–178k, ⌀) | gha_forks_count: int32 (0–88.9k, ⌀) | gha_open_issues_count: int32 (0–2.72k, ⌀) | gha_language: string (1–16 chars, ⌀) | gha_archived: bool (1 class) | gha_disabled: bool (1 class) | content: string (10–2.95M chars) | src_encoding: string (5 classes) | language: string (1 class) | is_vendor: bool (2 classes) | is_generated: bool (2 classes) | length_bytes: int64 (10–2.95M) | extension: string (19 classes) | num_repo_files: int64 (1–202k) | filename: string (4–112 chars) | num_lang_files: int64 (1–202k) | alphanum_fraction: float64 (0.26–0.89) | alpha_fraction: float64 (0.2–0.89) | hex_fraction: float64 (0–0.09) | num_lines: int32 (1–93.6k) | avg_line_length: float64 (4.57–103) | max_line_length: int64 (7–931)
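The derived per-file statistics above can be recomputed from each row's content field. A minimal sketch of one plausible set of definitions (the dataset's exact formulas are not documented here, so the fraction definitions below are assumptions):

def file_stats(content):
    """Recompute derived columns for one row (assumed definitions)."""
    lines = content.splitlines()
    n_chars = len(content) or 1          # guard against empty content
    return {
        'num_lines': len(lines),
        'avg_line_length': sum(len(l) for l in lines) / max(len(lines), 1),
        'max_line_length': max((len(l) for l in lines), default=0),
        'alphanum_fraction': sum(c.isalnum() for c in content) / n_chars,
        'alpha_fraction': sum(c.isalpha() for c in content) / n_chars,
        'hex_fraction': sum(c in '0123456789abcdefABCDEF' for c in content) / n_chars,
    }
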
repo_name: Greenleaf88/MDRSteg-large-capacity-image-steganography-based-on-multi-scale-dilated-ResNet-and-combined-chi-squ | __id__: 16,303,695,877,752
blob_id: 76019b1d462d55a7f7e6fd06db4ee609611d11ea | directory_id: db3fbb7f5dda676b36b1d8db5d6a613ec7192dad | content_id: 1a37efe350a78e2f3945d6ca9b23e7dd0b1de9d7
path: /SingleSizeModel.py | detected_licenses: ["MIT"] | license_type: permissive
repo_url: https://github.com/Greenleaf88/MDRSteg-large-capacity-image-steganography-based-on-multi-scale-dilated-ResNet-and-combined-chi-squ
snapshot_id: edf23fe541baf71ad0072059d5e2085fceb86133 | revision_id: a80a73fb1f104e530168319bc0049878a38ed76a | branch_name: refs/heads/main
visit_date: 2023-05-12T13:51:32.625426 | revision_date: 2021-06-03T02:50:18 | committer_date: 2021-06-03T02:50:18
github_id: 373,348,246 | star_events_count: 4 | fork_events_count: 1 | gha_* fields: null

content:
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import os
import time
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import numpy as np
# import cv2
import tensorflow as tf
from tensorflow.data import Iterator
from Dataset import SegDataLoader, VocRgbDataLoader, VocDataLoader, LfwRgbDataLoader, ImageNetRgbDataLoader
from visulize import save_test_images
from utils import rgb2yuv_tf, yuv2rgb_tf
from model import Discriminator, encode_net, decode_net
from ResNet import resnet_nopooling
class Model():
def __init__(self):
self.run_time = time.strftime("%m%d-%H%M")
# self.learning_rate = 0.0001
self.starter_learning_rate = 0.001
self.epoches = 70
self.log_path = 'logs/'+self.run_time + '/'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.InteractiveSession(config=config)
self.secret_tensor = tf.placeholder(shape=[None, 256, 256, 3], dtype=tf.float32, name="secret_tensor")
self.cover_tensor = tf.placeholder(shape=[None, 256, 256, 3], dtype=tf.float32, name="cover_tensor")
self.cover_yuv = rgb2yuv_tf(self.cover_tensor)
self.secret_yuv = rgb2yuv_tf(self.secret_tensor)
self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step')
# self.test_op = self.prepare_test_graph(self.secret_tensor, self.cover_tensor)
def get_hiding_network_op(self, cover_tensor, secret_tensor, is_training):
concat_input = tf.concat([cover_tensor, secret_tensor], axis=-1, name='images_features_concat')
# output = resnet_nopooling(concat_input, name='encode', n_class=3, dilate=[2,4,8,16], is_training=is_training)
output = resnet_nopooling(concat_input, name='encode', n_class=3, is_training=is_training)
return output
def get_reveal_network_op(self, container_tensor, is_training):
output = resnet_nopooling(container_tensor, name='decode', n_class=3, is_training=is_training)
return output
def get_noise_layer_op(self,tensor,std=.1):
# with tf.variable_scope("noise_layer"):
# return tensor + tf.random_normal(shape=tf.shape(tensor), mean=0.0, stddev=std, dtype=tf.float32)
return tensor
def get_loss_op(self,secret_true,secret_pred,cover_true,cover_pred):
# D_real_secret = Discriminator(secret_true)
# D_fake_secret = Discriminator(secret_pred, reusing=True)
# D_real = Discriminator(cover_true, reusing=True)
# D_fake = Discriminator(cover_pred, reusing=True)
# D_real_secret = Discriminator(secret_true, name='secret', reusing=False)
# D_fake_secret = Discriminator(secret_pred, name='secret', reusing=True)
# D_real = Discriminator(cover_true, name='cover', reusing=False)
# D_fake = Discriminator(cover_pred, name='cover', reusing=True)
#
# D_real = tf.concat([D_real, D_real_secret], axis=0, name='gan_true_concat')
# D_fake = tf.concat([D_fake, D_fake_secret], axis=0, name='gan_pred_concat')
#
# D_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.ones_like(D_real)) + tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.zeros_like(D_fake)))
# G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.ones_like(D_fake)))
with tf.variable_scope("huber_losses"):
# secret_mse = tf.losses.mean_squared_error(secret_true,secret_pred)
# cover_mse = tf.losses.mean_squared_error(cover_true,cover_pred)
# secret_mse = tf.reduce_mean(tf.losses.huber_loss(secret_true, secret_pred, delta=0.5))
# cover_mse = tf.reduce_mean(tf.losses.huber_loss(cover_true, cover_pred, delta=0.5))
secret_mse = tf.reduce_mean(tf.losses.absolute_difference(secret_true, secret_pred))
cover_mse = tf.reduce_mean(tf.losses.absolute_difference(cover_true, cover_pred))
with tf.variable_scope("ssim_losses"):
#secret_ssim = 1. - tf.reduce_mean(tf.image.ssim(secret_true, secret_pred, max_val=1.0))
#cover_ssim = 1. - tf.reduce_mean(tf.image.ssim(cover_true, cover_pred, max_val=1.0))
            # SSIM computed per channel (Y, U, V) and averaged
            secret_ssim = 1. - (tf.reduce_mean(tf.image.ssim(secret_true[:, :, :, :1], secret_pred[:, :, :, :1], max_val=1.0))
                                + tf.reduce_mean(tf.image.ssim(secret_true[:, :, :, 1:2], secret_pred[:, :, :, 1:2], max_val=1.0))
                                + tf.reduce_mean(tf.image.ssim(secret_true[:, :, :, 2:], secret_pred[:, :, :, 2:], max_val=1.0))) / 3.
            cover_ssim = 1. - (tf.reduce_mean(tf.image.ssim(cover_true[:, :, :, :1], cover_pred[:, :, :, :1], max_val=1.0))
                               + tf.reduce_mean(tf.image.ssim(cover_true[:, :, :, 1:2], cover_pred[:, :, :, 1:2], max_val=1.0))
                               + tf.reduce_mean(tf.image.ssim(cover_true[:, :, :, 2:], cover_pred[:, :, :, 2:], max_val=1.0))) / 3.
# D_final_loss = cover_mse + secret_mse + secret_ssim + cover_ssim + D_loss
# D_final_loss = D_loss
G_final_loss = 5*cover_mse + 5*secret_mse + secret_ssim + cover_ssim
# G_final_loss = cover_mse + secret_mse + secret_ssim + cover_ssim
# return D_final_loss, G_final_loss, D_loss, G_loss, secret_mse, cover_mse, secret_ssim, cover_ssim
return G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim
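        # For reference, the generator objective assembled above is
        #   G_final_loss = 5 * L1(cover) + 5 * L1(secret)
        #                  + (1 - mean per-channel SSIM(secret))
        #                  + (1 - mean per-channel SSIM(cover))
        # i.e. the L1 reconstruction terms are weighted 5x relative to the SSIM terms.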
def get_tensor_to_img_op(self,tensor):
with tf.variable_scope("",reuse=True):
# t = tensor*tf.convert_to_tensor([0.229, 0.224, 0.225]) + tf.convert_to_tensor([0.485, 0.456, 0.406])
tensor = yuv2rgb_tf(tensor)
return tf.clip_by_value(tensor,0,1)
# return tf.clip_by_value(tensor,0,255)
def prepare_training_graph(self,secret_tensor,cover_tensor,global_step_tensor):
hidden = self.get_hiding_network_op(cover_tensor=cover_tensor, secret_tensor=secret_tensor, is_training=True)
reveal_output_op = self.get_reveal_network_op(hidden, is_training=True)
G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim = self.get_loss_op(secret_tensor,reveal_output_op,cover_tensor,hidden)
global_variables = tf.global_variables()
gan_varlist = [i for i in global_variables if i.name.startswith('Discriminator')]
en_de_code_varlist = [i for i in global_variables if i not in gan_varlist]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# train_op = optimiser.minimize(loss, global_step=global_step)
# D_minimize_op = tf.train.AdamOptimizer(self.learning_rate).minimize(D_final_loss, var_list=gan_varlist, global_step=global_step_tensor)
G_minimize_op = tf.train.AdamOptimizer(self.learning_rate).minimize(G_final_loss, var_list=en_de_code_varlist, global_step=global_step_tensor)
# G_minimize_op = tf.train.AdamOptimizer(self.learning_rate).minimize(G_final_loss, global_step=global_step_tensor)
# tf.summary.scalar('D_loss', D_final_loss,family='train')
tf.summary.scalar('G_loss', G_final_loss,family='train')
tf.summary.scalar('secret_mse', secret_mse,family='train')
tf.summary.scalar('cover_mse', cover_mse,family='train')
tf.summary.scalar('learning_rate', self.learning_rate,family='train')
tf.summary.scalar('secret_ssim', secret_ssim)
tf.summary.scalar('cover_ssim', cover_ssim)
tf.summary.image('secret',self.get_tensor_to_img_op(secret_tensor),max_outputs=1,family='train')
tf.summary.image('cover',self.get_tensor_to_img_op(cover_tensor),max_outputs=1,family='train')
tf.summary.image('hidden',self.get_tensor_to_img_op(hidden),max_outputs=1,family='train')
# tf.summary.image('hidden_noisy',self.get_tensor_to_img_op(noise_add_op),max_outputs=1,family='train')
tf.summary.image('revealed',self.get_tensor_to_img_op(reveal_output_op),max_outputs=1,family='train')
merged_summary_op = tf.summary.merge_all()
return G_minimize_op, G_final_loss, merged_summary_op, secret_mse,cover_mse, secret_ssim, cover_ssim
def prepare_test_graph(self,secret_tensor,cover_tensor):
# y_output, hiding_output_op = self.get_hiding_network_op(cover_tensor=cover_tensor,secret_tensor=secret_tensor, is_training=True)
hidden = self.get_hiding_network_op(cover_tensor=cover_tensor,secret_tensor=secret_tensor, is_training=False)
# reveal_output_op = self.get_reveal_network_op(y_output, is_training=True)
reveal_output_op = self.get_reveal_network_op(hidden, is_training=False)
G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim = self.get_loss_op(secret_tensor,reveal_output_op,cover_tensor,hidden)
# tf.summary.scalar('loss', loss_op,family='test')
# tf.summary.scalar('reveal_net_loss', secret_loss_op,family='test')
# tf.summary.scalar('cover_net_loss', cover_loss_op,family='test')
#
# tf.summary.image('secret',self.get_tensor_to_img_op(secret_tensor),max_outputs=1,family='test')
# tf.summary.image('cover',self.get_tensor_to_img_op(cover_tensor),max_outputs=1,family='test')
# tf.summary.image('hidden',self.get_tensor_to_img_op(hiding_output_op),max_outputs=1,family='test')
# tf.summary.image('revealed',self.get_tensor_to_img_op(reveal_output_op),max_outputs=1,family='test')
# merged_summary_op = tf.summary.merge_all()
return hidden, reveal_output_op, G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim
def save_chkp(self,path):
global_step = self.sess.run(self.global_step_tensor)
self.saver.save(self.sess,path,global_step)
def load_chkp(self,path):
self.saver.restore(self.sess,path)
print("LOADED")
def train(self):
with tf.device('/cpu:0'):
# segdl = VocRgbDataLoader('/home/jion/moliq/Documents/VOC2012/JPEGImages/', 4, (256, 256), (256, 256), 'voc_train.txt', split='train')
# segdl_val = VocRgbDataLoader('/home/jion/moliq/Documents/VOC2012/JPEGImages/', 4, (256, 256), (256, 256), 'voc_valid.txt', split='val')
#segdl = LfwRgbDataLoader('/home/jion/moliq/Documents/lfw/', 2, (256, 256), (256, 256),
# 'dataset/lfw_train.txt', split='train')
#segdl_val = LfwRgbDataLoader('/home/jion/moliq/Documents/lfw/', 2, (256, 256), (256, 256),
# 'dataset/lfw_valid.txt', split='val')
segdl = ImageNetRgbDataLoader('/home/jion/moliq/Documents/imagenet/ILSVRC2012_img_val/', 4, (256, 256), (256, 256),
'dataset/imagenet_train.txt', split='train')
segdl_val = ImageNetRgbDataLoader('/home/jion/moliq/Documents/imagenet/ILSVRC2012_img_test/', 4, (256, 256), (256, 256),
'dataset/imagenet_valid.txt', split='val')
iterator = Iterator.from_structure(segdl.data_tr.output_types, segdl.data_tr.output_shapes)
iterator_val = Iterator.from_structure(segdl_val.data_tr.output_types, segdl_val.data_tr.output_shapes)
next_batch = iterator.get_next()
next_batch_val = iterator_val.get_next()
training_init_op = iterator.make_initializer(segdl.data_tr)
training_init_op_val = iterator_val.make_initializer(segdl_val.data_tr)
            steps_per_epoch = segdl.data_len // segdl.batch_size  # integer division so range() works on Python 3
            steps_per_epoch_val = segdl_val.data_len // segdl_val.batch_size
self.learning_rate = tf.train.exponential_decay(self.starter_learning_rate, self.global_step_tensor,
steps_per_epoch*15, 0.1, staircase=True)
self.train_op_G, G_final_loss, self.summary_op, self.secret_mse, self.cover_mse, self.secret_ssim, self.cover_ssim = \
self.prepare_training_graph(self.secret_yuv, self.cover_yuv, self.global_step_tensor)
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
self.writer = tf.summary.FileWriter(self.log_path, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=30)
# beta1_power = self.sess.graph.get_tensor_by_name('beta1_power:0')
# out = self.sess.run(beta1_power)
# print('beta1_power ', out)
# exclude_vars = ['beta1_power:0', 'beta2_power:0', 'global_step:0']
# exclude_vars = ['']
# restore_variables = [i for i in tf.global_variables() if not i.name in exclude_vars]
saver = tf.train.Saver()
loader = tf.train.latest_checkpoint('logs/0509-0030')
saver.restore(self.sess, loader)
print('loaded pretrained model')
#beta1_power = self.sess.graph.get_tensor_by_name('beta1_power:0')
#out = self.sess.run(beta1_power)
#print('beta1_power ', out)
for epoch in range(1, 1+self.epoches):
print('epoch %d'%epoch)
self.sess.run(training_init_op)
for i in range(steps_per_epoch):
cover_tensor, secret_tensor = self.sess.run(next_batch)
_, G_loss, secret_mse, cover_mse, secret_ssim, cover_ssim, summary, global_step = \
self.sess.run([self.train_op_G, G_final_loss, self.secret_mse, self.cover_mse, self.secret_ssim, self.cover_ssim, self.summary_op, self.global_step_tensor],
feed_dict={self.secret_tensor: secret_tensor, self.cover_tensor: cover_tensor})
self.writer.add_summary(summary, global_step)
# if i % 5 == 0:
# _, D_loss, summary = \
# self.sess.run([self.train_op_D, D_final_loss, self.summary_op],
# feed_dict={self.secret_tensor: secret_tensor,self.cover_tensor: cover_tensor})
# self.writer.add_summary(summary, global_step)
if i % 30 == 0:
print('Epoch [{}/{}] Step [{}/{}] G_Loss {:.4f} encoder_ssim {:.4f} encoder_mse {:.4f}'
' decoder_ssim {:.4f} decoder_mse {:.4f} '.format(
epoch, self.epoches, i, steps_per_epoch, G_loss,
cover_ssim, cover_mse, secret_ssim, secret_mse ))
# run validation
self.sess.run(training_init_op_val)
# D_loss_val_this_epoch = []
G_loss_val_this_epoch = []
secret_ssim_this_epoch = []
cover_ssim_this_epoch = []
for i in range(steps_per_epoch_val):
cover_tensor_val, secret_tensor_val = self.sess.run(next_batch_val)
G_loss, secret_mse, cover_mse, secret_ssim, cover_ssim = \
self.sess.run([G_final_loss, self.secret_mse,self.cover_mse, self.secret_ssim, self.cover_ssim],
feed_dict={self.secret_tensor: secret_tensor_val,
self.cover_tensor: cover_tensor_val})
# D_loss_val_this_epoch.append(D_loss)
G_loss_val_this_epoch.append(G_loss)
secret_ssim_this_epoch.append(secret_ssim)
cover_ssim_this_epoch.append(cover_ssim)
# mean_D_loss_val_this_epoch = sum(D_loss_val_this_epoch) / len(D_loss_val_this_epoch)
mean_G_loss_val_this_epoch = sum(G_loss_val_this_epoch) / len(G_loss_val_this_epoch)
mean_secret_ssim_this_epoch = sum(secret_ssim_this_epoch) / len(secret_ssim_this_epoch)
mean_cover_ssim_this_epoch = sum(cover_ssim_this_epoch) / len(cover_ssim_this_epoch)
# print('global step: %d, validation loss: %.4f'%(global_step, mean_loss_val_this_epoch))
print('VALIDATION Epoch {} global step {} G_Loss {:.4f} encoder_ssim {:.4f} decoder_ssim {:.4f}'.format(
epoch, global_step, mean_G_loss_val_this_epoch,
mean_cover_ssim_this_epoch, mean_secret_ssim_this_epoch))
# self.save_chkp(self.log_path+'%d_%.3f.ckpt'%(epoch, mean_loss_val_this_epoch))
self.save_chkp(self.log_path)
def test_performance(self, log_path):
hiding_output_op, reveal_output_op, G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim = \
self.prepare_test_graph(self.secret_yuv, self.cover_yuv)
loader = tf.train.latest_checkpoint(log_path)
# from tensorflow.python.tools import inspect_checkpoint as chkp
# chkp.print_tensors_in_checkpoint_file(loader, tensor_name='', all_tensors=True)
# from inspect_checkpoint import print_tensors_in_checkpoint_file
# print_tensors_in_checkpoint_file(loader, tensor_name='', all_tensors=True)
# variables = [i for i in tf.global_variables() if i.name not in ['global_step:0']]
# saver_variables_dict = {value.name[:-2]:value for value in variables}
# custom_saver = tf.train.Saver(saver_variables_dict)
# custom_saver.restore(self.sess, loader)
# print('load model %s'%loader)
# self.saver = tf.train.Saver(var_list=tf.global_variables())
self.saver = tf.train.Saver()
self.saver.restore(self.sess, loader)
print('load model %s'%loader)
with tf.device('/cpu:0'):
# segdl_val = VocRgbDataLoader('/home/jion/moliq/Documents/VOC2012/JPEGImages/', 16, (256, 256), (256, 256), 'voc_valid.txt', split='val')
segdl_val = LfwRgbDataLoader('/home/jion/moliq/Documents/lfw/', 16, (256, 256), (256, 256),
'dataset/lfw_valid.txt', split='val')
iterator_val = Iterator.from_structure(segdl_val.data_tr.output_types, segdl_val.data_tr.output_shapes)
next_batch_val = iterator_val.get_next()
training_init_op_val = iterator_val.make_initializer(segdl_val.data_tr)
            steps_per_epoch_val = segdl_val.data_len // segdl_val.batch_size  # integer division for range()
loss_val_this_epoch = []
secret_mse_val_this_epoch = []
cover_mse_val_this_epoch = []
secret_ssim_this_epoch = []
cover_ssim_this_epoch = []
self.sess.run(training_init_op_val)
# self.saver.restore(self.sess, loader)
# print('load model %s'%loader)
for i in range(steps_per_epoch_val):
cover_tensor_val, secret_tensor_val = self.sess.run(next_batch_val)
stego, secret_reveal, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value = \
self.sess.run([hiding_output_op, reveal_output_op, G_final_loss, secret_mse, cover_mse, secret_ssim, cover_ssim],
feed_dict={self.secret_tensor: secret_tensor_val,
self.cover_tensor: cover_tensor_val})
cover_names = segdl_val.imgs_files[i*segdl_val.batch_size:(i+1)*segdl_val.batch_size]
secret_names = segdl_val.labels_files[i*segdl_val.batch_size:(i+1)*segdl_val.batch_size]
loss_val_this_epoch.append(loss_value)
secret_mse_val_this_epoch.append(secret_mse_value)
cover_mse_val_this_epoch.append(cover_mse_value)
secret_ssim_this_epoch.append(secret_ssim_value)
cover_ssim_this_epoch.append(cover_ssim_value)
if i%10 == 0:
print('%d %.3f %.3f %.3f %.3f %.3f'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value))
save_test_images(cover_names, secret_names, cover_tensor_val, secret_tensor_val, stego, secret_reveal, log_path)
# np.save('%d %.3f %.3f %.3f %.3f %.3f_cover.npy'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value), cover_tensor_val)
# np.save('%d %.3f %.3f %.3f %.3f %.3f_secret.npy'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value), secret_tensor_val)
# np.save('%d %.3f %.3f %.3f %.3f %.3f_stego.npy'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value), stego)
# np.save('%d %.3f %.3f %.3f %.3f %.3f_secret_reveal.npy'%(i, loss_value, secret_mse_value, cover_mse_value, secret_ssim_value, cover_ssim_value), secret_reveal)
# mean_loss_val_this_epoch = sum(loss_val_this_epoch) / len(loss_val_this_epoch)
# mean_secret_mse_val_this_epoch = sum(secret_mse_val_this_epoch) / len(secret_mse_val_this_epoch)
# mean_cover_mse_val_this_epoch = sum(cover_mse_val_this_epoch) / len(cover_mse_val_this_epoch)
# mean_secret_ssim_this_epoch = sum(secret_ssim_this_epoch) / len(secret_ssim_this_epoch)
# mean_cover_ssim_this_epoch = sum(cover_ssim_this_epoch) / len(cover_ssim_this_epoch)
mean_loss_val_this_epoch = np.mean(loss_val_this_epoch)
mean_secret_mse_val_this_epoch = np.mean(secret_mse_val_this_epoch)
mean_cover_mse_val_this_epoch = np.mean(cover_mse_val_this_epoch)
mean_secret_ssim_this_epoch = np.mean(secret_ssim_this_epoch)
mean_cover_ssim_this_epoch = np.mean(cover_ssim_this_epoch)
print('validation loss: %.4f' % mean_loss_val_this_epoch)
print('secret mse: %.4f' % mean_secret_mse_val_this_epoch)
print('cover mse : %.4f' % mean_cover_mse_val_this_epoch)
print('secret ssim: %.4f' % mean_secret_ssim_this_epoch)
print('cover ssim: %.4f' % mean_cover_ssim_this_epoch)
if __name__ == '__main__':
train_model = Model()
train_model.train()
# train_model.test_performance(train_model.log_path)
# train_model.test_performance('logs/0427-1506')
# train_model.test_performance('logs/0428-2048')
# train_model.test_performance('logs/0505-1617')
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 21,883 | extension: py
num_repo_files: 14 | filename: SingleSizeModel.py | num_lang_files: 6
alphanum_fraction: 0.624412 | alpha_fraction: 0.607458 | hex_fraction: 0 | num_lines: 370 | avg_line_length: 58.091892 | max_line_length: 303

repo_name: joeljwilliams/csc312-project-tvmdb | __id__: 18,554,258,724,002
blob_id: 20e72ede1a2e21d7954d491f8a6ec1e86180fd2f | directory_id: ac85cf7e8e4047359205ddd12c343fc77a192f4b | content_id: 445976abd49b2d8b2e0a38931e0b941a3be47e68
path: /mediafollower/models.py | detected_licenses: [] | license_type: no_license
repo_url: https://github.com/joeljwilliams/csc312-project-tvmdb
snapshot_id: 2eaf6b708985c3c04ca90944c9b3e207c7e5da53 | revision_id: 239347fbf0d5420927927847046331fa00ddd515 | branch_name: refs/heads/master
visit_date: 2022-09-16T21:16:21.675728 | revision_date: 2013-05-22T14:24:25 | committer_date: 2013-05-22T14:24:25
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null

content:
from django.db import models
from django.contrib.auth.models import User
class Genre(models.Model):
name = models.CharField(max_length=20)
def __unicode__(self):
return self.name
class Media(models.Model):
title = models.CharField(max_length=45)
description = models.CharField(max_length=255)
premiere = models.DateField()
rating = models.IntegerField()
votes = models.IntegerField()
external_id = models.CharField(max_length=10)
genres = models.ManyToManyField(Genre)
def __unicode__(self):
return self.title
class UserProfile(models.Model):
user = models.OneToOneField(User)
gender = models.CharField(max_length=1)
birth_date = models.DateField()
media = models.ManyToManyField(Media)
#class User(models.Model):
# username = models.CharField(max_length=45)
# email = models.EmailField()
# gender = models.CharField(max_length=1)
# password = models.CharField(max_length=32)
# birth_date = models.DateField()
# admin = models.BooleanField()
# media = models.ManyToManyField(Media)
class Episode(models.Model):
title = models.CharField(max_length=45)
airdate = models.DateTimeField()
description = models.CharField(max_length=255)
season = models.IntegerField()
number = models.IntegerField()
series = models.ForeignKey(Media)
def __unicode__(self):
return self.title
class Media_Request(models.Model):
title = models.CharField(max_length=45)
external_id = models.CharField(max_length=10)
user = models.ForeignKey(User)
def __unicode__(self):
return self.title
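# A minimal usage sketch with hypothetical data (run inside a Django shell;
# names and values below are illustrative only):
#
#   drama = Genre.objects.create(name='Drama')
#   show = Media.objects.create(title='Example Show', description='...',
#                               premiere='2013-01-01', rating=8, votes=100,
#                               external_id='tt0000000')
#   show.genres.add(drama)
#   Media.objects.filter(genres__name='Drama')   # queryset containing `show`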
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,629 | extension: py
num_repo_files: 14 | filename: models.py | num_lang_files: 7
alphanum_fraction: 0.690608 | alpha_fraction: 0.675875 | hex_fraction: 0 | num_lines: 59 | avg_line_length: 26.610169 | max_line_length: 50

repo_name: zconn/PythonCert220Assign | __id__: 3,573,412,793,030
blob_id: aa0f793de060ef704b025448edd562bdd1eab1c7 | directory_id: 0c72282d601ccf840dd4e41b675c0675de7bc916 | content_id: 875eb1e1ec162d98942ae5ef6bb5b040507e808c
path: /students/DanCornutt/lessons/lesson03/alt_assignment/tutorial/tutorial_sqlite3.py | detected_licenses: [] | license_type: no_license
repo_url: https://github.com/zconn/PythonCert220Assign
snapshot_id: c7fedd9ffae4f9e74e5e4dfc59bc6c511c7900ab | revision_id: 99271cd60485bd2e54f8d133c9057a2ccd6c91c2 | branch_name: refs/heads/master
visit_date: 2020-04-15T14:42:08.765699 | revision_date: 2019-03-14T09:13:36 | committer_date: 2019-03-14T09:13:36
github_id: 164,763,504 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_fork: true | gha_event_created_at: 2019-01-09T01:34:40 | gha_created_at: 2019-01-09T01:34:40 | gha_updated_at: 2019-01-08T20:21:44 | gha_pushed_at: 2019-01-08T20:21:43
gha_size: 788 | gha_stargazers_count: 0 | gha_forks_count: 0 | gha_open_issues_count: 0 | gha_language: null | gha_archived: false | gha_disabled: null

content:
import sqlite3
from sqlite3 import Error
def sql_connection():
    try:
        con = sqlite3.connect('mydatabase.db')
        print("Connection is established: database file 'mydatabase.db' created/opened")
        return con
    except Error as e:
        print(e)
def sql_table(con):
"""Creates db table and db"""
cursorObj = con.cursor()
cursorObj.execute("CREATE TABLE customer(customer_id integer PRIMARY KEY, first_name text, last_name text, home_address text, phone_number text, email_address text, status boolean, credit_limit float)")
con.commit()
def search_customer(con, customer_id):
    cursorObj = con.cursor()
    searched = cursorObj.execute("SELECT * FROM customer WHERE customer_id = ?",
                                 (customer_id,))
    return searched
if __name__ == '__main__':
con = sql_connection()
sql_table(con)
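    # A minimal follow-on sketch with hypothetical data for the table above:
    #   cur = con.cursor()
    #   cur.execute("INSERT INTO customer VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
    #               (1, 'Ada', 'Lovelace', '12 Main St', '555-0100',
    #                'ada@example.com', True, 1000.0))
    #   con.commit()
    #   print(search_customer(con, 1).fetchone())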
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 770 | extension: py
num_repo_files: 375 | filename: tutorial_sqlite3.py | num_lang_files: 326
alphanum_fraction: 0.675325 | alpha_fraction: 0.671429 | hex_fraction: 0 | num_lines: 31 | avg_line_length: 23.83871 | max_line_length: 206

repo_name: RevansChen/online-judge | __id__: 6,459,630,823,750
blob_id: 36dfead4ae087add5584ea0b4fc0967ddb78fda5 | directory_id: abad82a1f487c5ff2fb6a84059a665aa178275cb | content_id: 09a96be24d3ece9180479365ab78167ed3d21c1d
path: /Codewars/7kyu/the-office-ii-boredom-score/Python/test.py | detected_licenses: ["MIT"] | license_type: permissive
repo_url: https://github.com/RevansChen/online-judge
snapshot_id: 8ae55f136739a54f9c9640a967ec931425379507 | revision_id: ad1b07fee7bd3c49418becccda904e17505f3018 | branch_name: refs/heads/master
visit_date: 2021-01-19T23:02:58.273081 | revision_date: 2019-07-05T09:42:40 | committer_date: 2019-07-05T09:42:40
github_id: 88,911,035 | star_events_count: 9 | fork_events_count: 0 | gha_* fields: null

content:
# Python - 3.6.0
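# Note: the `Test` framework and the `boredom` solution under test are
# injected by the Codewars kata runner; this file is not runnable standalone.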
Test.describe('Basic tests')
Test.assert_equals(boredom({
'tim': 'change', 'jim': 'accounts',
'randy': 'canteen', 'sandy': 'change', 'andy': 'change', 'katie': 'IS',
'laura': 'change', 'saajid': 'IS', 'alex': 'trading', 'john': 'accounts',
'mr': 'finance'
}), 'kill me now')
Test.assert_equals(boredom({
'tim': 'IS', 'jim': 'finance',
'randy': 'pissing about', 'sandy': 'cleaning', 'andy': 'cleaning',
'katie': 'cleaning', 'laura': 'pissing about', 'saajid': 'regulation',
'alex': 'regulation', 'john': 'accounts', 'mr': 'canteen'
}), 'i can handle this')
Test.assert_equals(boredom({
'tim': 'accounts', 'jim': 'accounts',
'randy': 'pissing about', 'sandy': 'finance', 'andy': 'change',
'katie': 'IS', 'laura': 'IS', 'saajid': 'canteen', 'alex': 'pissing about',
'john': 'retail', 'mr': 'pissing about'
}), 'party time!!')
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 889 | extension: py
num_repo_files: 2,569 | filename: test.py | num_lang_files: 1,607
alphanum_fraction: 0.572553 | alpha_fraction: 0.569179 | hex_fraction: 0 | num_lines: 21 | avg_line_length: 41.333333 | max_line_length: 79

repo_name: karthikpappu/pyc_source | __id__: 10,015,863,776,042
blob_id: c16ff6024908f8357bc1834fb7db8b6df73e9c37 | directory_id: 91fa095f423a3bf47eba7178a355aab3ca22cf7f | content_id: 2464928f9ce1fe902b972a899f625ad011eeabe5
path: /pypi_install_script/spamme-0.0.1.tar/setup.py | detected_licenses: [] | license_type: no_license
repo_url: https://github.com/karthikpappu/pyc_source
snapshot_id: 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | revision_id: 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | branch_name: refs/heads/master
visit_date: 2023-02-04T11:27:19.098827 | revision_date: 2020-12-27T04:51:17 | committer_date: 2020-12-27T04:51:17
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null

content:
from setuptools import setup
setup(name = 'spamme',
version = '0.0.1',
description = 'The trash clean utilities',
url = 'http://fake.usr.com',
author = 'Z.G.',
author_email = 'zg@fake.usr.cim',
license ='MIT',
packages = ['spamme'],
zip_safe = False)
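# Installation sketch (assumes a spamme/ package directory sits next to this
# setup.py, as packages=['spamme'] requires):
#   pip install .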
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 284 | extension: py
num_repo_files: 114,545 | filename: setup.py | num_lang_files: 111,506
alphanum_fraction: 0.588028 | alpha_fraction: 0.577465 | hex_fraction: 0 | num_lines: 10 | avg_line_length: 27.3 | max_line_length: 46

repo_name: alipay/alipay-sdk-python-all | __id__: 11,716,670,794,198
blob_id: 6b29a6a632181398beb0975f108629114b5a55be | directory_id: 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | content_id: 990bce50e10a201d145d644966ef44c95a441668
path: /alipay/aop/api/response/AlipayInsCooperationProductQrcodeApplyResponse.py | detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_url: https://github.com/alipay/alipay-sdk-python-all
snapshot_id: 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | revision_id: 1fad300587c9e7e099747305ba9077d4cd7afde9 | branch_name: refs/heads/master
visit_date: 2023-08-27T21:35:01.778771 | revision_date: 2023-08-23T07:12:26 | committer_date: 2023-08-23T07:12:26
github_id: 133,338,689 | star_events_count: 247 | fork_events_count: 70
gha_license_id: Apache-2.0 | gha_fork: false | gha_event_created_at: 2023-04-25T04:54:02 | gha_created_at: 2018-05-14T09:40:54 | gha_updated_at: 2023-04-20T12:00:51 | gha_pushed_at: 2023-04-16T10:42:27
gha_size: 12,485 | gha_stargazers_count: 221 | gha_forks_count: 54 | gha_open_issues_count: 27 | gha_language: Python | gha_archived: false | gha_disabled: false

content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayInsCooperationProductQrcodeApplyResponse(AlipayResponse):
def __init__(self):
super(AlipayInsCooperationProductQrcodeApplyResponse, self).__init__()
self._code_url = None
@property
def code_url(self):
return self._code_url
@code_url.setter
def code_url(self, value):
self._code_url = value
def parse_response_content(self, response_content):
response = super(AlipayInsCooperationProductQrcodeApplyResponse, self).parse_response_content(response_content)
if 'code_url' in response:
self.code_url = response['code_url']
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 745 | extension: py
num_repo_files: 9,331 | filename: AlipayInsCooperationProductQrcodeApplyResponse.py | num_lang_files: 9,330
alphanum_fraction: 0.691275 | alpha_fraction: 0.689933 | hex_fraction: 0 | num_lines: 25 | avg_line_length: 28.8 | max_line_length: 119

repo_name: Hayato-t/pythonNeuronSimulation | __id__: 9,096,740,783,705
blob_id: 86d60d77fd8024b2c1bb76fa4c528f4ae43f363a | directory_id: 6d9ce5b5a0afd585c6e714ed52509b75e2202661 | content_id: 52f939c75a0d6ba33298a47c33673f82ea9a5a78
path: /util/graphPlotter.py | detected_licenses: [] | license_type: no_license
repo_url: https://github.com/Hayato-t/pythonNeuronSimulation
snapshot_id: 0854e44a5984d8d77fd1e405b08bc276cdfd2569 | revision_id: 372d542b8f2a88f1b6d5b199698b3e1fa0ec0653 | branch_name: refs/heads/master
visit_date: 2021-05-02T07:13:45.994235 | revision_date: 2019-02-12T02:34:11 | committer_date: 2019-02-12T02:34:11
github_id: 120,871,690 | star_events_count: 0 | fork_events_count: 1 | gha_* fields: null

content:
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("/home/hayato/lib/python")
sys.path.append("./modules")
import argparse
import pickle
if len(sys.argv) != 2:
    print("this program requires 1 argument (filepath)")
    sys.exit()
with open(sys.argv[1], mode='rb') as f:
data = pickle.load(f)
print(data)
r_v_list = data['results']['r_v_list']
t = data['results']['t']
for v in r_v_list:
plt.plot(t, v)
plt.show()
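# Usage sketch (hypothetical filename): pass a pickle produced by the
# simulation, shaped like {'results': {'r_v_list': [...], 't': [...]}}:
#   python graphPlotter.py results.pkl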
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 472 | extension: py
num_repo_files: 57 | filename: graphPlotter.py | num_lang_files: 29
alphanum_fraction: 0.641949 | alpha_fraction: 0.635593 | hex_fraction: 0 | num_lines: 20 | avg_line_length: 21.6 | max_line_length: 56

repo_name: AgencyPMG/awslogin | __id__: 7,705,171,376,163
blob_id: 4b0742120748164f2d6eb9b1c3f1e9113a41c4f2 | directory_id: 578c147c09a30fb34ba1a187ef352ba9d084f6eb | content_id: 85b97c91562bd843dc3eb321f70ac426010e2641
path: /awslogin.py | detected_licenses: [] | license_type: no_license
repo_url: https://github.com/AgencyPMG/awslogin
snapshot_id: 90538a3df60bc4ba0d32bef8737c5fdb367c3616 | revision_id: b65de13281133e8265925b8beb20096f23c058bf | branch_name: refs/heads/master
visit_date: 2021-01-25T08:13:45.561783 | revision_date: 2017-06-08T13:47:58 | committer_date: 2017-06-08T13:47:58
github_id: 93,754,673 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null

content:
#!/usr/bin/env python
"""
A script that can be used to login to AWS via federated login.
This is useful when you have a single account with actual users/passwords and
allow role switching to other accounts.
Your credentials setup should be something like this:
# ~/.aws/credentials
[default]
region = us-east-1
aws_access_key_id = {yourAccessKeyId}
aws_secret_access_key = {yourSecretAccessKey}
[childaccount]
# X = child account ID
role_arn = arn:aws:iam::X:role/RoleName
source_profile = default
# Y = default account id
mfa_serial = arn:aws:iam::Y:mfa/UserName
region = us-east-1
# optional, but useful to display what the account is
# I like to use `UserName`.
role_session_name = UserName
"""
from __future__ import print_function
import argparse
import json
import webbrowser
try:
from urllib.parse import quote_plus
from urllib.request import urlopen
except ImportError:
from urllib import quote_plus, urlopen
import boto3 as aws
def _parse_args(args=None):
p = argparse.ArgumentParser(description='Log into AWS')
p.add_argument('profile', help='The AWS profile for which a login should be generated')
return p.parse_args(args)
def login(session, fetch=urlopen):
"""
Perform the login dance and return a URL that can be opened in the browser.
"""
creds = session.get_credentials() # will prompt for MFA
fed = {
'sessionId': creds.access_key,
'sessionKey': creds.secret_key,
'sessionToken': creds.token,
}
url = 'https://signin.aws.amazon.com/federation?Action=getSigninToken&Session={sess}'.format(
sess=quote_plus(json.dumps(fed))
)
resp = fetch(url)
body = json.loads(resp.read())
return 'https://signin.aws.amazon.com/federation?Action=login&Issuer=&Destination={dest}&SigninToken={token}'.format(
dest=quote_plus('https://console.aws.amazon.com/'),
token=quote_plus(body['SigninToken']),
)
def main(args=None):
args = _parse_args(args)
session = aws.Session(profile_name=args.profile)
url = login(session)
webbrowser.open_new_tab(url)
if __name__ == '__main__':
main()
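# Usage: pass the profile name configured in ~/.aws/credentials, e.g.
#   python awslogin.py childaccount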
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,196 | extension: py
num_repo_files: 3 | filename: awslogin.py | num_lang_files: 2
alphanum_fraction: 0.67714 | alpha_fraction: 0.675774 | hex_fraction: 0 | num_lines: 78 | avg_line_length: 27.153846 | max_line_length: 121

repo_name: alexbennett/aioguardian | __id__: 10,239,202,052,936
blob_id: 86307c895299cb2478ef30198b332578a17969f3 | directory_id: 4a53ac8b71481a6dfea359fb7c4b4139c0c5a9b4 | content_id: 36e739a438c5af3f5f88b703750477d4ae9a5942
path: /examples/test_sensor.py | detected_licenses: ["MIT"] | license_type: permissive
repo_url: https://github.com/alexbennett/aioguardian
snapshot_id: a8b8e80ec12b877c33dbc302156f62b7420e7317 | revision_id: b33e277f7972fc43c2e50f192d391a6fab94ff8c | branch_name: refs/heads/master
visit_date: 2022-12-14T23:33:37.183869 | revision_date: 2020-09-23T23:02:40 | committer_date: 2020-09-23T23:02:40
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null

content:
"""Run an example script to quickly test the guardian."""
import asyncio
import logging
from aioguardian import Client
from aioguardian.errors import GuardianError
_LOGGER = logging.getLogger(__name__)
async def main() -> None:
"""Create the aiohttp session and run the example."""
logging.basicConfig(level=logging.INFO)
async with Client("172.16.11.208") as guardian:
try:
pair_dump_response = await guardian.sensor.pair_dump()
_LOGGER.info("pair_dump_response command response: %s", pair_dump_response)
for uid in pair_dump_response["data"]["paired_uids"]:
paired_sensor_status_resp = await guardian.sensor.paired_sensor_status(
uid
)
_LOGGER.info(
"paired_sensor_status command response (UID: %s): %s",
uid,
paired_sensor_status_resp,
)
# pair_sensor_response = await guardian.sensor.pair_sensor("<UID>")
# _LOGGER.info("pair_response command response: %s", pair_sensor_response)
# unpair_sensor_response = await guardian.sensor.unpair_sensor("<UID>")
# _LOGGER.info("unpair_response command response: %s", unpair_sensor_response)
except GuardianError as err:
_LOGGER.info(err)
asyncio.run(main())
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,380 | extension: py
num_repo_files: 81 | filename: test_sensor.py | num_lang_files: 47
alphanum_fraction: 0.603623 | alpha_fraction: 0.596377 | hex_fraction: 0 | num_lines: 39 | avg_line_length: 34.384615 | max_line_length: 90

repo_name: jh247247/euler | __id__: 14,774,687,507,995
blob_id: 7117fd24a7227d0cfbd4e9dcf74ec4882f43ceef | directory_id: feae26a0f91a735326104af4031257eee23115c7 | content_id: c0e60acf68b7d63f1acce857c92d0849214f3269
path: /p21/p21.py | detected_licenses: [] | license_type: no_license
repo_url: https://github.com/jh247247/euler
snapshot_id: a9679e678e378e663551227ec98c6f03ad551dfb | revision_id: 8a1f25b3b7cff337a8f41c9de14f56e2a25298c9 | branch_name: refs/heads/master
visit_date: 2021-01-01T05:41:24.806989 | revision_date: 2015-08-18T02:39:51 | committer_date: 2015-08-18T02:39:51
github_id: 40,878,313 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null

content:
#!/usr/bin/python3.5
# gets the sum of the proper divisors of a number
def sumOfFactors(num):
    if num == 0:
        return 0
    return sum(x for x in range(1, int(num/2) + 1) if num % x == 0)
def isAmicable(num1, num2):
    return (sumOfFactors(num1) == num2 and
            sumOfFactors(num2) == num1)
amicable = []
for i in range(10000):
j = sumOfFactors(i)
    if j not in amicable \
       and i != j \
       and i == sumOfFactors(j):
amicable.append(i)
amicable.append(j)
print(amicable)
print(sum(amicable))
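# Sanity check against the classic amicable pair (220, 284):
# sumOfFactors(220) == 284 and sumOfFactors(284) == 220, so both numbers
# should appear in `amicable` and contribute 504 to the final sum.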
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 541 | extension: py
num_repo_files: 20 | filename: p21.py | num_lang_files: 19
alphanum_fraction: 0.598891 | alpha_fraction: 0.563771 | hex_fraction: 0 | num_lines: 23 | avg_line_length: 22.521739 | max_line_length: 64

repo_name: iamEi/Dropper | __id__: 7,584,912,252,818
blob_id: c873232d96078f108ba29d489ce08aad59d3f1ae | directory_id: b6e913f50ae50e7519d4a99e407fb73b42e0b7a8 | content_id: 0e8476141fbc32d82a3ca8a9524272132855221b
path: /main.pyw | detected_licenses: [] | license_type: no_license
repo_url: https://github.com/iamEi/Dropper
snapshot_id: e38c87cd674b18fcb78c9b7c614c40e92d23b88e | revision_id: 27f322c908ab5aab25ae9e12dd1340d83e30594c | branch_name: refs/heads/master
visit_date: 2023-08-29T23:05:09.687009 | revision_date: 2021-09-28T03:29:46 | committer_date: 2021-09-28T03:29:46
github_id: 409,289,547 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_fork: false | gha_event_created_at: 2021-09-28T03:14:56 | gha_created_at: 2021-09-22T17:06:09 | gha_updated_at: 2021-09-24T08:39:47 | gha_pushed_at: 2021-09-28T03:14:56
gha_size: 6,130 | gha_stargazers_count: 0 | gha_forks_count: 0 | gha_open_issues_count: 0 | gha_language: Python | gha_archived: false | gha_disabled: false

content:
import pygame, sys
from player import Player
from platforms import Platforms
from spikes import Spikes
from text import Text
#initializing pygame
pygame.init()
#set up
WIDTH = 500
HEIGHT = 700
title = 'Dropper'
screen = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption(title)
clock = pygame.time.Clock()
#LOADING ASSETS
#for background
bg = pygame.image.load('images/bg.jpg')
bg = pygame.transform.scale(bg,(500,700))
bg_rect = bg.get_rect(topleft = (0,0))
#for initial platform for player to stand at the start
initplat = pygame.image.load('images/platform_2.png')
initplat = pygame.transform.scale(initplat, (200, 35))
initplat_rect = initplat.get_rect(midbottom = (WIDTH/2,300))
#game over and background music
over = pygame.mixer.Sound('sounds/game_over.mp3')
bgm = pygame.mixer.Sound('sounds/Melody.mp3')
bgm.play(loops = -1,fade_ms=2000)
#initial values
start_time = 0
score = 0
intro = True
running = False
speed = 2
#setting frequency of platform spawn
spawn_timer = pygame.USEREVENT + 1
pygame.time.set_timer(spawn_timer,800)
speedup_timer = pygame.USEREVENT + 2
pygame.time.set_timer(speedup_timer,2000)
#creating text display object
text = Text(screen,bg,bg_rect)
#creating sprites
player = pygame.sprite.GroupSingle()
player.add(Player())
Player = player.sprite
platforms = pygame.sprite.Group()
spike_group = pygame.sprite.Group()
for i in [-10,120,250,380]:
spike_group.add(Spikes(i,-15))
def speedup():
global speed
speed += 0.1
for i in platforms.sprites():
i.speed = speed
def reset_score():
global score, start_time
score = 0
start_time = pygame.time.get_ticks()//750
#checking if player goes out of bounds
def game_over():
if HEIGHT < Player.rect.top or pygame.sprite.spritecollide(Player,spike_group,False):
text.save_highscore(score,text.get_hs())
return False
return True
#checking if player is colliding with platform, if yes, dont let the player move further
def path_blocked(platform):
if Player.rect.colliderect(platform) and not on_platform(platform):
if abs(platform.left - Player.rect.right) < 10:
return True
if abs(platform.right - Player.rect.left) < 10:
return True
return False
#checking if player is standing on platform, if yes, do not apply gravity
def on_platform(platform):
if Player.rect.colliderect(platform):
if abs(platform.top - Player.rect.bottom) < 20 and ((platform.left < Player.rect.right) and (Player.rect.left < platform.right)):
return True
return False
#controlling the flow of the game states
def gamestate():
global running, intro,score, speed
if intro:
text.display_intro()
#pressing SPACE would make intro = FALSE, running the game.
if not intro:
reset_score()
running = True
elif running:
over.stop()
for platform in platforms.sprites():
Platform = platform.rect
if path_blocked(Platform):
Player.block_movement()
if on_platform(Platform):
Player.rect.bottom = Platform.top
#draw background
screen.blit(bg,bg_rect)
#initial platform
initplat_rect.y -= 1
screen.blit(initplat,initplat_rect)
if Player.rect.colliderect(initplat_rect):
Player.rect.bottom = initplat_rect.top
#draw platforms
platforms.draw(screen)
player.draw(screen)
#spikes at the top
spike_group.draw(screen)
score = text.display_score(score,start_time)
if score > text.get_hs():
text.new_highscore()
player.update()
platforms.update()
#check if game is over
running = game_over()
#if gameover
elif not running:
#music
bgm.fadeout(1000)
over.play(loops = -1, fade_ms=1900)
#reset numbers
reset_score()
text.display_highscore()
speed = 2
#reset player position
Player.reset()
initplat_rect.center = (250,250)
#clear all platforms
platforms.empty()
#gameloop
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
if running:
if event.type == spawn_timer:
platforms.add(Platforms(speed))
if int(speed) == 3:
pygame.time.set_timer(spawn_timer,600)
if int(speed) == 4:
pygame.time.set_timer(spawn_timer,500)
if event.type == speedup_timer:
if speed < 5:
speedup()
else:
pygame.time.set_timer(speedup_timer,0)
if not running:
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
# bgm.play(loops = -1)
pygame.time.set_timer(spawn_timer,700)
intro = False
running = True
gamestate()
pygame.display.flip()
clock.tick(60)
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,470 | extension: pyw
num_repo_files: 6 | filename: main.pyw | num_lang_files: 5
alphanum_fraction: 0.711633 | alpha_fraction: 0.689038 | hex_fraction: 0 | num_lines: 191 | avg_line_length: 22.408377 | max_line_length: 131

repo_name: williamsdoug/GitAnalysis | __id__: 12,369,505,855,416
blob_id: 54a3592311ac9336fc191edd5c1b337186e621c2 | directory_id: 640411253fcf4dfc71b70ec923b0864ccd58b837 | content_id: 1ed82534ee81d0300b8ff84a1e6d19719d68ad15
path: /dev/NewDiff.py | detected_licenses: [] | license_type: no_license
repo_url: https://github.com/williamsdoug/GitAnalysis
snapshot_id: cb6dce95e7a92b0d1d2cf2db3c94aec6ef3be3bf | revision_id: da91b541d2531a41cc1f4e02537b7803b84b20d3 | branch_name: refs/heads/master
visit_date: 2016-09-06T15:50:50.345898 | revision_date: 2015-07-02T13:28:51 | committer_date: 2015-07-02T13:28:51
github_id: 23,364,094 | star_events_count: 1 | fork_events_count: 0 | gha_* fields: null

content:
#
# NewDiff.py - Language-specific change detection for Python
#
# Author: Doug Williams - Copyright 2015
#
# Last updated 5/13/2015
#
# History:
# - 5/5/15 - Initial version of file
# - 5/8/15 - Continued active development
# - 5/10/15 - Various bug fixes while testing against nova, glance,
# swift, heat, cinder
# - 5/11/15 - Add support for generation of blame mask.
# - 5/12/15 - Fix handling of TryExcept
# - 5/13/15 - Integration with end-to-end blame processing
# - 5/14/15 - Bug fix for TryExcept, other error handling
#
#
# Top Level Routines:
#
# from NewDiff import process_commit_diff
#
# Yet Another attempt at python diff
#
# Approach:
# - use a technique like hashes to primarily resolve pairing
# - can use diff for the same
# - Establish statement level affinity based on:
# - lines in common
# - proximity to common lines
#
# Steps:
# 1. Parse Each tree [done]
# - Also associate line ranges
# 2. Identify ranges for each entry [done]
# - If all entries has pair, then discard
# - intersection of tokens == union of tokens
# - If differences, then refine further
# 3. Annotate pairings [partial]
# - depth-first, associate tokens with sub-trees [partial]
# - match trees where sub-trees matched (may currently miss
# if insufficient data in header) [to do]
# - verify uniqueness
# 4. check for spurious mis-matches using compare_ast [to do]
# - spurious mis-matches may be due to comments
#
# Outputs:
# - Sparse sub-tree of differences
# - for use in complexity calculation for feature extraction
# - List of line numbers
# - for use in blame calculation)
#
#
# ###Proposed Changes to existing code:
#
# 1) associate unique ID with each subtree, and create
# table mapping ID to subtree [DONE]
#
# 2) annotate trees [DONE]
# - Include ID in each subtree
# - Parent ID in each child
#
# 3) for each partial match, use ast_compare to verify if false mismatch
# - if so, promote tokens to parent and remove matching subtrees from
# subtrees list
# - also remember to update token map for each token moved to parent
#
# 4) For unmatched nodes with matching subtrees, use parent of matched nodes
#
# 5) We may need to drill-down below function first level statements.
# If so, do so only after matching functions, and do so recursively
# in pairs.
#
import ast
# from pprint import pprint
import collections
import re
import itertools
import git
# sys.path.append('./dev')
# from git_analysis_config import get_repo_name
class ParserError(Exception):
"""Generic error for parser-related issues."""
pass
def debug_ast_strip_lineno(line):
"""Strips lineno and col_offset from ast.dump output"""
line = re.sub(r",?\s+lineno=-?\d+", "", line)
line = re.sub(r",?\s+col_offset=-?\d+", "", line)
return line
def debugSanityCheckDump(node):
"""Validates that dump is behaving correctly,
should be removed after debug"""
if (ast.dump(node, include_attributes=False)
!= debug_ast_strip_lineno(ast.dump(node,
include_attributes=True))):
string1 = ast.dump(node, include_attributes=False)
string2 = debug_ast_strip_lineno(ast.dump(node,
include_attributes=True))
print
print 'compare_ast mismatch'
print string1
print
print string2
print len(string1) == len(string2)
differences = [i for i in range(min(len(string1), len(string2)))
if string1[i] != string2[i]]
print differences
print [string1[i] for i in differences]
print
print [string2[i] for i in differences]
assert False
return True
def compare_ast(node1, node2):
"""My version of compare_ast based on ast.dump"""
if type(node1) is not type(node2):
return False
if isinstance(node1, ast.AST):
debugSanityCheckDump(node1)
debugSanityCheckDump(node2)
return (ast.dump(node1, include_attributes=False)
== ast.dump(node2, include_attributes=False))
elif isinstance(node1, list):
return all(itertools.starmap(compare_ast,
itertools.izip(node1, node2)))
else:
return node1 == node2
# from: http://stackoverflow.com/questions/3312989/
# elegant-way-to-test-python-asts-for-equality-not-reference-or-object-identity
def old_compare_ast(node1, node2, debug=True):
if type(node1) is not type(node2):
return False
if isinstance(node1, ast.AST):
for k, v in vars(node1).iteritems():
if k in ('lineno', 'col_offset', 'ctx'):
continue
if not compare_ast(v, getattr(node2, k)):
return False
return True
elif isinstance(node1, list):
return all(itertools.starmap(compare_ast,
itertools.izip(node1, node2)))
else:
return node1 == node2
def reduceRanges(ranges, verbose=False):
"""Combined overlapping of adjacent ranges"""
if len(ranges) < 2:
return ranges
if verbose:
print 'Before:', ranges
ranges = sorted(ranges, key=lambda x: x[0])
result = [ranges[0]]
for r in ranges[1:]:
        if r[0] <= result[-1][1] + 1:
            result[-1][1] = max(result[-1][1], r[1])  # max() keeps a nested range from shrinking the merged end
else:
result.append(r)
if verbose:
print 'After:', result
return result
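# Example: reduceRanges([[4, 6], [1, 3], [10, 12]]) sorts the input and merges
# the two adjacent ranges into [[1, 6], [10, 12]].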
def tokenGetLineno(tok, side='A'):
"""Extracts line number of match token"""
vals = tok.split('_')
if len(vals) <= 1:
return -1
if side == 'A':
return int(vals[0][1:]) # strip off A before lineno
else:
return int(vals[1][1:]) # strip off B before lineno
def generateRangesForBlame(tree, idxTree, side='A', depth=0, verbose=False):
"""Returns list of line ranges for use with get_blame"""
ranges = []
if tree['mismatch'] == 0:
return ranges
if 'header_mismatch' in tree and tree['header_mismatch'] > 0:
headerRange = [tree['start'], tree['end']]
if 'subtreesIdx' in tree and len(tree['subtreesIdx']) > 0:
start = min([idxTree[i]['start'] for i in tree['subtreesIdx']])
headerRange[1] = max(tree['start'], start - 1)
if verbose:
print 'setting header range to:', headerRange,
print depth, tree['start'], tree['end']
ranges.append(headerRange)
if 'subtreesIdx' in tree:
if verbose:
print 'processing subtrees', depth
for i in tree['subtreesIdx']:
if idxTree[i]['mismatch'] > 0:
if verbose:
print 'processing subtree entry'
ranges = ranges + generateRangesForBlame(idxTree[i], idxTree,
depth=depth+1)
else:
if verbose:
print 'subtree match', idxTree[i]['end'],
print idxTree[i]['end']
else:
ranges.append([tree['start'], tree['end']])
# remove any other matches from range
for tok in tree['tokens']:
val = tokenGetLineno(tok, side=side)
if val in ranges:
print 'deleting:', val
assert False
ranges.remove(val)
return ranges
def treeViewer(tree, idxTree, depth=0, indent=4, trim=False,
idxOther=False):
"""Displays tree and it's sub-trees, optionally prune matching sub-trees"""
if trim and tree['mismatch'] == 0:
if idxOther: # Check if pair has mis-match
if ('pair' in tree and tree['pair']
and idxOther[tree['pair']]['mismatch'] == 0):
return
else:
return
print ' '*depth*indent, type(tree['ast']).__name__, 'ID:', tree['idxSelf'],
print '[', tree['start'], ',', tree['end'], ']',
print 'Mismatch:', tree['mismatch'], 'Tokens:', len(tree['tokens']),
print 'Insert:', tree['insert'],
if 'pair' in tree:
print 'Pair:', tree['pair']
else:
print
if False: # tree['tokens']:
print
print 'Tokens:', tree['tokens']
print
if 'header_mismatch' in tree and tree['header_mismatch'] > 0:
print ' '*(depth+1)*indent, 'Header - Mismatch:',
print tree['header_mismatch'], 'Tokens:', len(tree['header_tokens'])
if 'subtreesIdx' in tree:
# for t in tree['subtrees']:
for i in tree['subtreesIdx']:
treeViewer(idxTree[i], idxTree, depth=depth+1,
trim=trim, idxOther=idxOther)
def get_st_from_blob(blob, verbose=False):
"""Extracts Syntax Tree (AST) from git blob"""
try:
st = ast.parse(blob.data_stream.read(), filename=blob.path)
except SyntaxError:
print
print 'Syntax Error while processing: ', blob.path
print
raise ParserError
return st
def get_lines_from_blob(blob):
"""Extracts line count from blob"""
data = blob.data_stream.read()
return len(data.splitlines())
minus_re = re.compile(r'-(\d+)')
plus_re = re.compile(r'\+(\d+)')
def matchMakeTokens(match, sideB=False):
"""Convert entries to standard tokens for comparison"""
match[0] = 'space'
for i, val in enumerate(match):
if isinstance(val, int):
if sideB:
match[i] = 'A' + str(val) + '_B' + str(i)
else:
match[i] = 'A' + str(i) + '_B' + str(val)
return match
def MatchFlagInsertsHelper(thisMatch, otherMatch, tokenToOtherLine):
"""Flag tokens prior to insert for a single side"""
for i in range(2, len(thisMatch)):
# detect line before insert
if (not thisMatch[i] and thisMatch[i-1]
and not thisMatch[i-1].endswith('_insert')):
otherIdx = tokenToOtherLine[thisMatch[i-1]]
# print i, len(thisMatch), otherIdx, len(otherMatch)
if (otherIdx < len(otherMatch) - 1
and otherMatch[otherIdx+1]):
newToken = thisMatch[i-1] + '_insert'
thisMatch[i-1] = newToken
otherMatch[otherIdx] = newToken
return thisMatch, otherMatch
def matchFlagInserts(matchA, matchB):
"""Flag tokens prior to insert"""
tokenToLineA = {matchA[i]: i for i in range(1, len(matchA)) if matchA[i]}
tokenToLineB = {matchB[i]: i for i in range(1, len(matchB)) if matchB[i]}
matchA, matchB = MatchFlagInsertsHelper(matchA, matchB, tokenToLineB)
matchB, matchA = MatchFlagInsertsHelper(matchB, matchA, tokenToLineA)
return matchA, matchB
def matchFlagBlankTokens(match, data):
# tag blank lines
for i, line in enumerate(data):
if len(line.strip()) == 0 or line.strip()[0] == '#':
if match[i+1] and match[i+1].endswith('_insert'):
match[i+1] = 'blank_insert'
else:
match[i+1] = 'blank'
return match
def makeAllTokens(matchA, dataA, matchB, dataB):
"""Generate match tokens, identifying blank lines and inserts"""
matchA = matchMakeTokens(matchA, sideB=False)
matchB = matchMakeTokens(matchB, sideB=True)
matchA, matchB = matchFlagInserts(matchA, matchB)
matchA = matchFlagBlankTokens(matchA, dataA)
matchB = matchFlagBlankTokens(matchB, dataB)
return matchA, matchB
def getBlobData(blob):
"""Reads data from blob and splits lines"""
return blob.data_stream.read().splitlines()
def parse_diff_txt(txt, a_blob, b_blob, verbose=False, debug=False):
"""Parses git diff, returning line numbers containing changes.
Per-line values in matchA and matchB:
None => Mismatch
-1 => blank
int => matching lineno
"""
dataA = getBlobData(a_blob)
sizeA = len(dataA)
dataB = getBlobData(b_blob)
sizeB = len(dataB)
if debug:
print len(dataA), len(dataB)
# 1 based indexing, ignore element 0
matchA = [None] * (sizeA + 1)
matchB = [None] * (sizeB + 1)
lineA = -1
lineB = -1
curA = 1
curB = 1
changesA = []
changesB = []
lines = txt.split('\n')
for line in lines:
if debug:
print '*', line
if line.startswith('@@'): # Start of diff hunk
range_info = line.split('@@')[1]
match = re.search(minus_re, range_info) # -start, len
if match:
txt = match.group()
lineA = int(txt[1:])
if lineA == 0: # special-case for first line inserts
curA = 0
match = re.search(plus_re, range_info) # +start, len
if match:
txt = match.group()
lineB = int(txt[1:])
if lineB == 0: # special-case for first line inserts
curB = 0
if debug:
print curA, lineA, curB, lineB
assert (lineA - lineB) == (curA - curB)
while curA < lineA:
matchA[curA] = curB
matchB[curB] = curA
curA += 1
curB += 1
if debug:
print '= curA', curA, 'curB', curB
continue
elif line.startswith('--- a/'):
continue
elif line.startswith('+++ b/'):
continue
elif line.startswith('-'):
if debug:
print 'curA', curA
changesA.append(lineA)
lineA += 1
curA += 1
elif line.startswith('+'):
if debug:
print 'curB', curB
changesB.append(lineB)
lineB += 1
curB += 1
elif line.startswith(' '):
if debug:
print 'curA', curA, 'curB', curB
if verbose:
print 'A/B', line
matchA[curA] = curB
matchB[curB] = curA
curA += 1
curB += 1
lineA += 1
lineB += 1
if debug:
print 'Finally:', curA, len(matchA), curB, len(matchB)
while curA < len(matchA) and curB < len(matchB):
if debug:
print '+ curA', curA, 'curB', curB
matchA[curA] = curB
matchB[curB] = curA
curA += 1
curB += 1
computes_changes = [i for i, v in enumerate(matchA) if not v and i != 0]
if (set(changesA).difference(set(computes_changes))
or set(computes_changes).difference(set(changesA))):
print 'Mismatch A!'
computes_changes = [i for i, v in enumerate(matchB) if not v and i != 0]
if (set(changesB).difference(set(computes_changes))
or set(computes_changes).difference(set(changesB))):
print 'Mismatch B!'
# now strip out whitespace
matchA, matchB = makeAllTokens(matchA, dataA, matchB, dataB)
return matchA, matchB
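# Token format sketch: after makeAllTokens, a line present on both sides
# carries a token such as 'A12_B14' (line 12 in A matches line 14 in B),
# blank/comment-only lines become 'blank', and a matching line that sits
# directly before an insertion gets an '_insert' suffix.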
def newTree(st, treeIdx, parentTree=None, start=None, end=None):
"""Creates new subtree and inserts into index"""
result = {'ast': st, 'idxSelf': len(treeIdx)}
treeIdx.append(result)
if start:
result['start'] = start
if end is not None:
result['end'] = end
if parentTree:
result['idxParent'] = parentTree['idxSelf']
else:
result['idxParent'] = -1
return result
def buildTree(st, end, match, text):
"""Builds nested sub-trees from Python AST - top level"""
assert isinstance(st, ast.Module)
treeIdx = []
# print 'End:', end
treetop = newTree(st, treeIdx, start=1, end=end)
buildTree_helper(treetop, match, treeIdx, text)
# pruneDetail(treetop, treeIdx)
return treetop, treeIdx
def buildTree_helper(tree, match, treeIdx, text, verbose=False):
"""Recursively builds nested sub-trees from Python AST"""
blankLineTrimmer(tree, text)
tree['tokens'] = [match[i] for i in range(tree['start'], tree['end']+1)
if match[i] and not match[i].startswith('blank')]
tree['mismatch'] = sum([1 for i in range(tree['start'], tree['end']+1)
if not match[i]])
tree['insert'] = sum([1 for i in range(tree['start'], tree['end']+1)
if match[i] and match[i].endswith('_insert')])
if type(tree['ast']) in [ast.Module, ast.ClassDef, ast.FunctionDef,
ast.If, ast.For, ast.While, ast.With,
ast.TryExcept, ast.TryFinally,
ast.ExceptHandler]:
# body
subtrees = [newTree(st, treeIdx, parentTree=tree, start=st.lineno)
for st in tree['ast'].body]
# handlers
if type(tree['ast']) in [ast.TryExcept]:
subtrees += [newTree(st, treeIdx, parentTree=tree, start=st.lineno)
for st in tree['ast'].handlers]
# orelse
if type(tree['ast']) in [ast.If, ast.For, ast.While]:
subtrees += [newTree(st, treeIdx, parentTree=tree, start=st.lineno)
for st in tree['ast'].orelse]
# finalbody
if type(tree['ast']) in [ast.TryFinally]:
subtrees += [newTree(st, treeIdx, parentTree=tree, start=st.lineno)
for st in tree['ast'].finalbody]
#
# Common back-end processing
#
if len(subtrees) > 0:
all_start = [x['start'] for x in subtrees] + [tree['end'] + 1]
for i, subtree in enumerate(subtrees):
subtree['end'] = max(all_start[i], all_start[i+1] - 1)
buildTree_helper(subtree, match, treeIdx, text)
tree['subtreesIdx'] = [t['idxSelf'] for t in subtrees]
# now compute header:
firstSubtreeLineno = min([t['start'] for t in subtrees])
tree['header_tokens'] = [match[i]
for i in range(tree['start'],
firstSubtreeLineno)
if match[i]]
tree['header_mismatch'] = sum([1
for i in range(tree['start'],
firstSubtreeLineno)
if not match[i]])
if verbose:
if type(tree['ast']) in [ast.If, ast.For, ast.With, ast.While,
ast.TryFinally, ast.TryExcept,
ast.ExceptHandler]:
print 'found', type(tree['ast'])
def blankLineTrimmer(tree, text):
"""Update start and end values to eliminate blank lines"""
# trim spaces at start
if 'end' not in tree:
return
while (tree['end'] > tree['start']
and text[tree['start']-1].strip() == ''):
# print 'Stripping line', tree['end'], text[tree['end']-1]
tree['start'] += 1
# trim spaces at end
while (tree['end'] > tree['start']
and text[tree['end']-1].strip() == ''):
# print 'Stripping line', tree['start'], text[tree['start']-1]
tree['end'] -= 1
def tokenMapper(tree, tokenMap, idxTree, side='A'):
for token in tree['tokens']:
if not token.startswith('blank'):
tokenMap[token][side] = tree
if 'subtreesIdx' in tree:
for i in tree['subtreesIdx']:
tokenMapper(idxTree[i], tokenMap, idxTree, side=side)
def remove_invalid_tokens(tokens, tree, idxTree):
"""Removes invalid tokens from entry at it's parents"""
before = len(tree['tokens'])
for tok in tokens:
if tok in tree['tokens']:
tree['tokens'].remove(tok)
tree['mismatch'] += before - len(tree['tokens'])
if 'header_tokens' in tree:
before = len(tree['header_tokens'])
for tok in tokens:
if tok in tree['header_tokens']:
tree['header_tokens'].remove(tok)
tree['header_mismatch'] += before - len(tree['header_tokens'])
if tree['idxParent'] != -1:
remove_invalid_tokens(tokens, idxTree[tree['idxParent']], idxTree)
def cleanup_matches(tree, pairs, idxTree, otherIdxTree,
tokenMap, verbose=False):
"""Clean-up spurious matches (ie: other values in pairs)"""
if verbose:
if 'pair' in tree:
print 'selected pair:', tree['pair']
print 'Other candidates to be ignored'
tokens_to_ignore = set([])
for p in pairs:
if 'pair' in tree and p['idxSelf'] == tree['pair']:
continue
# Compute intersection to determine tokens and
# header-tokens spanning thisTreee and candidate
# and remove from ignored pair
common_tokens = set(p['tokens']).intersection(set(tree['tokens']))
tokens_to_ignore = tokens_to_ignore.union(common_tokens)
if verbose:
print 'ignoring:', list(common_tokens)
remove_invalid_tokens(list(common_tokens), p, otherIdxTree)
    # Now remove from this tree and its parents
if verbose:
print 'ignoring for this tree:', list(tokens_to_ignore)
remove_invalid_tokens(list(tokens_to_ignore), tree, idxTree)
if verbose:
print 'removing from token map as well'
for tok in tokens_to_ignore:
del tokenMap[tok]
def computePairs(tree, tokenMap, idxTree, otherIdxTree,
thisSide='A', verbose=False):
pairs = []
if thisSide == 'A':
otherSide = 'B'
else:
otherSide = 'A'
if 'subtreesIdx' in tree:
tokens = tree['header_tokens']
for i in tree['subtreesIdx']:
computePairs(idxTree[i], tokenMap, idxTree, otherIdxTree,
thisSide=thisSide)
else:
tokens = tree['tokens']
for tok in tokens:
if not tok.startswith('blank'):
if (thisSide not in tokenMap[tok]
or otherSide not in tokenMap[tok]):
print 'Skipping empty token', tok
continue
this = tokenMap[tok][thisSide]
match = tokenMap[tok][otherSide]
if this == tree and match not in pairs:
pairs.append(match)
elif this != tree:
print 'Skipping', tok
if len(pairs) == 1:
tree['pair'] = pairs[0]['idxSelf']
if verbose:
print 'Pairing:', tree['idxSelf'], 'with', tree['pair']
return
    # Try using subtrees to also resolve instances of non-uniqueness
    # at the header level
elif len(pairs) == 0 or len(pairs) > 1:
# print 'ID:', tree['idxSelf'],
# print 'Currently Unmatched - mismatch count:', tree['mismatch'],
# print 'Tokens:', len(tree['tokens'])
# if 'subtreesIdx' in tree:
# # print ' Subtrees:', len(tree['subtreesIdx'])
# pass
# Try to match based on children:
if 'subtreesIdx' in tree:
candidatePairs = []
for i in tree['subtreesIdx']:
if 'pair' in idxTree[i]:
# print 'subtree match', idxTree[i]['idxSelf'],
# print idxTree[i]['pair']
# print ' parents', idxTree[i]['idxParent'],
# print otherIdxTree[idxTree[i]['pair']]['idxParent']
if (otherIdxTree[idxTree[i]['pair']]['idxParent']
not in candidatePairs):
candidatePairs.append(
otherIdxTree[idxTree[i]['pair']]['idxParent'])
# print 'Candidate parents:', candidatePairs
# print 'Candidate pair count:', len(candidatePairs)
if len(candidatePairs) == 1:
tree['pair'] = candidatePairs[0]
if verbose:
print 'Pairing:', tree['idxSelf'], 'with', tree['pair'],
print 'via subtree matches'
otherIdxTree[candidatePairs[0]]['pair'] = tree['idxSelf']
if len(pairs) > 1:
cleanup_matches(tree, pairs, idxTree,
otherIdxTree, tokenMap)
return
if len(pairs) > 1:
if verbose:
print 'Too many pairs', len(pairs), thisSide
# if still not resolved, try using a majority vote among candidates
# with compatible parents
good_pairs = [p for p in pairs
if 'pair' in otherIdxTree[p['idxParent']]
and otherIdxTree[p['idxParent']]['pair'] ==
idxTree[tree['idxParent']]['idxSelf']]
best_pair = None
best_pair_count = -1
for p in good_pairs:
if len(p['tokens']) > best_pair_count:
best_pair = p
best_pair_count = len(p['tokens'])
if best_pair:
tree['pair'] = best_pair['idxSelf']
best_pair['pair'] = tree['idxSelf']
if verbose:
print 'Pairing:', tree['idxSelf'], 'with', tree['pair'],
print 'via subtree matches'
cleanup_matches(tree, pairs, idxTree,
otherIdxTree, tokenMap)
else:
if verbose:
print 'Unable to identify pair'
# remove all overlapping tokens
cleanup_matches(tree, pairs, idxTree,
otherIdxTree, tokenMap)
def okToPair(tree1, tree2):
"""Determine is on can infer pair relationship"""
if type(tree1) == type(tree2):
# further qualify match based on type
# << INSERT CODE HERE >>
tree1['pair'] = tree2['idxSelf']
tree2['pair'] = tree1['idxSelf']
return True
else:
return False
def inferPairs(tree, thisIdx, otherIdx, verbose=False):
"""Infer pairs based on neighboring pairs in subtree"""
if ('pair' not in tree or tree['mismatch'] == 0
or 'subtreesIdx' not in tree):
return
otherTree = otherIdx[tree['pair']]
if 'subtreesIdx' not in tree or 'subtreesIdx' not in otherTree:
return
thisSubtrees = tree['subtreesIdx']
otherSubtrees = otherTree['subtreesIdx']
while len(thisSubtrees) > 0 and len(otherSubtrees) > 0:
if 'pair' in thisIdx[thisSubtrees[0]]:
thisSubtrees = thisSubtrees[1:]
continue
if 'pair' in otherIdx[otherSubtrees[0]]:
otherSubtrees = otherSubtrees[1:]
continue
if 'pair' in thisIdx[thisSubtrees[-1]]:
thisSubtrees = thisSubtrees[:-1]
continue
if 'pair' in otherIdx[otherSubtrees[-1]]:
otherSubtrees = otherSubtrees[:-1]
continue
# see if unmatched items can be linked
if okToPair(thisIdx[thisSubtrees[0]], otherIdx[otherSubtrees[0]]):
if verbose:
print 'Pair Found at start', thisSubtrees[0], otherSubtrees[0]
thisSubtrees = thisSubtrees[1:]
otherSubtrees = otherSubtrees[1:]
continue
# determine which ones to ignore (look 1 further for each)
if okToPair(thisIdx[thisSubtrees[-1]], otherIdx[otherSubtrees[-1]]):
if verbose:
print 'Pair found at end', thisSubtrees[-1], otherSubtrees[-1]
thisSubtrees = thisSubtrees[:-1]
otherSubtrees = otherSubtrees[:-1]
continue
break
def pruneDetail(tree, idxTree):
"""Prune sub-trees when no matching tokens for parent"""
if 'subtreesIdx' in tree:
if len(tree['tokens']) == 0:
del tree['subtreesIdx']
else:
for i in tree['subtreesIdx']:
pruneDetail(idxTree[i], idxTree)
def ignoreDocstrings(idxTree, lines, verbose=False, parserFix=True):
"""Ignore any mismatch in doc_strings"""
for tree in idxTree:
if not (isinstance(tree['ast'], ast.Module)
or isinstance(tree['ast'], ast.ClassDef)
or isinstance(tree['ast'], ast.FunctionDef)):
continue
if 'subtreesIdx' in tree:
firstLine = idxTree[tree['subtreesIdx'][0]]
if (isinstance(firstLine['ast'], ast.Expr)
and isinstance(firstLine['ast'].value, ast.Str)):
if parserFix:
                # There is a bug in the Python ast parser where
                # it incorrectly notes the start of docstrings.
                # The code below compensates for this bug. When
                # upgrading to Python 3, verify whether it is still needed.
quotes = lines[firstLine['start']-1].count("'''")
double_quotes = lines[firstLine['start']-1].count('"""')
if max(quotes, double_quotes) == 1:
if double_quotes == 1:
target = '"""'
else:
target = "'''"
for i in range(firstLine['start']-1,
tree['start'], -1):
if lines[i-1].count(target) == 1:
firstLine['start'] = i
break
if verbose:
print ' ignoring docstring', tree['idxSelf'],
print firstLine['idxSelf']
tree['mismatch'] -= firstLine['mismatch']
firstLine['mismatch'] = 0
def matchTryExcept(idxTree, otherIdxTree, verbose=False):
"""Special case here existing code encapsulated in Try/Except clause"""
for tree in idxTree:
if not (isinstance(tree['ast'], ast.TryExcept)
and tree['mismatch'] > 0
and 'pair' in idxTree[tree['idxParent']]):
continue
if verbose:
print
print 'Found instance of TryExcept'
treeViewer(tree, idxTree, trim=False, idxOther=otherIdxTree)
print
print 'Other Parent'
otherParent = otherIdxTree[idxTree[tree['idxParent']]['pair']]
if verbose:
treeViewer(otherParent, otherIdxTree, trim=False, idxOther=idxTree)
print '-'*40
origThisMismatch = tree['mismatch']
origOtherMismatch = otherParent['mismatch']
total_matches = 0
if 'subtreesIdx' not in tree or 'subtreesIdx' not in otherParent:
continue
for i in tree['subtreesIdx']:
thisTree = idxTree[i]
for j in otherParent['subtreesIdx']:
candidateTree = otherIdxTree[j]
if compare_ast(thisTree['ast'], candidateTree['ast']):
if verbose:
print
print 'Matched',
treeViewer(thisTree, idxTree, trim=False)
print ast.dump(thisTree['ast'],
include_attributes=False)
print 'with',
treeViewer(candidateTree, otherIdxTree, trim=False)
print ast.dump(candidateTree['ast'],
include_attributes=False)
thisTree['pair'] = candidateTree['idxSelf']
candidateTree['pair'] = thisTree['idxSelf']
tree['mismatch'] -= thisTree['mismatch']
otherParent['mismatch'] -= candidateTree['mismatch']
thisTree['mismatch'] = 0
candidateTree['mismatch'] = 0
total_matches += 1
break
thisDelta = origThisMismatch - tree['mismatch']
otherDelta = origOtherMismatch - otherParent['mismatch']
# Update mismatch count in hierarchy
curIdx = tree['idxParent']
while curIdx != -1:
t = idxTree[curIdx]
t['mismatch'] -= thisDelta
curIdx = t['idxParent']
curIdx = otherParent['idxParent']
while curIdx != -1:
t = otherIdxTree[curIdx]
t['mismatch'] -= otherDelta
curIdx = t['idxParent']
if verbose:
print
print 'Total matches', total_matches
print 'Delta mismatch counts:',
print 'tree:', thisDelta, 'otherParent', otherDelta
print
print 'Updated TryExcept'
treeViewer(tree, idxTree, trim=False, idxOther=otherIdxTree)
print
print 'Updated Other Parent'
treeViewer(otherParent, otherIdxTree, trim=False, idxOther=idxTree)
print
return
def validateMismatches(tree, thisIdx, otherIdx, verbose=False):
"""Depth first check of mismatch pairs"""
if 'pair' not in tree:
return
old_mismatch = tree['mismatch']
new_mismatch = old_mismatch
if 'subtreesIdx' in tree:
if 'header_mismatch' in tree:
new_mismatch = tree['header_mismatch']
else:
new_mismatch = 0
for i in tree['subtreesIdx']:
validateMismatches(thisIdx[i], thisIdx, otherIdx)
# recompute mismatch count
new_mismatch += sum([thisIdx[i]['mismatch']
for i in tree['subtreesIdx']])
# Now compare this node
otherTree = otherIdx[tree['pair']]
if old_mismatch != new_mismatch:
if verbose:
print ' Updating mismatches for:', tree['idxSelf'],
print 'was:', old_mismatch, 'now:', new_mismatch
tree['mismatch'] = new_mismatch
if old_mismatch > 0 and compare_ast(tree['ast'], otherTree['ast']):
if verbose:
print '-'*40
print 'Match found'
print ast.dump(tree['ast'], include_attributes=False)
print
print ast.dump(otherTree['ast'], include_attributes=False)
tree['mismatch'] = 0
if 'header_mismatch' in tree:
tree['header_mismatch'] = 0
if verbose:
print ' Match:', tree['idxSelf'], otherTree['idxSelf']
def performDiff(d, verbose=False):
"""Perform diff operation on individual file"""
if not d.b_blob or not d.b_blob.path.endswith('.py'):
print 'Error: Invalid blob for performDiff', d.b_blob
raise git.BadObject
if verbose:
print
print '+'*60
print
print 'Comparing ', d.b_blob.path
if d.a_blob.path != d.b_blob.path:
            print ' With', d.a_blob.path
matchA, matchB = parse_diff_txt(d.diff, d.a_blob, d.b_blob)
st_a = get_st_from_blob(d.a_blob)
treeA, idxA = buildTree(st_a, len(matchA) - 1, matchA,
getBlobData(d.a_blob))
st_b = get_st_from_blob(d.b_blob)
treeB, idxB = buildTree(st_b, len(matchB) - 1, matchB,
getBlobData(d.b_blob))
if verbose:
print
print 'Token Mapper'
tokenMap = collections.defaultdict(dict)
tokenMapper(treeA, tokenMap, idxA, side='A')
tokenMapper(treeB, tokenMap, idxB, side='B')
if verbose:
print
print '***Tree A ***'
treeViewer(treeA, idxA, trim=True, idxOther=idxB)
print '*'*40
print
print '***Tree B ***'
treeViewer(treeB, idxB, trim=True, idxOther=idxA)
if verbose:
print 'Compute pairings:'
computePairs(treeA, tokenMap, idxA, idxB, thisSide='A')
if verbose:
print '-'*20
computePairs(treeB, tokenMap, idxB, idxA, thisSide='B')
if verbose:
print
print 'Process TryExcept:'
matchTryExcept(idxA, idxB)
matchTryExcept(idxB, idxA)
if verbose:
print
print 'Infer additional pairings:'
for tree in idxA:
inferPairs(tree, idxA, idxB)
if verbose:
print
print 'Ignore Docstrings:'
ignoreDocstrings(idxA, getBlobData(d.a_blob))
ignoreDocstrings(idxB, getBlobData(d.b_blob))
# pruneDetail(treeA, idxA)
# pruneDetail(treeB, idxB)
if verbose:
print
print '***Tree A ***'
treeViewer(treeA, idxA, trim=False, idxOther=idxB)
print '*'*40
print
print '***Tree B ***'
treeViewer(treeB, idxB, trim=False, idxOther=idxA)
if verbose:
print
print 'Comparing Pairs:'
print ' Side A:'
validateMismatches(treeA, idxA, idxB)
if verbose:
print ' Side B:'
validateMismatches(treeB, idxB, idxA)
if verbose:
print
print '***Tree A ***'
treeViewer(treeA, idxA, trim=True, idxOther=idxB)
print '*'*40
print
print '***Tree B ***'
treeViewer(treeB, idxB, trim=True, idxOther=idxA)
if verbose:
print
print '***Tree A ***'
treeViewer(treeA, idxA, trim=False)
print '*'*40
print
print '***Tree B ***'
treeViewer(treeB, idxB, trim=False)
return treeA, treeB, idxA, idxB
def getLinesFromRanges(ranges):
"""Converts ranges to list of line numbners"""
result = []
for r in ranges:
result += range(r[0], r[1] + 1)
return result
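# A minimal sketch (hypothetical ranges):
#   getLinesFromRanges([(3, 5), (9, 9)])  ->  [3, 4, 5, 9]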
def getRangesForBlame(d, verbose=False):
"""Compute range information for use with get_blame()"""
treeA, treeB, idxA, idxB = performDiff(d)
if treeB and treeB['mismatch'] > 0:
if verbose:
# treeViewer(treeB, idxB, trim=False)
# print '-'*40
treeViewer(treeB, idxB, trim=True)
lines = generateRangesForBlame(treeB, idxB, side='B')
# Combined adjacent entries into range
ranges = reduceRanges(lines)
if verbose:
print 'treeB:', ranges
return ranges
else:
return []
|
UTF-8
|
Python
| false
| false
| 37,700
|
py
| 43
|
NewDiff.py
| 13
| 0.552016
| 0.544456
| 0
| 1,116
| 32.781362
| 79
|
mattloper/chumpy
| 2,044,404,459,700
|
35e0cdeaccaabc9488346a8e55a759990433cccb
|
d10c7e12f5c9a7b37cf6c776ef21c9f70b87df48
|
/chumpy/reordering.py
|
0ab1cd34cddd73a521df41807ac81687e4075c58
|
[
"MIT"
] |
permissive
|
https://github.com/mattloper/chumpy
|
1029bc308e5c72d5b6b967c1d3b86b4267f45fda
|
51d5afd92a8ded3637553be8cef41f328a1c863a
|
refs/heads/master
| 2023-06-08T23:17:35.228530
| 2023-02-21T21:21:27
| 2023-02-21T21:21:27
| 21,735,743
| 189
| 83
|
MIT
| false
| 2023-06-06T10:26:08
| 2014-07-11T12:43:43
| 2023-05-15T21:14:48
| 2023-06-06T10:26:07
| 218
| 176
| 87
| 16
|
Python
| false
| false
|
"""
Author(s): Matthew Loper
See LICENCE.txt for licensing and contact information.
"""
from .ch import Ch
import numpy as np
from .utils import row, col
import scipy.sparse as sp
import weakref
__all__ = ['sort', 'tile', 'repeat', 'transpose', 'rollaxis', 'swapaxes', 'reshape', 'Select',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'squeeze', 'expand_dims', 'fliplr', 'flipud',
'concatenate', 'vstack', 'hstack', 'dstack', 'ravel', 'diag', 'diagflat', 'roll', 'rot90']
# Classes deriving from "Permute" promise to only reorder/reshape
class Permute(Ch):
pass
def ravel(a, order='C'):
    assert(order=='C')
    if isinstance(a, np.ndarray):
        a = Ch(a)  # wrap raw arrays so the result is a Ch node
    return reshape(a=a, newshape=(-1,))
class Reorder(Permute):
dterms = 'a',
def on_changed(self, which):
if not hasattr(self, 'dr_lookup'):
self.dr_lookup = {}
def compute_r(self):
return self.reorder(self.a.r)
def compute_dr_wrt(self, wrt):
if wrt is self.a:
if False:
from scipy.sparse.linalg.interface import LinearOperator
return LinearOperator((self.size, wrt.size), lambda x : self.reorder(x.reshape(self.a.shape)).ravel())
else:
a = self.a
asz = a.size
ashape = a.shape
key = self.unique_reorder_id()
if key not in self.dr_lookup or key is None:
JS = self.reorder(np.arange(asz).reshape(ashape))
IS = np.arange(JS.size)
data = np.ones_like(IS)
shape = JS.shape
self.dr_lookup[key] = sp.csc_matrix((data, (IS, JS.ravel())), shape=(self.r.size, wrt.r.size))
return self.dr_lookup[key]
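# A standalone sketch of the arange trick used in compute_dr_wrt above
# (plain numpy/scipy, hypothetical 2x2 input): the Jacobian of any pure
# reordering is a 0/1 matrix mapping input positions to output positions.
#   a = np.arange(4.).reshape(2, 2)
#   JS = np.transpose(np.arange(a.size).reshape(a.shape))  # the reorder
#   IS = np.arange(JS.size)
#   J = sp.csc_matrix((np.ones_like(IS), (IS, JS.ravel())),
#                     shape=(JS.size, a.size))
#   assert (J.dot(a.ravel()) == a.T.ravel()).all()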
class Sort(Reorder):
dterms = 'a'
terms = 'axis', 'kind', 'order'
def reorder(self, a): return np.sort(a, self.axis, self.kind, self.order)
def unique_reorder_id(self): return None
def sort(a, axis=-1, kind='quicksort', order=None):
return Sort(a=a, axis=axis, kind=kind, order=order)
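# A hedged usage sketch (hypothetical values): sort(Ch(np.array([3., 1., 2.]))).r
# evaluates to array([1., 2., 3.]), and compute_dr_wrt yields the matching
# permutation matrix.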
class Tile(Reorder):
dterms = 'a',
terms = 'reps',
term_order = 'a', 'reps'
def reorder(self, a): return np.tile(a, self.reps)
def unique_reorder_id(self): return (self.a.shape, tuple(self.reps))
def tile(A, reps):
return Tile(a=A, reps=reps)
class Diag(Reorder):
dterms = 'a',
terms = 'k',
def reorder(self, a): return np.diag(a, self.k)
def unique_reorder_id(self): return (self.a.shape, self.k)
def diag(v, k=0):
return Diag(a=v, k=k)
class DiagFlat(Reorder):
dterms = 'a',
terms = 'k',
def reorder(self, a): return np.diagflat(a, self.k)
def unique_reorder_id(self): return (self.a.shape, self.k)
def diagflat(v, k=0):
return DiagFlat(a=v, k=k)
class Repeat(Reorder):
dterms = 'a',
terms = 'repeats', 'axis'
def reorder(self, a): return np.repeat(a, self.repeats, self.axis)
def unique_reorder_id(self): return (self.repeats, self.axis)
def repeat(a, repeats, axis=None):
return Repeat(a=a, repeats=repeats, axis=axis)
class transpose(Reorder):
dterms = 'a'
terms = 'axes'
term_order = 'a', 'axes'
def reorder(self, a): return np.require(np.transpose(a, axes=self.axes), requirements='C')
def unique_reorder_id(self): return (self.a.shape, None if self.axes is None else tuple(self.axes))
def on_changed(self, which):
if not hasattr(self, 'axes'):
self.axes = None
super(self.__class__, self).on_changed(which)
class rollaxis(Reorder):
dterms = 'a'
terms = 'axis', 'start'
term_order = 'a', 'axis', 'start'
def reorder(self, a): return np.rollaxis(a, axis=self.axis, start=self.start)
def unique_reorder_id(self): return (self.a.shape, self.axis, self.start)
def on_changed(self, which):
if not hasattr(self, 'start'):
self.start = 0
super(self.__class__, self).on_changed(which)
class swapaxes(Reorder):
dterms = 'a'
terms = 'axis1', 'axis2'
term_order = 'a', 'axis1', 'axis2'
def reorder(self, a): return np.swapaxes(a, axis1=self.axis1, axis2=self.axis2)
def unique_reorder_id(self): return (self.a.shape, self.axis1, self.axis2)
class Roll(Reorder):
dterms = 'a',
terms = 'shift', 'axis'
term_order = 'a', 'shift', 'axis'
def reorder(self, a): return np.roll(a, self.shift, self.axis)
def unique_reorder_id(self): return (self.shift, self.axis)
def roll(a, shift, axis=None):
return Roll(a, shift, axis)
class Rot90(Reorder):
dterms = 'a',
terms = 'k',
def reorder(self, a): return np.rot90(a, self.k)
def unique_reorder_id(self): return (self.a.shape, self.k)
def rot90(m, k=1):
return Rot90(a=m, k=k)
class Reshape(Permute):
dterms = 'a',
terms = 'newshape',
term_order= 'a', 'newshape'
def compute_r(self):
return self.a.r.reshape(self.newshape)
def compute_dr_wrt(self, wrt):
if wrt is self.a:
return sp.eye(self.a.size, self.a.size)
#return self.a.dr_wrt(wrt)
# def reshape(a, newshape):
# if isinstance(a, Reshape) and a.newshape == newshape:
# return a
# return Reshape(a=a, newshape=newshape)
def reshape(a, newshape):
while isinstance(a, Reshape):
a = a.a
return Reshape(a=a, newshape=newshape)
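# Collapsing nested Reshape nodes keeps the graph shallow: e.g. (hypothetical)
# reshape(reshape(x, (4, 2)), (2, 4)) wraps x directly rather than building
# Reshape(Reshape(x)).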
# class And(Ch):
# dterms = 'x1', 'x2'
#
# def compute_r(self):
# if True:
# needs_work = [self.x1, self.x2]
# done = []
# while len(needs_work) > 0:
# todo = needs_work.pop()
# if isinstance(todo, And):
# needs_work += [todo.x1, todo.x2]
# else:
# done = [todo] + done
# return np.concatenate([d.r.ravel() for d in done])
# else:
# return np.concatenate((self.x1.r.ravel(), self.x2.r.ravel()))
#
# # This is only here for reverse mode to work.
# # Most of the time, the overridden dr_wrt is callpath gets used.
# def compute_dr_wrt(self, wrt):
#
# if wrt is not self.x1 and wrt is not self.x2:
# return
#
# input_len = wrt.r.size
# x1_len = self.x1.r.size
# x2_len = self.x2.r.size
#
# mtxs = []
# if wrt is self.x1:
# mtxs.append(sp.spdiags(np.ones(x1_len), 0, x1_len, x1_len))
# else:
# mtxs.append(sp.csc_matrix((x1_len, input_len)))
#
# if wrt is self.x2:
# mtxs.append(sp.spdiags(np.ones(x2_len), 0, x2_len, x2_len))
# else:
# mtxs.append(sp.csc_matrix((x2_len, input_len)))
#
#
# if any([sp.issparse(mtx) for mtx in mtxs]):
# result = sp.vstack(mtxs, format='csc')
# else:
# result = np.vstack(mtxs)
#
# return result
#
# def dr_wrt(self, wrt, want_stacks=False, reverse_mode=False):
# self._call_on_changed()
#
# input_len = wrt.r.size
# x1_len = self.x1.r.size
# x2_len = self.x2.r.size
#
# mtxs = []
# if wrt is self.x1:
# mtxs.append(sp.spdiags(np.ones(x1_len), 0, x1_len, x1_len))
# else:
# if isinstance(self.x1, And):
# tmp_mtxs = self.x1.dr_wrt(wrt, want_stacks=True, reverse_mode=reverse_mode)
# for mtx in tmp_mtxs:
# mtxs.append(mtx)
# else:
# mtxs.append(self.x1.dr_wrt(wrt, reverse_mode=reverse_mode))
# if mtxs[-1] is None:
# mtxs[-1] = sp.csc_matrix((x1_len, input_len))
#
# if wrt is self.x2:
# mtxs.append(sp.spdiags(np.ones(x2_len), 0, x2_len, x2_len))
# else:
# if isinstance(self.x2, And):
# tmp_mtxs = self.x2.dr_wrt(wrt, want_stacks=True, reverse_mode=reverse_mode)
# for mtx in tmp_mtxs:
# mtxs.append(mtx)
# else:
# mtxs.append(self.x2.dr_wrt(wrt, reverse_mode=reverse_mode))
# if mtxs[-1] is None:
# mtxs[-1] = sp.csc_matrix((x2_len, input_len))
#
# if want_stacks:
# return mtxs
# else:
# if any([sp.issparse(mtx) for mtx in mtxs]):
# result = sp.vstack(mtxs, format='csc')
# else:
# result = np.vstack(mtxs)
#
# return result
class Select(Permute):
terms = ['idxs', 'preferred_shape']
dterms = ['a']
term_order = 'a', 'idxs', 'preferred_shape'
def compute_r(self):
result = self.a.r.ravel()[self.idxs].copy()
if hasattr(self, 'preferred_shape'):
return result.reshape(self.preferred_shape)
else:
return result
def compute_dr_wrt(self, obj):
if obj is self.a:
if not hasattr(self, '_dr_cached'):
IS = np.arange(len(self.idxs))
JS = self.idxs.ravel()
ij = np.vstack((row(IS), row(JS)))
data = np.ones(len(self.idxs))
self._dr_cached = sp.csc_matrix((data, ij), shape=(len(self.idxs), np.prod(self.a.shape)))
return self._dr_cached
def on_changed(self, which):
if hasattr(self, '_dr_cached'):
if 'idxs' in which or self.a.r.size != self._dr_cached.shape[1]:
del self._dr_cached
class AtleastNd(Ch):
dterms = 'x'
terms = 'ndims'
def compute_r(self):
xr = self.x.r
if self.ndims == 1:
target_shape = np.atleast_1d(xr).shape
elif self.ndims == 2:
target_shape = np.atleast_2d(xr).shape
elif self.ndims == 3:
target_shape = np.atleast_3d(xr).shape
else:
raise Exception('Need ndims to be 1, 2, or 3.')
return xr.reshape(target_shape)
def compute_dr_wrt(self, wrt):
if wrt is self.x:
return 1
def atleast_nd(ndims, *arys):
arys = [AtleastNd(x=ary, ndims=ndims) for ary in arys]
return arys if len(arys) > 1 else arys[0]
def atleast_1d(*arys):
return atleast_nd(1, *arys)
def atleast_2d(*arys):
return atleast_nd(2, *arys)
def atleast_3d(*arys):
return atleast_nd(3, *arys)
def squeeze(a, axis=None):
if isinstance(a, np.ndarray):
return np.squeeze(a, axis)
shape = np.squeeze(a.r, axis).shape
return a.reshape(shape)
def expand_dims(a, axis):
if isinstance(a, np.ndarray):
return np.expand_dims(a, axis)
shape = np.expand_dims(a.r, axis).shape
return a.reshape(shape)
def fliplr(m):
return m[:,::-1]
def flipud(m):
return m[::-1,...]
class Concatenate(Ch):
def on_changed(self, which):
if not hasattr(self, 'dr_cached'):
self.dr_cached = weakref.WeakKeyDictionary()
@property
def our_terms(self):
if not hasattr(self, '_our_terms'):
self._our_terms = [getattr(self, s) for s in self.dterms]
return self._our_terms
def __getstate__(self):
# Have to get rid of WeakKeyDictionaries for serialization
if hasattr(self, 'dr_cached'):
del self.dr_cached
return super(self.__class__, self).__getstate__()
def compute_r(self):
return np.concatenate([t.r for t in self.our_terms], axis=self.axis)
@property
def everything(self):
if not hasattr(self, '_everything'):
self._everything = np.arange(self.r.size).reshape(self.r.shape)
self._everything = np.swapaxes(self._everything, self.axis, 0)
return self._everything
def compute_dr_wrt(self, wrt):
if not hasattr(self, 'dr_cached'):
self.dr_cached = weakref.WeakKeyDictionary()
if wrt in self.dr_cached and self.dr_cached[wrt] is not None:
return self.dr_cached[wrt]
if wrt not in self.our_terms:
return
_JS = np.arange(wrt.size)
_data = np.ones(wrt.size)
IS = []
JS = []
data = []
offset = 0
for term in self.our_terms:
tsz = term.shape[self.axis]
if term is wrt:
JS += [_JS]
data += [_data]
IS += [np.swapaxes(self.everything[offset:offset+tsz], self.axis, 0).ravel()]
offset += tsz
IS = np.concatenate(IS).ravel()
JS = np.concatenate(JS).ravel()
data = np.concatenate(data)
res = sp.csc_matrix((data, (IS, JS)), shape=(self.r.size, wrt.size))
if len(list(self._parents.keys())) != 1:
self.dr_cached[wrt] = res
else:
self.dr_cached[wrt] = None
return res
def expand_concatenates(mtxs, axis=0):
mtxs = list(mtxs)
done = []
while len(mtxs) > 0:
mtx = mtxs.pop(0)
if isinstance(mtx, Concatenate) and mtx.axis == axis:
mtxs = [getattr(mtx, s) for s in mtx.dterms] + mtxs
else:
done.append(mtx)
return done
def concatenate(mtxs, axis=0, **kwargs):
mtxs = expand_concatenates(mtxs, axis)
result = Concatenate(**kwargs)
result.dterms = []
for i, mtx in enumerate(mtxs):
result.dterms.append('m%d' % (i,))
setattr(result, result.dterms[-1], mtx)
result.axis = axis
return result
def hstack(mtxs, **kwargs):
return concatenate(mtxs, axis=1, **kwargs)
def vstack(mtxs, **kwargs):
return concatenate([atleast_2d(m) for m in mtxs], axis=0, **kwargs)
def dstack(mtxs, **kwargs):
return concatenate([atleast_3d(m) for m in mtxs], axis=2, **kwargs)
|
UTF-8
|
Python
| false
| false
| 13,938
|
py
| 26
|
reordering.py
| 22
| 0.544124
| 0.53573
| 0
| 454
| 29.700441
| 118
|
thednainus/ERV_Simulations_II
| 7,464,653,191,016
|
347f93973771c15bb15b2884d525719d6541b0cf
|
243a76a0b3b9bc3a87b3c79346b89c33010ac9a4
|
/OSG/inputs/ERV_phylodynamics_class.py
|
37b70d9895f3d07f337652b39415bd53497afa26
|
[] |
no_license
|
https://github.com/thednainus/ERV_Simulations_II
|
599d0003bc94a9f6484188d64e046f97407898c4
|
173cdf76cd099168becd9d649d2fdbf0f7c42bd9
|
refs/heads/master
| 2016-09-21T04:48:26.698014
| 2016-08-23T16:04:53
| 2016-08-23T16:04:53
| 66,194,658
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import time, os
from ete2 import Tree
from Tree_utilities import TreeUtil
from Random_Functions import ERV_Randoms
from writeFiles import writeInfos
class Dynamics(object):
def __init__(self, model, n_Sim, numHostGen, hostGen_text, max_copies, hostMutRate, virusMutRate, virusBirthRate, virusInacRate, virusDelRate):
self.model = model
self.n_Sim = n_Sim
self.numHostGen = numHostGen # Total Number of host generations
self.hostGen_text = hostGen_text
self.max_copies = max_copies # if copy number is higher than 1,000, host will die
self.hostMutRate = hostMutRate
self.virusMutRate = virusMutRate
self.virusBirthRate = virusBirthRate
self.virusInacRate = virusInacRate # rate at which an element becomes inactive
self.virusDelRate = virusDelRate # rate at which an element become a solo-LTR
def ensure_dir(self, part1, part2):
completeName = os.path.join(part1, part2)
if not os.path.exists(completeName):
os.makedirs(completeName)
return completeName
# "simulation" function receives the number which will be the directory name in each level (for analysis in Spartan. Spartan is a R library)
def simulation(self, path_name):
        totalN = 1 # It counts the number of elements that are in the lists (actives + inactives)
        totalTips = 1 # It counts the number of elements that have been generated
totalTime = 0 # It will count total Time
actives = ['Seq0'] # elements that can give birth to a new element
inactives = [] # elements that cannot give birth to a new element
deleted = []
total_copies = len(actives) + len(inactives)
info = None # info means outcome of a simulation run
code = None # code is a number that is related to outcome in info
randoms = ERV_Randoms(self.numHostGen, self.hostMutRate, 0, 0)
for sim in range(self.n_Sim):
for pI in self.model:
for birth in self.virusBirthRate:
for inact in self.virusInacRate:
for deletion in self.virusDelRate:
for virusMut in self.virusMutRate:
startTime1 = time.time()
t = Tree()
ut = Tree()
randoms.rateBirth = birth
randoms.rateInact = inact
utils = TreeUtil(pI, sim, virusMut)
write2Files = writeInfos(pI, sim, virusMut, self.hostMutRate, birth, inact, deletion, self.hostGen_text)
header = 'Model ' \
'\nSim_Number ' \
'\nERV_Mutation_Rate ' \
'\nBirth_Rate ' \
'\nInact_Rate ' \
'\nDel_Rate1 ' \
'\nDel_Rate2 ' \
'\nLast_Host_Generation ' \
'\nNum_active_elem ' \
'\nNum_Inactive_elem ' \
'\nTotal_elem_in_the_tree ' \
'\nNum_deleted_elem ' \
'\nFinal_Info ' \
'\ncode_Final_Info'
info = "Tree exists!"
code = "1"
while totalTime <= self.numHostGen:
if len(actives) != 0:
waiting_time_event = randoms.wTime_event(actives, inactives, totalTime, deletion)
waitingTime = waiting_time_event[0]
totalTime += waitingTime
if totalTime > self.numHostGen:
time_lastgen = totalTime - waitingTime
if totalTips == 1:
info = "First event generated a totalTime higher than numHostGen. Tree does not exist"
code = "1.1"
break
else:
break
if waiting_time_event[1] == 'birth':
copy1 = randoms.choose_seq(actives)
copy2 = 'Seq' + str(totalTips) # This is the new copy being generated
                                            # Because the newly generated branch in the tree will take into account the mutation rate of the
                                            # virus, which is higher than the host mutation rate.
copy1_time = waitingTime * self.hostMutRate
copy2_time = (virusMut) + (waitingTime * self.hostMutRate)
act_or_inac = randoms.act_inact_decision(pI)
if act_or_inac == 'active':
                                                # It will append the new active element that was generated, so it can be randomly selected later, when replacement starts to occur.
actives.append(copy2)
else:
inactives.append(copy2)
total_copies = len(actives) + len(inactives)
                                            # if totalTips == 1, it means that I have to create the tree structure. After that step I can add or remove sequences
if totalTips == 1:
child1 = t.add_child(name = copy1, dist = copy1_time)
child2 = t.add_child(name = copy2, dist = copy2_time)
# to create the ultrametric trees
u_child1 = ut.add_child(name = copy1, dist = waitingTime)
u_child2 = ut.add_child(name = copy2, dist = waitingTime)
totalN += 1
totalTips += 1
elif total_copies >= self.max_copies:
time_lastgen = totalTime - waitingTime
if actives.count(copy2) == 0:
inactives.remove(copy2)
else:
actives.remove(copy2)
break
else:
t = utils.add_new_seq(t, copy1, copy2, copy1_time, copy2_time)
ut = utils.add_new_seq(ut, copy1, copy2, waitingTime, waitingTime)
totalN += 1
totalTips += 1
elif waiting_time_event[1] == 'inactivation':
# choose which element in the active list will become inactive
inact_element = randoms.choose_seq(actives)
# removes the new inactive element from the active list
actives.remove(inact_element)
# appends the new inactive element to the inactive list
inactives.append(inact_element)
if totalTips != 1: # If totalN == 1 it means I do not have a tree or my tree has just one element
t = utils.update_leaves(t, None, None, waitingTime * self.hostMutRate)
ut = utils.update_leaves(ut, None, None, waitingTime)
                                            # I do not need an else statement here because if totalTips == 1,
                                            # it means that the first event was a deletion, so the active list has only one element
# this single element in the active list will be moved to the inactive list,
# and in the next loop len(actives) == 0 and will not enter the if statement of len(actives) != 0
# No tree will be generated at the end of a simulation run.
elif waiting_time_event[1] == 'deletion':
if totalTips == 1: # First event is a deletion
actives = []
deleted = ['Seq0']
info = "Tree does not exist because 1st event was a deletion" # it means that the first event was a deletion
code = "2"
time_lastgen = totalTime
break
else:
total_elements = actives + inactives
delete_element = randoms.choose_seq(total_elements)
deleted.append(delete_element)
if actives.count(delete_element) == 0:
inactives.remove(delete_element)
else:
actives.remove(delete_element)
# first I delete the element from the tree
t = utils.delete_seq(t, delete_element)
ut = utils.delete_seq(ut, delete_element)
# then I update the tree with the waiting time
t = utils.update_leaves(t, None, None, waitingTime * self.hostMutRate)
ut = utils.update_leaves(ut, None, None, waitingTime)
totalN -= 1
# if len(actives) == 0 and len(inactives) != 0, the only thing that can happen is deletion of inactives elements of the tree.
elif len(actives) == 0 and len(inactives) != 0:
waitingTime = randoms.wTime_del(inactives, totalTime, deletion)
totalTime += waitingTime
if totalTime > self.numHostGen:
time_lastgen = totalTime - waitingTime
break
delete_element = randoms.choose_seq(inactives)
deleted.append(delete_element)
inactives.remove(delete_element) # if here, I remove the last inactive element from the list and I cannot calculate the waiting time anymore because it will be a division by zero
                                        if len(inactives) != 0: # if, after removing one element from the inactives (see the line above), the list is still non-empty, do the following
# first I delete the element from the tree
t = utils.delete_seq(t, delete_element)
ut = utils.delete_seq(ut, delete_element)
# then I update the tree with the waiting time
t = utils.update_leaves(t, None, None, waitingTime * self.hostMutRate)
ut = utils.update_leaves(ut, None, None, waitingTime)
totalN -= 1
else: # This means that before entering the loop len(inactives) == 1, and it was the only sequence that could be chosen to be deleted from the tree
info = "All elements were deleted! (Event = permanent deletion)"
code = "3"
time_lastgen = totalTime
# first I delete the element from the tree
t = utils.delete_seq(t, delete_element)
ut = utils.delete_seq(ut, delete_element)
# then I update the tree with the waiting time
t = utils.update_leaves(t, None, None, time_lastgen * self.hostMutRate)
ut = utils.update_leaves(ut, None, None, time_lastgen)
break
else: # when len(actives) == 0 and len(inactives) == 0
time_lastgen = totalTime
info = "All elements were deleted!"
code = "3"
break
if totalTime > self.numHostGen:
if totalN == 1:
info = "Only one full element survived!"
code = "4"
elif totalN <= 0:
info = "All elements were deleted! Tree does not exist!"
code = "4.1"
else:
timeFinal = randoms.final_Time(totalTime, waitingTime) # in substitution per site
t = utils.update_leaves(t, None, None, timeFinal)
ut = utils.update_leaves(ut, None, None, (self.numHostGen - time_lastgen))
elif total_copies >= self.max_copies:
info = "Host died!"
code = "5"
write2Files.write_info(path_name, header, time_lastgen, len(actives), len(inactives), len(deleted), info, code)
if code == "1":
write2Files.write_Newick(path_name, "subs_per_site_", t, len(actives)+len(inactives), code, len(actives), len(inactives), len(deleted))
write2Files.write_Newick(path_name, "ultrametric_", ut, len(actives)+len(inactives), code, len(actives), len(inactives), len(deleted))
totalN = 1
totalTips = 1
totalTime = 0
actives = ['Seq0']
inactives = []
deleted = []
total_copies = len(actives) + len(inactives)
print "Elapsed time of first simulation is: %.5f" % (time.time() - startTime1)
|
UTF-8
|
Python
| false
| false
| 17,130
|
py
| 18
|
ERV_phylodynamics_class.py
| 12
| 0.386924
| 0.381144
| 0
| 312
| 53.897436
| 222
|
lujun9972/lctt-cli
| 420,906,823,318
|
c7cb0dbef7129ede661edfb46272327fadf387da
|
969c4337320aa75e602639300f17ddaee219595c
|
/lctt-cli/clone/clone_translate.py
|
2e5d4eca60540d3ca957b812119785c377485f6a
|
[
"MIT"
] |
permissive
|
https://github.com/lujun9972/lctt-cli
|
f5a33f87fabfa0b3895a0fe2571bed39a899bdab
|
48b3911104e5c5675922fdbfcab498ed2f670fee
|
refs/heads/master
| 2021-08-19T21:52:33.442891
| 2017-10-24T16:24:53
| 2017-10-24T16:24:53
| 112,172,011
| 0
| 0
| null | true
| 2017-11-27T08:57:34
| 2017-11-27T08:57:34
| 2017-11-09T11:23:11
| 2017-10-24T16:53:22
| 8,176
| 0
| 0
| 0
| null | false
| null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import wget,git,os
class download_translate_project():
    # Define initial variables
def __init__(self,url=None,filename=None):
self.url='http://codeload.github.com/LCTT/TranslateProject/zip/master.zip'
self.filename='TranslateProject.zip'
    # Define the download module
def downloading_now(self,url=None,filename=None):
        # Download using wget
def wget_zip_download():
wget.download(self.url,self.filename)
        # Clone using git
def git_github_clone():
if not os.path.isdir("TranslateProject"):
os.makedirs("TranslateProject")
git.Repo.clone_from(url="https://github.com/lctt/TranslateProject", to_path="TranslateProject")
        # Unzip the file downloaded by wget
def unzip():
            # Remove the extracted file and move it
os.system('python -m zipfile -e .\TranslateProject.zip .\list')
try:
wget_zip_download()
unzip()
except:
git_github_clone()
        # Clean up name clashes to make sure files end up in the right place
finally:
            # wget case
if os.path.exists('.\TranslateProject.zip'): os.remove('.\TranslateProject.zip')
if os.path.exists('.\list\TranslateProject-master'): os.rename('.\list\TranslateProject-master','.\list\TranslateProject')
            # git case
if os.path.exists('.\TranslateProject'): os.rename('.\TranslateProject','.\list\TranslateProject')
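# Typical use (hypothetical): fetch the repo into ./list/TranslateProject
#   download_translate_project().downloading_now()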
|
UTF-8
|
Python
| false
| false
| 1,566
|
py
| 16
|
clone_translate.py
| 12
| 0.580913
| 0.57953
| 0
| 36
| 38.166667
| 134
|
fczero/MT_Testing
| 19,679,540,164,077
|
c560736b96882fc2bbb14af55dd18c5184045d9a
|
0f35c3a2c957b10bd9a0c79a314f6cfc4500de62
|
/compare_macros.py
|
42d244f6afe73b00c06f699cad9fe8b15ec8182d
|
[] |
no_license
|
https://github.com/fczero/MT_Testing
|
ec61092c0be245054dab4514ba88a40f06feadee
|
d109a8aa1409eabc8f96804f6ee93233571a063b
|
refs/heads/master
| 2021-01-11T15:31:08.194508
| 2016-11-14T10:12:15
| 2016-11-14T10:12:15
| 80,367,028
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from db_helper import *
import db_tables
import db_helper
import sys
def compareMacros(target, reference, newIndices):
""" given target, reference macro numbers and list of new indices
loops through equivalent parameters
returns list of tuple of parameters with differences and corresponding indices
[ ['parameter name',[difflist]] ,['parameter name2',[difflist2]] ]
[ ['Number of Message data',['limit_print_string', 'default_number']] ,['parameter name2',[difflist2]] ]
or
[ div1, div2 ]
div1 = ['Number of Message data',['limit_print_string', 'default_number']]
div2 = ['mail notif parameter..',['limit_print_string', 'default_number']]
"""
#remove new indices first
#create copies of lists
filtered_target = target[:][:]
filtered_reference = reference[:][:]
retVal = []
#per div
for i in range(len(reference)):
if i > 0 :
#since mail notification is never the same it should be filtered as well
if len(newIndices[i]):
filtered_target[i] = removeIndices(target[i], newIndices[i])
filtered_reference[i] = removeIndices(reference[i], newIndices[i])
else:
filtered_target[i] = removeIndices(target[i], newIndices[i])
#per div
for i in range(len(filtered_reference)):
div = []
# print "--i = ",i
#per param
for j in range(len(filtered_reference[i])):
diffInfo = []
diffList = compareParams(filtered_target[i][j], filtered_reference[i][j])
if diffList:
parameter_name = filtered_target[i][j][3]
for k in diffList:
diffInfo.append(db_tables.CP_PARA_NAME[k])
div.append([parameter_name, diffInfo])
retVal.append(div)
return retVal
def compareParams(target, reference):
""" given a target and reference tuple, compares each value
returns list of Indices that are not equal
"""
indexList = []
for i in range(1, len(reference)):
if target[i] != reference[i]:
#ignore Struct Key as it is often different
if 1:
if i == db_tables.CP_PARA_NAME.index('Struct_key'):
pass
else:
indexList.append(i)
else:
indexList.append(i)
print target[i],
print " != ",
print reference[i]
return indexList
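# A minimal sketch (hypothetical tuples; index 0 is skipped by design,
# and this assumes index 1 is not the 'Struct_key' column):
#   compareParams(('id', 'x', 'a'), ('id', 'y', 'a'))  ->  [1]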
def removeIndices(newMacro, newIndices):
""" given a list of tuples (parameters) and list of new elements
returns a copy of the list without the new elements
"""
retVal = newMacro[:]
indices = newIndices[:]
indices.sort(reverse=True)
for i in range(len(indices)):
retVal.pop(indices[i])
return retVal
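# E.g. (hypothetical list): removeIndices(['p0', 'p1', 'p2'], [1])
# returns ['p0', 'p2'] and leaves the input list untouched.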
#######################################################
if __name__ == "__main__":
if len(sys.argv) < 3:
print "insufficient arguments:",
print len(sys.argv) - 1
print "should be: 2",
quit(1)
macro_for_checking = sys.argv[1]
reference_macro = sys.argv[2]
#test
target_info = getMacroInfo(str(macro_for_checking))
target_paramsAll = getParamsAll(target_info)
target_paramsInfoAll = getParamsInfoAll(target_paramsAll)
target_paramsInfo = insertStrParamsAll(target_paramsInfoAll)
ref_info = getMacroInfo(str(reference_macro))
ref_paramsAll = getParamsAll(ref_info)
ref_paramsInfoAll = getParamsInfoAll(ref_paramsAll)
ref_paramsInfo = insertStrParamsAll(ref_paramsInfoAll)
print 'Macro: %s ' % macro_for_checking
print 'Reference: %s ' % reference_macro
#zero-based indices
newIndices = [[6,9,12],[0]]
compareMacros(target_paramsInfo, ref_paramsInfo, newIndices)
|
UTF-8
|
Python
| false
| false
| 3,917
|
py
| 28
|
compare_macros.py
| 13
| 0.585908
| 0.57978
| 0
| 108
| 35.259259
| 112
|
MakDizdar/map
| 11,201,274,716,237
|
16a841c51cc36814a1c7cb547771eab6138b70c4
|
4707b6e6a7eba583391a382f2619589710a54a6c
|
/Maps/.svn/pristine/16/16a841c51cc36814a1c7cb547771eab6138b70c4.svn-base
|
322256ce117af1e98ed8d027c21e98515b45a07e
|
[] |
no_license
|
https://github.com/MakDizdar/map
|
c9ce8cd54978b30f23c972bb73caf38a78a998c0
|
0886e3e3d00a8b005a3c97c6e7246d0a8a6bd6ba
|
refs/heads/master
| 2020-07-06T02:00:52.602371
| 2018-06-03T10:47:59
| 2018-06-03T10:47:59
| 73,970,520
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import Template, Context, RequestContext
from django.http import HttpResponse
import datetime
from time import sleep
from extract_nodes import extract_osm_nodes , select_nodes_in_rectangle
from extract_shortest_path import find_node, shortestPath
from extract_edges import *
from store import *
def hello(request):
now = datetime.datetime.now()
html = "<html><body>Hello world!<b> It is now %s.</b> </body></html>" %now
return HttpResponse(html)
class Member:
def __init__(self,name,age):
self.name = name
self.age = age
def memb_reg(request):
members = dict()
members[2] = Member("Kurt", 64)
members[5] = Member("Anne", 39)
members[3] = Member("Berit", 15)
members[8] = Member("Julius_Caesar", 2113)
c = Context({
'NUMBER_OF_MEMBERS': len(members),
'MEMBER_INFO': members, # Important, note the last ","!
})
return render_to_response('mapvis/projekt_lace.html', c)
# Create your views here.
def mapapp(request):
if request.method == 'GET':
node_set=NodeSet()
node_set = extract_osm_nodes("mapvis/osmmap.osm")
node_set = select_nodes_in_rectangle(node_set,
58.40807, 58.40968,
15.56111, 15.56424)
c = RequestContext(request,{'GMAPS_API_KEY': '+++YOUR_GMAPS_API_KEY+++',
'COORDS': node_set.get_nodes().values(), 'DISTANCE':[] ,
'PLACEDMARKER': []})
return render_to_response('mapvis/mapapp.html', c)
elif request.method == 'POST':
pparser = POST_parser(request)
start_mrk_lat = pparser.lat1
start_mrk_lng = pparser.lng1
end_mrk_lat= pparser.lat2
end_mrk_lng= pparser.lng2
node_set = extract_osm_nodes("mapvis/osmmap.osm")
iter_nodeset = node_set.get_nodes()
edgeset = extract_osm_ways("mapvis/osmmap.osm")
upt_edgeset = update_edgeset_weights(edgeset,node_set)
graph = extract_graph_alt(upt_edgeset)
start_node = find_node(start_mrk_lat, start_mrk_lng, node_set)[1]
end_node = find_node(end_mrk_lat, end_mrk_lng, node_set)[1]
seq=[]
errMSG= False
try:
dist, path =shortestPath(graph, str(int(start_node.id)), str(int(end_node.id)))
for node in path:
seq.append(iter_nodeset[float(node)])
except:
dist, path = "wut", "NOT FOUND"
errMSG=True
c = RequestContext(request, {'GMAPS_API_KEY': '+++YOUR_GMAPS_API_KEY+++','PATHNODES': seq,
'DISTANCE': dist , 'START':start_node, 'BEND': end_node.lat,
'LEND': end_node.lng,'FEND': end_node.id, 'errMSG': errMSG})
return render_to_response('mapvis/mapapp.html', c)
def get_float(request, id):
value = request.POST.get(id)
# Check that it's possible to convert input to float.
try:
return float(value)
except (TypeError, ValueError):
return None
def get_str(request, id):
return request.POST.get(id)
class POST_parser:
def __init__(self, request):
# You can choose what variables you want to
# get from POST and what to call them.
self.lat1 = get_float(request, 'lat1')
self.lng1 = get_float(request, 'lng1')
self.lat2 = get_float(request, 'lat2')
self.lng2 = get_float(request, 'lng2')
|
UTF-8
|
Python
| false
| false
| 3,705
| 6
|
16a841c51cc36814a1c7cb547771eab6138b70c4.svn-base
| 3
| 0.57247
| 0.557355
| 0
| 103
| 34.961165
| 98
|
|
bretmullinix/githooks
| 5,918,464,965,760
|
e5c0920bf1d47c4aba2d150380007ec256e75f84
|
73908cf0d8561f1dc4e1820e086aa510fd680b44
|
/pre_commit_directory/common_classes/markdown_rule.py
|
63948b13ec67eb8d236f7a400cd05b4b8831b068
|
[] |
no_license
|
https://github.com/bretmullinix/githooks
|
84e8da509468bbe39104ee4301b925c03c9dbe0f
|
b6c6f4500c4b22d0feb448a88951cd9bc708dd98
|
refs/heads/master
| 2022-11-18T01:33:31.655751
| 2020-07-19T20:54:06
| 2020-07-19T20:54:06
| 278,967,733
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
class MarkdownRule:
    def __init__(self, regex_match=None):
        self.re_match = regex_match
        if regex_match is not None:
            self._parse_re_match()
    def _parse_re_match(self):
        self.rule = self.re_match.group(3)
        self.rule_description = self.re_match.group(4)
        self.file = self.re_match.group(1)
        self.file_line_number = int(self.re_match.group(2))
|
UTF-8
|
Python
| false
| false
| 397
|
py
| 12
|
markdown_rule.py
| 11
| 0.586902
| 0.576826
| 0
| 13
| 29.384615
| 59
|
andypaw03/homework
| 9,715,216,072,552
|
0a13a82fb3bcc6f0d286a7361bdf1b87ab2389c3
|
b1db73e6c9df798e190ec23d170710d7f0b9f073
|
/zad new.py
|
8a0b232d8f775bf464666682187c64f946ad8b54
|
[] |
no_license
|
https://github.com/andypaw03/homework
|
3f9fe1825f4a9a4e3245994b02a0520fa88f8acd
|
d0553507b5842867aaac21aaeaa7638969e307c6
|
refs/heads/master
| 2021-05-17T11:07:05.534910
| 2020-03-28T17:29:59
| 2020-03-28T17:29:59
| 250,748,906
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
a=input("").split()
b=[""]
for i in range(len(a)):
minn=a[0]
for j in a:
if minn>j:
minn=list(j)
a.remove(minn)
b.append(minn)
print(b)
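# E.g. (hypothetical input): "banana apple cherry" -> ['apple', 'banana', 'cherry']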
|
UTF-8
|
Python
| false
| false
| 172
|
py
| 53
|
zad new.py
| 52
| 0.488372
| 0.482558
| 0
| 10
| 16.2
| 24
|
Ispirett/kjbstudios
| 10,342,281,291,200
|
f8a8b1664a14e81d38d236f2587bde909838b79c
|
e83f5fe4cb0b8c8bb2a4484aebe83fcde81dddec
|
/photography/migrations/0008_auto_20180812_0517.py
|
b6b415b44418808e073d06561a668867a723c95d
|
[] |
no_license
|
https://github.com/Ispirett/kjbstudios
|
47b910ec666f0bfced834eab931390b499ef0581
|
e09a03da02eca9ecc16709862bcaf4f3b282f015
|
refs/heads/master
| 2018-11-14T00:02:00.211649
| 2018-08-30T22:24:41
| 2018-08-30T22:24:41
| 144,615,989
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 2.0.7 on 2018-08-12 05:17
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('photography', '0007_auto_20180812_0305'),
]
operations = [
migrations.AddField(
model_name='sitecontent',
name='SLIDE_1',
field=models.CharField(default=django.utils.timezone.now, max_length=1000),
preserve_default=False,
),
migrations.AddField(
model_name='sitecontent',
name='SLIDE_2',
field=models.CharField(default=django.utils.timezone.now, max_length=1000),
preserve_default=False,
),
migrations.AddField(
model_name='sitecontent',
name='SLIDE_3',
field=models.CharField(default=django.utils.timezone.now, max_length=1000),
preserve_default=False,
),
]
|
UTF-8
|
Python
| false
| false
| 956
|
py
| 21
|
0008_auto_20180812_0517.py
| 10
| 0.595188
| 0.547071
| 0
| 32
| 28.875
| 87
|
AdiGajbhiye/grid-world
| 1,408,749,284,589
|
b707611a6de8f0e7d6f9a0bef2fa612ceff8b0bd
|
e99a9c91fb6c1c6c9c2a2da21bd3c76eec605c27
|
/qlearning.py
|
cbc22210c65d5acbcba0fbee078235eaebdabe69
|
[] |
no_license
|
https://github.com/AdiGajbhiye/grid-world
|
5e258a89f38517aed7e188135eb37b5b3b65cf5b
|
dcc939cbbd9436956beb0c520beb598930f22e05
|
refs/heads/master
| 2021-01-20T03:04:24.937817
| 2017-08-29T09:30:34
| 2017-08-29T09:30:34
| 101,344,244
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
import subprocess
import time
import grid
Qvalues = [[[0 for i in range(4)] for i in range(4)] for i in range(4)]
discountFactor = 0.9
epsilon = 0.8 # how much exploration
livingReward = -0.4
def printQvalues():
for i in reversed(Qvalues):
for j in i:
for action in j:
print("%+06.2f" % action, end=" ")
print(" ", end="@@")
print()
print('-' * 50)
def printPolicy():
for i in reversed(range(0, 4)):
for j in range(0, 4):
action = Qvalues[i][j].index(max(Qvalues[i][j]))
if action == 0:
print(",", end=" ")
if action == 1:
print("^", end=" ")
if action == 2:
print("<", end=" ")
if action == 3:
print(">", end=" ")
print()
print('-' * 50)
def shouldExplore():
if random.random() < epsilon:
return True
return False
def eGreedy():
i, j = world.getState()
if shouldExplore():
action = random.randint(0, 3)
else:
action = Qvalues[i][j].index(max(Qvalues[i][j]))
world.changeState(action)
nexti, nextj = world.getState()
sample = world.getReward() + discountFactor * \
max(Qvalues[nexti][nextj])
Qvalues[i][j][action] = (1 - alpha) * \
Qvalues[i][j][action] + alpha * sample
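    # The assignment above is the standard Q-learning update, with alpha the
    # learning rate and discountFactor the discount gamma:
    #   Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (r + gamma * max_a' Q(s', a'))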
def train():
while not world.isExit():
eGreedy()
# printQvalues()
# printPolicy()
# time.sleep(1)
# subprocess.call(["clear"])
def main():
global alpha, world
n = 1
world = grid.GridWorld(livingReward)
while True:
alpha = 1 / n # learning rate
train()
printQvalues()
printPolicy()
time.sleep(1)
subprocess.call(["clear"])
world.reset()
n += 1
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false
| false
| 1,895
|
py
| 4
|
qlearning.py
| 4
| 0.503958
| 0.486544
| 0
| 83
| 21.831325
| 71
|
anjabeck/AP-1516
| 7,310,034,371,918
|
ec92dae4029ca0f0e26266e978b1c280ec92eb8d
|
42595a8f1c8bb4509b6586843296c9f751ca71b7
|
/V601-Franck-Hertz/Rechnung.py
|
5596af124bc1999d8ddbeabdc96b731a51482e40
|
[] |
no_license
|
https://github.com/anjabeck/AP-1516
|
a6acccca6abb0fe0cfc7e65e3fd35830ef5831bc
|
d07f6108038bf2d84e9b96db7d932beeb59f4b88
|
refs/heads/master
| 2023-08-31T07:02:05.547817
| 2016-08-25T10:31:08
| 2016-08-25T10:31:08
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import sem
import scipy.constants as const
from uncertainties import ufloat
from table import(
make_table,
make_SI,
write)
Strom1, Spannung1 = np.genfromtxt('Messung1.txt', unpack=True)
Strom2, Spannung2 = np.genfromtxt('Messung2.txt', unpack=True)
Anregungsspannung = np.genfromtxt('Messung3.txt', unpack=True)
Temperatur = np.genfromtxt('Messung_Temperatur.txt', unpack=True)
#Convert temperatures and determine collisions in the tube
Temperatur = Temperatur + const.zero_Celsius #Celsius to Kelvin
Sattigungsdruck = 5.5*10**7*np.exp(-6876/Temperatur) # p in mbar and T in Kelvin
Weglange = 0.0029 / Sattigungsdruck # mean free path in cm and pressure in mbar
#Compute collisions per cm (tube length)
Stosse = 1 / Weglange
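# Sanity check with a hypothetical temperature: at T = 450 K this gives
# p = 5.5e7 * exp(-6876/450) ~ 12.7 mbar, a mean free path of
# ~ 0.0029/12.7 ~ 2.3e-4 cm, and hence roughly 4.4e3 collisions per cm.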
write('build/tabelle_temperatur.tex', make_table([Temperatur, Sattigungsdruck, Weglange*10**4, Stosse], [2,2,2,2]))
#convert to amperes and volts
Spannung1 = Spannung1*(10/228)
Strom1 = Strom1*(3.8e-6/143)
Spannung2 = Spannung2*(9.6/217)
Strom2 = Strom2*(1.1e-7/86)
Anregungsspannung = Anregungsspannung*(27/222)
matrix1 = np.ones((2, len(Spannung1)-1))
# matrix_x[0,i] is the current change per voltage step, matrix_x[1,i] the corresponding voltage value
for i in range(0, len(Spannung1)-1):
matrix1[0,i] = (Strom1[i+1] -Strom1[i])/(Spannung1[i]-Spannung1[i+1])
matrix1[1,i] = Spannung1[i]
x=np.linspace(0,1)
plt.plot(matrix1[1,:], matrix1[0,:]*10**9, 'ro')
plt.plot(x+8.15, x*100000, 'k--', label='Maximale Energie')
plt.xlabel('Spannung / V')
plt.ylabel('Steigung des Stromes / nA/V')
plt.xlim(-0.2, 9)
plt.ylim(0,1600)
plt.legend(loc='best')
plt.savefig('build/Energieverteilung_25.png')
plt.show()
write('build/tabelle_stromverlauf_25.tex', make_table([Spannung1, Strom1*10**9], [2,0]))
write('build/tabelle_energieverteilung_25.tex', make_table([matrix1[1,:], matrix1[0,:]*10**9], [2,2]))
matrix2 = np.ones((2, len(Spannung2)-1))
for i in range(0, len(Spannung2)-1):
matrix2[0,i] = (Strom2[i+1]-Strom2[i])/(Spannung2[i]-Spannung2[i+1])
matrix2[1,i] = Spannung2[i]
plt.plot(matrix2[1,:], matrix2[0,:]*10**9, 'ro')
plt.xlabel('Spannung / V')
plt.ylabel(r'Steigung des Stromes / nA/V')
plt.savefig('build/Energieverteilung_140.png')
plt.show()
write('build/tabelle_stromverlauf_140.tex', make_table([Spannung2, Strom2*10**9], [2,0]))
write('build/tabelle_energieverteilung_140.tex', make_table([matrix2[1,:], matrix2[0,:]*10**9], [2,2]))
# Calculation of the excitation energy
Anregungsspannung_gesamt = ufloat(np.mean(Anregungsspannung), sem(Anregungsspannung))
Energie = const.e * Anregungsspannung_gesamt
Wellenlange = const.h*const.c / Energie
write('build/tabelle_anregungsspannung.tex', make_table([Anregungsspannung], [4]))
write('build/Anregungsspannung.tex', make_SI(Anregungsspannung_gesamt, r'\volt', figures=2))
write('build/Energie.tex', make_SI(Energie*10**19, r'\joule','e-19',figures=1))
write('build/Wellenlange.tex', make_SI(Wellenlange*10**9, r'\nano\meter', figures=1))
print(Anregungsspannung_gesamt, Energie, Wellenlange)
|
UTF-8
|
Python
| false
| false
| 3,068
|
py
| 508
|
Rechnung.py
| 47
| 0.720915
| 0.657516
| 0
| 94
| 31.489362
| 115
|
ajtanskanen/benefits
| 3,710,851,773,996
|
f1fa70d47cd3741f12fb3953fc1b241e991ea349
|
baa2041f13a21d43e9c89751057eb7517c67a5aa
|
/fin_benefits/benefits_unemp_EK2020.py
|
8484c6d65969f6876ff733bf34a212956ebd98b5
|
[
"MIT"
] |
permissive
|
https://github.com/ajtanskanen/benefits
|
a9cd9d2b2214f8c50541dbe19f40fede5ec8662e
|
82bed55716d7b4f27fa51063519646c5044b33bc
|
refs/heads/master
| 2023-07-19T01:20:48.325805
| 2023-07-18T13:57:47
| 2023-07-18T13:57:47
| 208,250,070
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import math
import gym
from gym import spaces, logger, utils, error
from gym.utils import seeding
import numpy as np
from .benefits import Benefits
import random
class BenefitsEK2020(Benefits):
"""
Description:
Changes to unemployment benefits in the EK model
Source:
AT
"""
def __init__(self,**kwargs):
super().__init__(**kwargs)
#self.muuta_ansiopv_ylaraja=True
        #self.ansiopvraha_kesto400=350/(12*21.5) # shortened by 50 days
#self.ansiopvraha_kesto300=250/(12*21.5)
        #self.toe_vaatimus=1.0 # employment condition of at least 12 months
#self.porrastus=True
#self.muuta_ansiopv_ylaraja=True
#self.muuta_pvhoito=True
def ansiopaivaraha(self,tyoton,vakiintunutpalkka,lapsia,tyotaikaisettulot,saa_ansiopaivarahaa,kesto,p,ansiokerroin=1.0,kesto_400=True):
ansiopvrahan_suojaosa=p['ansiopvrahan_suojaosa']
lapsikorotus=p['ansiopvraha_lapsikorotus']
if tyoton>0:
if lapsikorotus<1:
lapsia=0
if self.vuosi==2018:
lapsikorotus=np.array([0,5.23,7.68,9.90])*21.5
sotumaksu=0.0448 # 2015 0.0428 2016 0.0460
taite=3078.60
elif self.vuosi==2019:
lapsikorotus=np.array([0,5.23,7.68,9.90])*21.5
sotumaksu=0.0448 # 2015 0.0428 2016 0.0460
taite=3078.60
elif self.vuosi==2020:
lapsikorotus=np.array([0,5.23,7.68,9.90])*21.5
sotumaksu=0.0448 # 2015 0.0428 2016 0.0460
taite=3078.60
else:
lapsikorotus=np.array([0,5.23,7.68,9.90])*21.5
sotumaksu=0.0448 # 2015 0.0428 2016 0.0460
taite=3078.60
            if (saa_ansiopaivarahaa>0): # & (kesto<400.0): # no duration check!
                perus=self.peruspaivaraha(0) # the basic daily allowance is computed here without the child increase
vakpalkka=vakiintunutpalkka*(1-sotumaksu)
if vakpalkka>taite:
tuki2=0.2*max(0,vakpalkka-taite)+0.45*max(0,taite-perus)+perus
else:
tuki2=0.45*max(0,vakpalkka-perus)+perus
tuki2=tuki2+lapsikorotus[min(lapsia,3)]
                tuki2=tuki2*ansiokerroin # any tiering of benefits is applied via this factor
suojaosa=self.tyottomyysturva_suojaosa(ansiopvrahan_suojaosa)
                perus=self.peruspaivaraha(lapsia) # the basic daily allowance is computed here with child increases included
if tuki2>.9*vakpalkka:
tuki2=max(.9*vakpalkka,perus)
#if tuki2>.9*vakpalkka:
# tuki2=max(.9*vakpalkka,perus)
vahentavattulo=max(0,tyotaikaisettulot-suojaosa)
ansiopaivarahamaara=max(0,tuki2-0.5*vahentavattulo)
                ansiopaivarahamaara=self.ansiopaivaraha_ylaraja(ansiopaivarahamaara,tyotaikaisettulot,vakpalkka,vakiintunutpalkka,perus)
#if vakpalkka<ansiopaivarahamaara+tyotaikaisettulot:
# ansiopaivarahamaara=max(0,vakpalkka-tyotaikaisettulot)
tuki=ansiopaivarahamaara
perus=self.soviteltu_peruspaivaraha(lapsia,tyotaikaisettulot,ansiopvrahan_suojaosa)
                tuki=max(perus,tuki) # can become binding at low benefit levels
else:
ansiopaivarahamaara=0
perus=self.soviteltu_peruspaivaraha(lapsia,tyotaikaisettulot,ansiopvrahan_suojaosa)
tuki=perus
else:
perus=0
tuki=0
ansiopaivarahamaara=0
return tuki,ansiopaivarahamaara,perus
    # upper limit: 80% of the earnings reduction
def ansiopaivaraha_ylaraja(self,ansiopaivarahamaara,tyotaikaisettulot,vakpalkka,vakiintunutpalkka,peruspvraha):
if ansiopaivarahamaara>peruspvraha:
return peruspvraha+min(ansiopaivarahamaara-peruspvraha,0.8*max(0,vakiintunutpalkka-peruspvraha))
else:
return peruspvraha
#else:
#return super().ansiopaivaraha_ylaraja(ansiopaivarahamaara,tyotaikaisettulot,vakpalkka,vakiintunutpalkka)
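    # E.g. (hypothetical figures): with peruspvraha = 700 and
    # vakiintunutpalkka = 2000, the allowance is capped at
    # 700 + 0.8 * (2000 - 700) = 1740.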
def toimeentulotuki(self,omabruttopalkka,omapalkkavero,puolison_bruttopalkka,puolison_palkkavero,muuttulot,verot,asumismenot,muutmenot,p,omavastuuprosentti=0.07):
return super().toimeentulotuki(omabruttopalkka,omapalkkavero,puolison_bruttopalkka,puolison_palkkavero,muuttulot,verot,asumismenot,muutmenot,p,omavastuuprosentti=omavastuuprosentti)
|
UTF-8
|
Python
| false
| false
| 4,781
|
py
| 38
|
benefits_unemp_EK2020.py
| 20
| 0.608312
| 0.551427
| 0
| 103
| 45.252427
| 189
|
mF2C/LifecycleManagement
| 4,956,392,280,686
|
6b8f9afdd77ff7b61fe2b094143c4aa7bc79b91e
|
0d893c8e3a0e5db6da272fb909080d44983781c2
|
/lifecycle/common.py
|
b47229e22103b50df024e6eaac7be913f2d7cc60
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/mF2C/LifecycleManagement
|
5be09ef5f1c98319a9361a93e129e19fa1b5d007
|
a9661c6dcaad94564c3e00d6059b9c0713b3f3c0
|
refs/heads/master
| 2021-06-03T20:54:22.429726
| 2020-01-17T14:02:32
| 2020-01-17T14:02:32
| 107,657,386
| 0
| 0
|
Apache-2.0
| false
| 2021-05-06T19:05:58
| 2017-10-20T09:13:42
| 2020-01-17T14:02:51
| 2021-05-06T19:05:57
| 4,827
| 0
| 0
| 3
|
Python
| false
| false
|
"""
Common functions
This is being developed for the MF2C Project: http://www.mf2c-project.eu/
Copyright: Roi Sucasas Font, Atos Research and Innovation, 2017.
This code is licensed under an Apache 2.0 license. Please, refer to the LICENSE.TXT file for more information
Created on 09 feb. 2018
@author: Roi Sucasas - ATOS
"""
import os
import config
from flask import Response, json
from lifecycle.logs import LOG
###############################################################################
# GLOBAL VARS:
# LOGS
TRACE = 5
# Service Type
SERVICE_DOCKER = "docker"
SERVICE_DOCKER_COMPOSE = "docker-compose" #"docker-compose"
SERVICE_COMPSS = "compss"
SERVICE_DOCKER_SWARM = "docker-swarm" #"swarm" #""docker-swarm"
SERVICE_KUBERNETES = "K8s"
SERVICE_DOCKER_COMPOSE_SWARM = "docker-compose-swarm" #"docker-compose-swarm
# Operations:
OPERATION_START = "start"
OPERATION_STOP = "stop"
OPERATION_RESTART = "restart"
OPERATION_TERMINATE = "terminate"
OPERATION_START_JOB = "start-job"
OPERATION_STOP_TERMINATE = "stop-and-terminate"
# service instance / agent status
STATUS_ERROR = "error"
STATUS_ERROR_STARTING = "error-starting"
STATUS_ERROR_STOPPING = "error-stopping"
STATUS_UNKNOWN = "??"
STATUS_NOT_DEPLOYED = "not-deployed"
STATUS_WAITING = "waiting"
STATUS_STARTED = "started"
STATUS_STOPPED = "stopped"
STATUS_TERMINATED = "terminated"
STATUS_CREATED_NOT_INITIALIZED = "created-not-initialized"
STATUS_DEPLOYING = "deploying"
STATUS_STARTING = "starting"
STATUS_STOPPING = "stopping"
STATUS_TERMINATING = "terminating"
###############################################################################
# STAND_ALONE_MODE:
# is_standalone_mode
def is_standalone_mode():
if config.dic['LM_MODE'] is not None and config.dic['LM_MODE'] == 'STANDALONE':
return True
return False
###############################################################################
# RESPONSEs:
# CLASS ResponseCIMI
class ResponseCIMI():
msj = ""
# Generate response 200 (OK)
def gen_response_ok(message, key, value, key2=None, value2=None):
    body = {'error': False, 'message': message}
    body[key] = value
    if not (key2 is None) and not (value2 is None):
        body[key2] = value2
    LOG.log(TRACE, "[lifecycle.common.common] [gen_response_ok] Generate response OK; dict=" + str(body))
    return body
# Generate response with an explicit HTTP status (ERROR)
def gen_response(status, message, key, value, key2=None, value2=None):
    body = {'error': True, 'message': message}
    body[key] = value
    if not (key2 is None) and not (value2 is None):
        body[key2] = value2
    LOG.log(TRACE, '[lifecycle.common.common] [gen_response] Generate response ' + str(status) + "; dict=" + str(body))
    return Response(json.dumps(body), status=status, content_type='application/json')
# Generate response ERROR (KO) as a plain dict
def gen_response_ko(message, key, value, key2=None, value2=None):
    body = {'error': True, 'message': message}
    body[key] = value
    if not (key2 is None) and not (value2 is None):
        body[key2] = value2
    LOG.log(TRACE, "[lifecycle.common.common] [gen_response] Generate response KO; dict=" + str(body))
    return body
###############################################################################
# IPs:
# check_ip: Check if IP is alive
def check_ip(ip_adress):
    try:
        # '-c 1' ==> linux
        # '-n 1' ==> windows
        response = os.system("ping -c 1 " + ip_adress)
        if response == 0:
            return True
        return False  # ping returned non-zero: host did not answer
    except:
        LOG.error('[lifecycle.common.common] [check_ip] Exception')
        return True  # fail open on unexpected errors, as the original did
###############################################################################
# ENV:
# set_value_env: set value (in config dict) from environment
def set_value_env(env_name):
res = os.getenv(env_name, default='not-defined')
#LOG.debug('LIFECYCLE: [' + env_name + '=' + res + ']')
if res != 'not-defined':
config.dic[env_name] = res
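# --- Editor's sketch (not in the original module): how the response helpers are
# typically used from a Flask handler. The handler and lookup below are
# illustrative assumptions.
#
#   def get_service_instance(instance_id):
#       instance = lookup(instance_id)   # hypothetical lookup helper
#       if instance is None:
#           # HTTP error wrapped in a flask Response with a JSON body
#           return gen_response(404, "instance not found", "id", instance_id)
#       # plain dict with error=False, serialized by the caller
#       return gen_response_ok("instance found", "instance", instance)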
|
UTF-8
|
Python
| false
| false
| 3,909
|
py
| 49
|
common.py
| 40
| 0.61141
| 0.600153
| 0
| 130
| 29.069231
| 119
|
VickyChing/ICASSP19
| 14,594,298,876,367
|
7e221da539932c9dac63c1701e0c8d180f2691c1
|
310077d1ef72e29e8659702f539eb7e060f6038d
|
/plot.py
|
02723c8270b009049e27675811537be9a2ff6358
|
[
"MIT"
] |
permissive
|
https://github.com/VickyChing/ICASSP19
|
8476f63b3fd1c244211d34769539026f12553a59
|
247b0b9d738679f5472dd74f52bd1933b871e87f
|
refs/heads/master
| 2020-09-22T06:59:28.514413
| 2019-09-04T07:45:26
| 2019-09-04T07:45:26
| 225,097,441
| 0
| 1
|
MIT
| true
| 2019-12-01T02:42:01
| 2019-12-01T02:42:01
| 2019-10-02T03:36:57
| 2019-09-04T07:45:31
| 34
| 0
| 0
| 0
| null | false
| false
|
import torch
import argparse
import numpy as np
import mir_eval
from madmom.io import midi
from adsr import ADSRNoteTrackingProcessor
from matplotlib.collections import PatchCollection
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
np.set_printoptions(precision=4)
def get_onsets_and_pitch_labels(midifile):
pattern = midi.MIDIFile(midifile)
intervals = []
labels = []
for onset, _pitch, duration, velocity, _channel in pattern.sustained_notes:
label = int(_pitch) # do not subtract 21; mir_eval needs pitches strictly >= 0 anyways
intervals.append([onset, onset + duration])
labels.append(label)
return np.array(intervals), np.array(labels)
def evaluate(est_intervals, est_pitches, ref_intervals, ref_pitches):
if len(est_intervals) > 0:
# evaluate onsets and pitches
p, r, f, o = mir_eval.transcription.precision_recall_f1_overlap(
ref_intervals,
ref_pitches,
est_intervals,
est_pitches,
pitch_tolerance=0, # no numerical tolerance for midi note numbers
onset_tolerance=0.05, # +- 50 ms
offset_ratio=None, # do not evaluate offsets
strict=False
)
print('onsets p {:>4.2f} r {:>4.2f} f {:>4.2f} o {:>4.2f}'.format(p, r, f, o))
# evaluate notes and pitches
p, r, f, o = mir_eval.transcription.precision_recall_f1_overlap(
ref_intervals,
ref_pitches,
est_intervals,
est_pitches,
pitch_tolerance=0, # no numerical tolerance for midi note numbers
onset_tolerance=0.05, # +- 50 ms
offset_ratio=0.2, # evaluate complete notes
strict=False
)
print('notes p {:>4.2f} r {:>4.2f} f {:>4.2f} o {:>4.2f}'.format(p, r, f, o))
else:
print('complete failure')
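# --- Editor's sketch (not part of the original script): a tiny self-contained
# call to evaluate() with made-up notes, to show the expected shapes. Intervals
# are (N, 2) arrays of [onset, offset] (seconds here), pitches are MIDI numbers.
#
#   ref_intervals = np.array([[0.0, 0.5], [1.0, 1.5]])
#   ref_pitches = np.array([60, 64])
#   est_intervals = np.array([[0.01, 0.48], [1.0, 1.2]])
#   est_pitches = np.array([60, 64])
#   evaluate(est_intervals, est_pitches, ref_intervals, ref_pitches)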
def line_segments(intervals, pitches, pitch_offset, yoffset=0.):
segments = []
for (start, end), _pitch in zip(intervals, pitches):
pitch = _pitch + pitch_offset + yoffset
segments.append(((start, pitch), (end, pitch)))
return segments
def get_rectangles(intervals,
pitches,
pitch_height,
pitch_offset,
pitch_multiplier,
yoffset=0.,
linewidth=1,
color='k',
fill=False,
alpha=1.):
rectangles = []
for (start, end), _pitch in zip(intervals, pitches):
pitch = ((_pitch + pitch_offset) * pitch_multiplier) + yoffset
xy = (start, pitch)
width = end - start
height = pitch_height
facecolor = color if fill else None
rectangles.append(mpatches.Rectangle(
xy,
width,
height,
edgecolor=color,
facecolor=facecolor,
fill=fill,
linewidth=linewidth,
alpha=alpha
))
return rectangles
def onsets(intervals, pitches, pitch_offset, yoffset=0.):
points = []
for (start, _), _pitch in zip(intervals, pitches):
pitch = _pitch + pitch_offset + yoffset
points.append([start, pitch])
return np.array(points)
def offsets(intervals, pitches, pitch_offset, yoffset=0.):
points = []
for (_, end), _pitch in zip(intervals, pitches):
pitch = _pitch + pitch_offset + yoffset
points.append([end, pitch])
return np.array(points)
def run_config(activation_filename):
results = dict()
onset_note_prob = 0.9
offset_prob = 0.1
threshold = 0.5
activations_bundle = torch.load(activation_filename)
activations = activations_bundle['activations']
midifilename = activations_bundle['metadata']['midi_filename']
ref_intervals, ref_pitches = get_onsets_and_pitch_labels(midifilename)
fps = 50
adsr = ADSRNoteTrackingProcessor(
onset_prob=onset_note_prob,
note_prob=onset_note_prob,
offset_prob=offset_prob,
attack_length=0.04,
decay_length=0.04,
release_length=0.02,
complete=True,
onset_threshold=threshold,
note_threshold=threshold,
fps=fps,
pitch_offset=21
)
notes, paths = adsr.process(activations, clip=1e-2)
est_intervals = []
est_pitches = []
for onset, pitch, duration in notes:
est_intervals.append([onset, onset + duration])
est_pitches.append(pitch)
est_intervals = np.array(est_intervals)
est_pitches = np.array(est_pitches)
# convert timing in both ref_intervals and est_intervals into framecounts
est_intervals = est_intervals * fps
ref_intervals = ref_intervals * fps
# convert intervals and pitches into line segments; subtract 21 from pitch values
ref_segments = line_segments(ref_intervals, ref_pitches, -21)
est_segments = line_segments(est_intervals, est_pitches, -21, 0.25)
ref_onsets = onsets(ref_intervals, ref_pitches, -21)
est_onsets = onsets(est_intervals, est_pitches, -21, 0.25)
ref_offsets = offsets(ref_intervals, ref_pitches, -21)
est_offsets = offsets(est_intervals, est_pitches, -21, 0.25)
ref_color = 'green'
est_color = 'gray'
#############################################################################
# fig, axes = plt.subplots(nrows=3, sharex=True, sharey=True)
# ax = axes[0]
# ax.set_title('onsets')
# ax.imshow(activations[:, :, 1].T, origin='lower', cmap='gray_r', vmin=0, vmax=1)
# ax = axes[1]
# ax.set_title('frames')
# ax.imshow(activations[:, :, 0].T, origin='lower', cmap='gray_r', vmin=0, vmax=1)
# ax = axes[2]
# ax.set_title('offsets')
# ax.imshow(activations[:, :, 2].T, origin='lower', cmap='gray_r', vmin=0, vmax=1)
# ax = axes[0]
# ax.add_collection(LineCollection(ref_segments, colors=[ref_color]))
# ax.add_collection(LineCollection(est_segments, colors=[est_color]))
# ax = axes[1]
# ax.scatter(ref_onsets[:, 0], ref_onsets[:, 1], c=[ref_color])
# ax.scatter(est_onsets[:, 0], est_onsets[:, 1], c=[est_color])
# ax = axes[2]
# ax.scatter(ref_offsets[:, 0], ref_offsets[:, 1], c=[ref_color])
# ax.scatter(est_offsets[:, 0], est_offsets[:, 1], c=[est_color])
#############################################################################
ref_rects = get_rectangles(
ref_intervals,
ref_pitches,
pitch_height=3,
pitch_offset=-21,
pitch_multiplier=3,
yoffset=-0.5,
linewidth=2,
color=ref_color,
fill=False,
alpha=1.
)
est_rects = get_rectangles(
est_intervals,
est_pitches,
pitch_height=2.4,
pitch_offset=-21,
pitch_multiplier=3,
yoffset=-0.2,
linewidth=1.5,
color=est_color,
fill=True,
alpha=0.5
)
cmap = LinearSegmentedColormap.from_list('rwb', ['orange', 'white', 'black'])
merged = np.zeros((len(activations), 88 * 3))
merged[:, 0::3] = activations[:, :, 0] # frames
merged[:, 1::3] = activations[:, :, 1] # onsets
merged[:, 2::3] = activations[:, :, 2] # offsets
merged[merged <= 0.5] *= -1
fig, ax = plt.subplots()
ax.imshow(merged.T, origin='lower', cmap=cmap, vmin=-1, vmax=1)
ax.add_collection(PatchCollection(ref_rects, match_original=True))
ax.add_collection(PatchCollection(est_rects, match_original=True))
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('activation_filename', type=str)
args = parser.parse_args()
run_config(args.activation_filename)
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false
| false
| 7,805
|
py
| 20
|
plot.py
| 15
| 0.584369
| 0.565535
| 0
| 240
| 31.520833
| 95
|
kbsezginel/tee_mof
| 17,343,077,979,745
|
a10acaf6eb9c28df9798e78c79d1548bfccf416f
|
ce8b70e1b61b64a0f9f80f1974e9ffe34b5eb242
|
/thermof/read.py
|
2f40ca3cf3887c86a74b42029fb76be4798a642f
|
[
"MIT"
] |
permissive
|
https://github.com/kbsezginel/tee_mof
|
6a2da327d0d5178c962106c270bbed2f3d0df558
|
44c33e37aeadce31241a878135b1531757a2b1c4
|
refs/heads/master
| 2021-01-11T16:41:44.227544
| 2018-06-09T19:17:27
| 2018-06-09T19:17:27
| 80,137,987
| 0
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Date: August 2017
# Author: Kutay B. Sezginel
"""
Read Lammps output files for thermal conductivity calculations
"""
import os
import math
import yaml
import csv
import numpy as np
from thermof.reldist import reldist
from thermof.parameters import k_parameters, thermo_headers
def read_thermal_flux(file_path, dt=k_parameters['dt'], start=200014, j_index=3):
    """Read thermal flux autocorrelation vs time data from a Lammps simulation output file
    Args:
        - file_path (str): Thermal flux autocorrelation file generated by Lammps
        - dt (int): Sampling interval (fs). Can be calculated by multiplying the timestep with the sampling interval ($s) used for autocorrelation
        - start (int): Index of the line to start reading flux autocorrelation (corresponds to the last function)
        - j_index (int): Index of thermal flux in file
    Returns:
        - list: thermal flux autocorrelation function
        - list: time
    """
with open(file_path, 'r') as f:
flux_lines = f.readlines()
flux, time = [], []
for line in flux_lines[start:]:
ls = line.split()
t = (float(ls[0]) - 1) * dt / 1000.0
flux.append(float(ls[j_index]))
time.append(t)
return flux, time
def calculate_k(flux, k_par=k_parameters):
"""Calculate thermal conductivity (W/mK) from thermal flux autocorrelation function
Args:
        - flux (list): Thermal flux autocorrelation read by the read_thermal_flux method
- k_par (dict): Dictionary of calculation parameters
Returns:
- list: Thermal conductivity autocorrelation function
"""
k = flux[0] / 2 * k_par['volume'] * k_par['dt'] / (k_par['kb'] * math.pow(k_par['temp'], 2)) * k_par['conv']
k_data = [k]
for J in flux[1:]:
k = k + J * k_par['volume'] * k_par['dt'] / (k_par['kb'] * math.pow(k_par['temp'], 2)) * k_par['conv']
k_data.append(k)
return k_data
def estimate_k(k_data, time, t0=5, t1=10):
""" Get approximate thermal conductivity value for a single simulation.
The arithmetic average of k values are taken between given timesteps.
Args:
- k_data (list): Thermal conductivity autocorrelation function
- time (list): Simulation timestep
- t0: Timestep to start taking average of k values
- t1: Timestep to end taking average of k values
Returns:
- float: Estimate thermal conductivity
"""
start, end = time.index(t0), time.index(t1)
return (sum(k_data[start:end]) / len(k_data[start:end]))
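# --- Editor's sketch (not in the original module): the Green-Kubo pipeline that
# the three functions above implement, on one flux file. The file name and time
# window below are assumptions.
#
#   flux, time = read_thermal_flux('J0Jt_tx.dat', dt=k_parameters['dt'])
#   k_data = calculate_k(flux, k_par=k_parameters)  # running integral of the HCACF
#   k_est = estimate_k(k_data, time, t0=5, t1=10)   # plateau average in W/mK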
def average_k(k_runs):
"""Calculate average thermal conductivity for multiple runs
Args:
- k_runs (list): 2D list of thermal conductivity autocorrelation function for multiple runs
Returns:
- list: Arithmetic average of thermal conductivity per timestep for multiple runs
"""
n_frames = len(k_runs[0])
for run_index, k in enumerate(k_runs):
run_frames = len(k)
if run_frames != n_frames:
            raise TimestepsMismatchError('Number of timesteps for initial run not equal to run %i (%i != %i)'
                                         % (run_index, n_frames, run_frames))
avg_k_data = []
for timestep in range(n_frames):
avg_k_data.append(sum([k[timestep] for k in k_runs]) / len(k_runs))
return avg_k_data
def get_flux_directions(run_dir, k_par=k_parameters, verbose=True):
"""Return thermal flux data file and direction name for each direction as lists.
Each file with the given prefix is selected as thermal flux file and direction is read as the
character between prefix and file extension.
Example: J0Jt_tx.dat -> prefix should be 'J0Jt_t' and direction would be read as 'x'.
Args:
- run_dir (str): Lammps simulation directory with thermal flux files
Returns:
- list: List of thermal flux files found with given prefix
- list: List of thermal flux directions
"""
flux_files, directions = [], []
for f in os.listdir(run_dir):
if k_par['prefix'] in f:
flux_files.append(os.path.join(run_dir, f))
directions.append(f.split('.')[0].split('J0Jt_t')[1])
if len(directions) == 0:
raise FluxFileNotFoundError('No flux file found with prefix: %s' % k_par['prefix'])
else:
print('%i directions found.' % (len(directions))) if verbose else None
return flux_files, directions
def read_run(run_dir, k_par=k_parameters, t0=5, t1=10, verbose=True):
"""Read single Lammps simulation run
Args:
- run_dir (str): Lammps simulation directory for single run
- k_par (dict): Dictionary of calculation parameters
- verbose (bool): Print information about the run
Returns:
- dict: Run data containing thermal conductivity, estimate, timesteps, run name
"""
run_data = dict(name=os.path.basename(run_dir), k={}, k_est={}, time=[], directions=[], hcacf={})
trial_data = []
runs_id = []
if os.path.isdir(run_dir):
if k_par['read_thermo']:
print('Reading log file -> %s' % k_par['log_file']) if verbose else None
headers = get_thermo_headers(k_par['thermo_style'])
thermo_data = read_log(os.path.join(run_dir, '%s' % k_par['log_file']), headers=headers)
fix = k_par['fix']
run_data['thermo'] = read_thermo(thermo_data, headers=k_par['thermo_style'], fix=fix)
if 'vol' in k_par['thermo_style']:
if fix is None:
fix = list(range(len(thermo_data)))
k_par['fix'] = fix
k_par['initial_volume'] = k_par['volume']
k_par['volume'] = run_data['thermo'][fix[-1]]['vol'][-1]
k_par['deltaV'] = (k_par['volume'] - k_par['initial_volume']) / k_par['initial_volume'] * 100
print('Volume read as: %.3f | Delta V: %.2f %%' % (k_par['volume'], k_par['deltaV'])) if verbose else None
flux_files, directions = get_flux_directions(run_dir, k_par=k_par, verbose=verbose)
run_message = '%-9s ->' % run_data['name']
for direction, flux_file in zip(directions, flux_files):
flux, time = read_thermal_flux(flux_file, dt=k_par['dt'])
run_data['hcacf'][direction] = flux
k = calculate_k(flux, k_par=k_par)
run_data['k'][direction] = k
run_data['k_est'][direction] = estimate_k(k, time, t0=k_par['t0'], t1=k_par['t1'])
run_message += ' k: %.3f W/mK (%s) |' % (run_data['k_est'][direction], direction)
if k_par['read_walltime']:
run_data['walltime'] = read_walltime(os.path.join(run_dir, '%s' % k_par['log_file']))
if k_par['read_thexp']:
run_data['thexp'] = read_thermal_expansion(os.path.join(run_dir, '%s' % k_par['thexp_file']))
print('Thermal expansion read') if verbose else None
run_data['time'] = time
run_data['directions'] = directions
print(run_message) if verbose else None
else:
raise RunDirectoryNotFoundError('Run directory not found: %s' % run_dir)
if k_par['isotropic']:
run_data['k']['iso'] = average_k([run_data['k'][d] for d in directions])
run_data['hcacf']['iso'] = average_k([run_data['hcacf'][d] for d in directions])
run_data['k_est']['iso'] = estimate_k(run_data['k']['iso'], run_data['time'], t0=k_par['t0'], t1=k_par['t1'])
print('Isotropic -> k: %.3f W/mK from %i directions' % (run_data['k_est']['iso'], len(directions))) if verbose else None
return run_data
def read_trial(trial_dir, k_par=k_parameters, verbose=True):
"""Read Lammps simulation trial with any number of runs
Args:
- trial_dir (str): Lammps simulation directory including directories for multiple runs
- k_par (dict): Dictionary of calculation parameters
- verbose (bool): Print information about the run
Returns:
- dict: Trial data containing thermal conductivity, estimate, timesteps, run name for each run
"""
trial = dict(runs=[], data={}, name=os.path.basename(trial_dir))
print('\n------ %s ------' % trial['name']) if verbose else None
run_list = [os.path.join(trial_dir, run) for run in os.listdir(trial_dir)
if os.path.isdir(os.path.join(trial_dir, run))]
for run in run_list:
run_data = read_run(run, k_par=k_par, verbose=verbose)
trial['data'][run_data['name']] = run_data
trial['runs'].append(run_data['name'])
if k_par['average']:
trial['avg'] = average_trial(trial, isotropic=k_par['isotropic'])
return trial
def average_trial(trial, isotropic=False):
"""Take average of thermal conductivities for multiple runs.
Assumes all runs have the same number of directions.
Args:
        - isotropic (bool): Isotropy of thermal flux; if True the average is taken for each direction
Returns:
- dict: Trial data average for thermal conductivity and estimate
"""
trial_avg = dict(k={}, k_est={'stats': {}})
for direction in trial['data'][trial['runs'][0]]['directions']:
# Take average of k for each direction
trial_avg['k'][direction] = average_k([trial['data'][run]['k'][direction] for run in trial['runs']])
k_est_runs = [trial['data'][run]['k_est'][direction] for run in trial['runs']]
trial_avg['k_est'][direction] = sum(k_est_runs) / len(trial['runs'])
trial_avg['k_est']['stats'][direction] = dict(std=np.std(k_est_runs),
max=max(k_est_runs),
min=min(k_est_runs))
if isotropic:
# Take average of isotropic k and k_estimate
trial_avg['k']['iso'] = average_k([trial['data'][run]['k']['iso'] for run in trial['runs']])
k_est_iso_runs = [trial['data'][run]['k_est']['iso'] for run in trial['runs']]
trial_avg['k_est']['iso'] = sum(k_est_iso_runs) / len(trial['runs'])
trial_avg['k_est']['stats']['iso'] = dict(std=np.std(k_est_iso_runs),
max=max(k_est_iso_runs),
min=min(k_est_iso_runs))
return trial_avg
def read_trial_set(trial_set_dir, k_par=k_parameters, verbose=True):
"""Read multiple trials with multiple runs
Args:
- trial_set_dir (str): Lammps simulation directory including directories for multiple trials
- k_par (dict): Dictionary of calculation parameters
- verbose (bool): Print information about the run
Returns:
- dict: Trial set data containing thermal conductivity, estimate, timesteps, trial name for each trial
"""
trial_set = dict(trials=[], data={}, name=os.path.basename(trial_set_dir))
trial_list = [os.path.join(trial_set_dir, t) for t in os.listdir(trial_set_dir)
if os.path.isdir(os.path.join(trial_set_dir, t))]
for trial_dir in trial_list:
trial = read_trial(trial_dir, k_par=k_par, verbose=verbose)
trial_set['trials'].append(os.path.basename(trial_dir))
trial_set['data'][trial['name']] = trial
return trial_set
def read_log(log_file, headers='Step Temp E_pair E_mol TotEng Press'):
"""Read log.lammps file and return lines for multiple thermo data
Args:
- log_file (str): Lammps simulation log file path
- headers (str): The headers for thermo data ('Step Temp E_pair E_mol TotEng Press')
Returns:
- list: 2D list of thermo lines for all fixes
"""
with open(log_file, 'r') as log:
log_lines = log.readlines()
thermo_start = []
thermo_end = []
for line_index, line in enumerate(log_lines):
if headers in line:
start = line_index + 1
thermo_start.append(start)
if 'Loop time' in line:
end = line_index
thermo_end.append(end)
thermo_data = []
for s, e in zip(thermo_start, thermo_end):
thermo_data.append(log_lines[s:e])
return thermo_data
def read_thermo(thermo_data, headers=['step', 'temp', 'epair', 'emol', 'etotal', 'press'], fix=None):
"""Read thermo data from given thermo log lines
Args:
- thermo_data (list): 2D list of thermo lines for all fixes
- headers (list): The headers for thermo data
- fix (list): Name of the separate fixes in thermo
Returns:
- dict: Thermo data for all fixes separated as: thermo['fix1']['header1'] = ...
"""
thermo = {}
if fix is None:
fix = list(range(len(thermo_data)))
if len(fix) != len(thermo_data):
raise ThermoFixDataMatchError('Fixes: %s do not match fixes read in log file' % ' | '.join(fix))
else:
for t, thermo_fix in enumerate(thermo_data):
ther = {key: [] for key in headers}
for data in thermo_fix:
line = data.strip().split()
for i, h in enumerate(headers):
ther[h].append(float(line[i]))
thermo[fix[t]] = ther
return thermo
def read_walltime(log_file):
"""Read log.lammps file and return lines for multiple thermo data
Args:
- log_file (str): Lammps simulation log file path
Returns:
- list: Wall time in hours, minutes, and seconds -> [h, m, s]
"""
with open(log_file, 'r') as log:
log_lines = log.readlines()
if 'Total wall time' in log_lines[-1]:
walltime = log_lines[-1].split()[-1]
h, m, s = walltime.split(':')
else:
err_msg = 'Walltime not found! Simulation might not be finished, please check log file -> %s' % log_file
err_msg += '\nLast line of log file -> %s' % log_lines[-1]
raise WallTimeNotFoundError(err_msg)
return [int(h), int(m), int(s)]
def read_thermal_expansion(thexp_file):
"""
Read thermal expansion csv file.
Args:
- thexp_file (str): Thermal expansion csv file
Returns:
- dict: Thermal expansion data for Lammps run
"""
thexp = dict(step=[], volume=[], enthalpy=[])
with open(thexp_file, newline='') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',', quotechar='|')
next(csv_reader, None) # Skip the headers
for row in csv_reader:
thexp['step'].append(float(row[0]))
thexp['volume'].append(float(row[1]))
thexp['enthalpy'].append(float(row[2]))
return thexp
def read_framework_distance(run_list, fdist_par):
"""Read trajectory for multiple runs and calculate framework distance
Args:
- run_list (list): List of absolute path of run directories
- k_par (dict): Dictionary of calculation parameters
Returns:
- list: List of dictionaries containing framework distance data
"""
start, end = fdist_par['traj_start'], fdist_par['traj_end']
dist_data = []
for run in run_list:
traj_path = os.path.join(run, fdist_par['traj'])
x_coords, y_coords, z_coords = reldist(traj_path, end=end)
x_coords.append(0)
x_coords.append(1)
y_coords.append(0)
y_coords.append(1)
title = '%s/%s' % (os.path.split(os.path.split(run)[0])[1], os.path.split(run)[1])
dist_data.append(dict(x=x_coords[start:], y=y_coords[start:], z=z_coords[start:], title=title))
return dist_data
def get_thermo_headers(thermo_style, thermo_headers=thermo_headers):
"""
Lammps thermo headers for log file.
"""
return ' '.join([thermo_headers[i] for i in thermo_style])
class FluxFileNotFoundError(Exception):
pass
class TimestepsMismatchError(Exception):
pass
class RunDirectoryNotFoundError(Exception):
pass
class ThermoFixDataMatchError(Exception):
pass
class WallTimeNotFoundError(Exception):
pass
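# --- Editor's sketch (not in the original module): reading a directory of
# trials and printing one isotropic estimate. The path is an assumption.
#
#   trial_set = read_trial_set('/path/to/simulations', k_par=k_parameters)
#   trial = trial_set['data'][trial_set['trials'][0]]
#   run = trial['data'][trial['runs'][0]]
#   print(run['k_est'].get('iso'))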
|
UTF-8
|
Python
| false
| false
| 15,839
|
py
| 82
|
read.py
| 50
| 0.604647
| 0.599407
| 0
| 399
| 38.696742
| 138
|
simonb21/cmsc128-ay2015-16-assign001-py
| 5,016,521,852,259
|
e6b09981c96e24467d0833a0b23eb61008667dbb
|
06904d39db5dbd91e5a976be01856442d3d17c92
|
/numberDelimited.py
|
d03f1e2b7c39ff7c6200104003d8aa5bf1b3ef59
|
[] |
no_license
|
https://github.com/simonb21/cmsc128-ay2015-16-assign001-py
|
6941ac1834dc0f2c674125cf799e5724c64719da
|
4019ed5d27b8445c6c7fbbc14ca25fea01962f30
|
refs/heads/master
| 2016-08-09T06:54:50.924132
| 2016-02-13T14:51:03
| 2016-02-13T14:51:03
| 51,634,321
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#Language Used: Python 3.5
#Constants#
MIN = 0
MAX = 1000000
good = False #Flag for valid input
while not good:
try:
x = int(float(input("Input: ")))
if x>=MIN and x<=MAX: #Limits the input so it is between 0 and 1000000
good = True
else:
print("Invalid value size!")
except ValueError:
print("Invalid value!")
good = False
while not good:
delimiter = input("Delimiter: ")
if len(delimiter)==1:#Limits to a single character delimiter
good = True
else:
print("Invalid Delimiter")
good = False
while not good:
    try:
        jump = int(float(input("Distance:")))
        if jump > MIN: # the original mistakenly re-checked x here
            good = True
        else:
            print("Invalid Value!")
    except ValueError: # Checks if the input is valid
        print("Invalid value!")
x = str(x)
out = ""
i = len(x)-1
j = 0
while i>=0:
if i!=len(x)-1 and j==jump:#Delimits the input if the counter is equal to the given distance
out = delimiter+out
j = 0
out = x[i]+out
i = i-1
j = j+1
print("Output: %s" % out)
|
UTF-8
|
Python
| false
| false
| 1,096
|
py
| 4
|
numberDelimited.py
| 4
| 0.568431
| 0.544708
| 0
| 49
| 21.367347
| 96
|
Marketuser/LearnPython-lesson1
| 3,358,664,460,384
|
8bdfa95c0dd35d18b0a49338672a7dcda579eb74
|
bcfefdb823051a9115cd62ddfebeb28ba726bafe
|
/answers.py
|
c6b22d87d95b59bbfd191eb224f3317638dd7810
|
[] |
no_license
|
https://github.com/Marketuser/LearnPython-lesson1
|
33f87bfa638cd48287cf204dab6566d84a046d35
|
a95f9fb84dcbd44ee68a22c0c132135d88a51799
|
refs/heads/master
| 2021-08-15T23:24:36.970526
| 2017-11-18T14:03:52
| 2017-11-18T14:03:52
| 111,210,991
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
def get_answer(question, answer="Не понимаю"):  # default: "I don't understand"
    # The original flat list mixed questions and replies; map them instead.
    # The reply for "пока" was missing; "Пока!" ("Bye!") is an assumed placeholder.
    answers = {"привет": "И тебе привет!", "как дела": "Лучше всех", "пока": "Пока!"}
    return answers.get(question, answer)
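# Editor's sketch (usage, not in the original):
#   print(get_answer("привет"))      # -> И тебе привет! ("And hello to you!")
#   print(get_answer("что нового"))  # unknown question -> Не понимаю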
|
UTF-8
|
Python
| false
| false
| 152
|
py
| 2
|
answers.py
| 2
| 0.652174
| 0.652174
| 0
| 3
| 37.666667
| 72
|
thelma1944/Python_Stuff
| 4,827,543,286,323
|
dbca047be049f18c75300d947719b4626a5ef8c4
|
484c462c29e3c2f8ac280b79c11db6982c6a8ca6
|
/wip/CFO_Module/UDP_client.py
|
74f22ac708e0b891587e6bcaf72d526803a5ba69
|
[] |
no_license
|
https://github.com/thelma1944/Python_Stuff
|
b5fa53bf008bb5e865204201b144fe20e7f87565
|
077131a2c9f247396dca86fdf18933d38ae8d501
|
refs/heads/master
| 2021-06-05T12:25:35.779070
| 2020-10-03T18:20:16
| 2020-10-03T18:20:16
| 16,077,931
| 0
| 1
| null | false
| 2021-03-26T00:30:14
| 2014-01-20T17:36:16
| 2020-10-03T18:23:33
| 2021-03-26T00:30:12
| 97,334
| 0
| 1
| 3
|
Python
| false
| false
|
#!/usr/bin/python
# UDP client example
import socket
import time
import datetime
"""
This is a ten second heart beat using the port
number as the single word as the heartbeat
28 Jan 2014 TEV
"""
port = 5000
target = '216.97.82.48'
while 1:
    # wait ten seconds between heartbeats
    then = datetime.datetime.now() + datetime.timedelta(seconds=10)
    while then > datetime.datetime.now():
        print 'sleeping'
        time.sleep(1)
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # the payload is the port number itself, sent as a single word
    client_socket.sendto(str(port), (target, port))
    client_socket.close()
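# --- Editor's sketch (not in the original): a minimal matching receiver for the
# heartbeat above, listening on the same port (Python 2, to match this file).
#
#   import socket
#   server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   server.bind(('', 5000))
#   while 1:
#       data, addr = server.recvfrom(1024)
#       print 'heartbeat %s from %s' % (data, addr[0])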
|
UTF-8
|
Python
| false
| false
| 687
|
py
| 81
|
UDP_client.py
| 35
| 0.558952
| 0.524017
| 0
| 28
| 23.464286
| 73
|
Belbola/learn-GUI-Python
| 4,011,499,484,840
|
afd3ef2eb451385e56265e5da29f660e633dc7de
|
0f8d89c1d3cedfd7a19c249791ac10d2e3bf65ab
|
/6 - Advanced Widgets/HelloTextWidget.py
|
3d4033c9b32020665e500211692d47be65fa66ff
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/Belbola/learn-GUI-Python
|
6e1c1d94b0180ec65ea54202ec191ec25a42181b
|
c259658ee2f3b67eeb84a543cb74e3166b5edcd7
|
refs/heads/master
| 2016-09-08T19:17:56.510086
| 2015-07-17T06:40:53
| 2015-07-17T06:40:53
| 38,014,428
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
Python 3.4.0 (default, Jun 19 2015, 14:20:21)
[GCC 4.8.2] on linux
Type "copyright", "credits" or "license()" for more information.
>>> from tkinter import *
>>> root = Tk()
>>> text = Text(root, width = 40, height = 10)
>>> text.pack()
>>> text.config(wrap = 'word')
>>> # the widget has : get, insert and delete methods
>>> # the get requires indices for the text, the general syntax :
>>> # "base modifier modifier modifier"
>>> # lets focus on the most common way to build indexes :
>>> # Common Base Formats : line.char (example : 4.2), end
>>> # Common Modifiers : +/- #chars and +/- #lines, linestart, lineend, wordstart, wordend
>>> # all of the text in the text box
>>> text.get('1.0', 'end')
'this is a long message !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\nthis the shit i do like ! yeah fuck yeah hello ... \n'
>>> # now we get just the first line
>>> text.get('1.0', '1.end')
'this is a long message !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
>>> # to insert text into the text widget we'll use the insert method
>>> text.insert('1.0 + 2 lines', 'Inserted Message')
>>> text.insert('1.0 + 2 lines lineend', ' and\nmore\nmore\nmore...')
>>> # to delete text from the text widget use the delete method
>>> text.delete('1.0') # deletes one character (the first in this case)
>>> text.delete('1.0', '1.0 lineend')
>>> text.delete('1.0', '3.0 lineend + 1 chars')
>>> # replace some text
>>> text.replace('1.0', '1.0 lineend', 'This is the first line.')
>>> text.config(state = 'disabled')
>>> text.delete('1.0', 'end') # doesn't do anything while the widget is disabled
>>> text.config(state = 'normal')
|
UTF-8
|
Python
| false
| false
| 1,655
|
py
| 24
|
HelloTextWidget.py
| 24
| 0.583686
| 0.552266
| 0
| 32
| 50.71875
| 158
|
snassimr/concept-drift
| 9,706,626,098,799
|
ccc8a04bc78ce3a66b06643384ca77fff6ac3256
|
7e0d9b57304629abea51a753302517dad3026604
|
/notebooks/archive/plots.py
|
1ca86e6a7b65826be5caeecf3ffd7c7acc3d1126
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/snassimr/concept-drift
|
aabf071c7666e89ae01e2220bbe563d5298c6d6e
|
11dc0ddee35c9e5d43f7d70d1bd0e2486471c0af
|
refs/heads/main
| 2023-07-23T18:09:59.709296
| 2021-08-30T19:10:27
| 2021-08-30T19:10:27
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
def decision_regions(x, y, classifier, test_idx=None, resolution=0.02, plot_support=False, plot_custom_support=False):
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
x1_min, x1_max = x[:, 0].min() - 1, x[:, 0].max() + 1
x2_min, x2_max = x[:, 1].min() - 1, x[:, 1].max() + 1
xx1, xx2 = np.meshgrid(
np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution)
)
#z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
xy = np.array([xx1.ravel(), xx2.ravel()]).T
z = classifier.decision_function(xy) #.reshape(x.shape)
z = z.reshape(xx1.shape)
plt.contour(xx1, xx2, z, alpha=0.3, cmap=cmap, levels=[-1, 0, 1], linestyles=['--', '-', '--'])
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, c1 in enumerate(np.unique(y)):
plt.scatter(
x=x[y == c1, 0], y=x[y == c1, 1],
alpha=0.8, c=colors[idx],
marker=markers[idx], label=c1,
edgecolors='none'
)
if test_idx:
x_test, y_test = x[test_idx,:], y[test_idx]
plt.scatter(
x_test[:,0],
x_test[:,1],
c='none',
edgecolors='green',
alpha=1.0, linewidth=1,
marker='o',
s=250,
label='test set')
if plot_support:
plt.scatter(
classifier.support_vectors_[:, 0],
classifier.support_vectors_[:, 1],
marker='o',
s=100,
c='none',
alpha=1.0,
linewidth=1,
edgecolors='purple',
#facecolors='none',
label='support set'
)
if plot_custom_support:
preds = classifier.decision_function(x)
support_vectors = np.where(abs(preds) <= 1, 1, 0)
#print(support_vectors)
support_vector_idxs = np.where(support_vectors == 1)[0]
#print(support_vector_idxs)
x_support = x[support_vector_idxs, :]
plt.scatter(
x_support[:, 0],
x_support[:, 1],
marker='o',
s=200,
c='none',
alpha=1.0,
linewidth=1,
edgecolors='orange',
facecolors='none',
label='custom support set'
)
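# --- Editor's sketch (not in the original module): typical usage with a fitted
# binary SVM on 2-D data. The dataset and classifier below are assumptions.
#
#   from sklearn.svm import SVC
#   from sklearn.datasets import make_blobs
#
#   x, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=0)
#   clf = SVC(kernel='linear').fit(x, y)
#   decision_regions(x, y, clf, plot_support=True)
#   plt.legend(); plt.show()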
|
UTF-8
|
Python
| false
| false
| 2,550
|
py
| 21
|
plots.py
| 9
| 0.493333
| 0.464314
| 0
| 78
| 31.705128
| 118
|
cmoralescar/programacion-practica-seo
| 18,734,647,357,921
|
affbb2c67ccd228df5dcdb0a86903b8954a76e37
|
a3b9452798d707d2592a00dd8eece6dbdf186d7a
|
/programacion-practica-seo-master/python/intro/08-dictionaries.py
|
25b87e842a627672ff7999c9a39c5d82016ebbdf
|
[] |
no_license
|
https://github.com/cmoralescar/programacion-practica-seo
|
2bc74979fab23fa5c664f0edbd6b5b3358eddc72
|
24d44e324e37fcf53ce778792b2ca85dafeaab1a
|
refs/heads/master
| 2023-02-02T02:38:43.168926
| 2020-12-21T18:08:21
| 2020-12-21T18:08:21
| 323,315,459
| 0
| 1
| null | false
| 2020-12-21T18:08:22
| 2020-12-21T11:25:03
| 2020-12-21T12:37:20
| 2020-12-21T18:08:21
| 0
| 0
| 0
| 0
| null | false
| false
|
# Create
ejemploDict = {
    'a': 3,
    'b': 0,
    'z': 34
}
# Access an element
print(ejemploDict['a'])
# Loop
for elemento in ejemploDict:
    print(ejemploDict[elemento])
# Keys, values & items
ejemploDict.keys()
ejemploDict.values()
print(ejemploDict.items())
# Check if a key exists
if 'a' in ejemploDict:
    print('exists')
# Add an element
ejemploDict['y'] = 16
print(ejemploDict)
# Delete an element
del ejemploDict['y']
print(ejemploDict)
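# Safe access with a default value (editor's addition in the same style)
print(ejemploDict.get('q', 'not found'))  # no KeyError for missing keys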
|
UTF-8
|
Python
| false
| false
| 452
|
py
| 96
|
08-dictionaries.py
| 72
| 0.690265
| 0.676991
| 0
| 35
| 11.942857
| 30
|
ludwigwittgenstein2/Algorithms-Python
| 16,432,544,914,951
|
aa3103657c1c758200e3a80335162fd6aa6a4602
|
de95e9ace929f6279f5364260630e4bf7a658c1c
|
/practiceReuse.py
|
58f86e0f8d05f971270eaea784c1891c30896d01
|
[] |
no_license
|
https://github.com/ludwigwittgenstein2/Algorithms-Python
|
ceaf0739b8582f7bd749a9b3f52f283765044744
|
c5bed8b2e398c218d1f36e72b05a3f5545cf783a
|
refs/heads/master
| 2021-06-19T11:40:31.012268
| 2017-07-02T04:59:20
| 2017-07-02T04:59:20
| 75,953,711
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
seq = [20, 10, 24, 56, 89, 99]
def trav(seq, i=0):
    # recursive traversal skeleton: recurse until i walks past the last index
    if i == len(seq):
        return
    trav(seq, i+1)
if __name__ == '__main__':
    trav(range(100))
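# Editor's sketch (not in the original): doing work at each element, e.g.
#   def trav(seq, i=0):
#       if i == len(seq):
#           return
#       print(seq[i])    # visit the element before recursing
#       trav(seq, i+1)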
|
UTF-8
|
Python
| false
| false
| 156
|
py
| 162
|
practiceReuse.py
| 156
| 0.467949
| 0.358974
| 0
| 9
| 16.333333
| 30
|
laurent-george/comments
| 12,807,592,479,562
|
be779dd8a03d020927515e2663a1a8a991e1a090
|
94c4e6b4d5b65e4aca078456c6be76c80944ab07
|
/first_try_keras.py
|
d0c83c4199d7b19bb8bab6aa209ac700324f394e
|
[] |
no_license
|
https://github.com/laurent-george/comments
|
92d663d0068148e10903464b325cc24cf011391b
|
596648802d4db9eba32d276ee3ec1d3bb0e3e3bb
|
refs/heads/master
| 2021-05-11T08:17:14.825319
| 2018-01-18T22:55:48
| 2018-01-18T22:55:48
| 118,048,224
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import tensorflow as tf
def get_input_fn_dataset(dataset_name = 'train', num_epoch=30, batch_size=256):
    def _parse_function(example_proto):
        # fixed-length schema: 300 tokens x 100-dim embeddings, 6 labels
        # (a dead VarLenFeature dict that was immediately overwritten has been removed)
        features = {'id': tf.FixedLenFeature([1], tf.int64),
                    'X': tf.FixedLenFeature([300, 100], tf.float32),
                    'Y': tf.FixedLenFeature([6], tf.float32)}
        parsed_features = tf.parse_single_example(example_proto, features)
        return parsed_features["id"], parsed_features['X'], parsed_features['Y']
def input_fn():
dataset = tf.data.TFRecordDataset('data/{}.tfrecord'.format(dataset_name), compression_type='')
#dataset = dataset.repeat(num_epoch)
#dataset = dataset.shuffle(10*batch_size)
#dataset = dataset.batch(batch_size)
#dataset = tf.contrib.data.map_and_batch()
dataset = dataset.map(_parse_function)
#dataset = dataset.prefetch(10)
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
return input_fn
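# --- Editor's sketch (not in the original): writing one record in the schema
# _parse_function expects, with made-up values.
#
#   import numpy as np
#   ex = tf.train.Example(features=tf.train.Features(feature={
#       'id': tf.train.Feature(int64_list=tf.train.Int64List(value=[0])),
#       'X': tf.train.Feature(float_list=tf.train.FloatList(
#           value=np.zeros(300 * 100, dtype=np.float32))),
#       'Y': tf.train.Feature(float_list=tf.train.FloatList(value=[0.0] * 6)),
#   }))
#   with tf.python_io.TFRecordWriter('data/train.tfrecord') as w:
#       w.write(ex.SerializeToString())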
def model_fn(features, labels, mode=tf.estimator.ModeKeys.TRAIN, params=None):
    """
    a model_fn for Estimator class
    This function will be called to create a new graph each time an estimator method is called
    """
    tf.keras.backend.set_learning_phase(mode == tf.estimator.ModeKeys.TRAIN)
    learning_rate = params['learning_rate']
    X = features
    # TODO: define the model with an LSTM here -- the two layers below are an
    # editor's placeholder so the graph builds (they assume X is a batched
    # [batch, 300, 100] embedding tensor), not the intended final architecture.
    lstm_out = tf.keras.layers.LSTM(64)(X)
    logits = tf.keras.layers.Dense(6)(lstm_out)  # 6 outputs, matching the 'Y' feature
    probabilities = tf.nn.sigmoid(logits)
    predictions = {'class': tf.argmax(logits, axis=1), 'probabilities': probabilities}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # placeholder multi-label loss, optimizer, logging hook and metric, so that
    # the EstimatorSpec below has all the names it references
    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits)
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(
        loss, global_step=tf.train.get_global_step())
    log_hook = tf.train.LoggingTensorHook({'loss': loss}, every_n_iter=100)
    accuracy_metric = tf.metrics.accuracy(labels=labels,
                                          predictions=tf.round(probabilities))
    return tf.estimator.EstimatorSpec(predictions=predictions,
                                      loss=loss,
                                      train_op=train_op,
                                      mode=mode,
                                      training_hooks=[log_hook],
                                      evaluation_hooks=[],
                                      eval_metric_ops={'acc_validation': accuracy_metric})
def main():
input_fn = get_input_fn_dataset()
import IPython
IPython.embed()
config= tf.estimator.RunConfig(save_summary_steps=10,
save_checkpoints_steps=1000,
keep_checkpoint_max=200,
log_step_count_steps=1000)
estimator = tf.estimator.Estimator(model_fn=model_fn,
model_dir='mnist_trained',
params={'learning_rate': 0.01},
config=config)
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false
| false
| 2,869
|
py
| 2
|
first_try_keras.py
| 2
| 0.555246
| 0.538864
| 0
| 73
| 38.30137
| 103
|
caleb-severn/useful_codes
| 14,328,010,946,287
|
a8e7ef8de4c933c90a5e6c6f004330f5d2f26ce6
|
a62c3b44a1e22ce919819836cf0ebd63d3e5fce6
|
/Code Tests/139. Word Break.py
|
4b49de81fc711d9a102f1fd160dec85b1fa1848d
|
[] |
no_license
|
https://github.com/caleb-severn/useful_codes
|
e70128bf00cd2eff9fb84d69c9417c2082c4ed40
|
03ebb313cf0609736822464bdf3730fc136b227c
|
refs/heads/main
| 2023-06-21T03:41:28.598802
| 2021-07-20T09:12:28
| 2021-07-20T09:12:28
| 387,737,531
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 11:32:34 2021
@author: csevern
"""
#%% Attempt 1 - ran into an issue on one example, which required a rewrite
def wordBreak(s,wordDict):
slist = list(s)
wordc = 0
for w in wordDict:
print(w,''.join(slist), len(s),wordc)
if w in ''.join(slist):
print(w,"in",''.join(slist), int(s.count(w)))
#if (len(s)-(len(w)*s.count(w))) in lendict or len(s)==len(w):
wordc += 1
slist[slist.index(w[0]):slist.index(w[-1])+2] = " "
print(len(slist),wordc,slist)
if len(s)==wordc:
return True
return False
print(wordBreak(
"ccaccc",
["cc","ac"]))
#%% Attempt 2 - the rewrite; this too failed on one example, prompting another approach
def wordBreak(s,wordDict):
word = ""
wordc = 0
for i,l in enumerate(s):
word = word + l
if word in wordDict:
wordc += 1
s = s[:i+1].replace(word," ") + s[i+1:]
print(s)
word = ""
if len(s)==wordc:
return True
return False
print(wordBreak(
"ccaccc",
["cc","ac"]))
#%% Attempt 3 - a different approach from an online solution, very clever (bottom-up DP)
def wordBreak(s,wordDict):
dp = [False]*(len(s)+1)
dp[len(s)] = True
for i in range(len(s)-1,-1,-1):
for w in wordDict:
if (i+len(w)) <= len(s) and s[i:i+len(w)]==w:
dp[i] =dp[i+len(w)]
if dp[i]:
break
return dp[0]
print(wordBreak(
"ccaccc",
["cc","ac"]))
|
UTF-8
|
Python
| false
| false
| 1,597
|
py
| 39
|
139. Word Break.py
| 39
| 0.505322
| 0.487163
| 0
| 70
| 21.785714
| 81
|
TacoCurry/HRTTest
| 8,650,064,144,094
|
4ecf4d5f990e6c1957e01443244ab0fe30f0fa7e
|
88c9b01dc08254b2555e68eb7b7094aace41c2d7
|
/none_rt_out_csv.py
|
d944bb21395b02e338caf33034ab531124c47ae8
|
[] |
no_license
|
https://github.com/TacoCurry/HRTTest
|
c4400a5fdd36431f62e89f1743642fa18c2efc90
|
c5fb0d3792284ef2578343520e9156bec373bf6e
|
refs/heads/master
| 2021-04-10T23:36:04.081758
| 2020-06-12T13:37:12
| 2020-06-12T13:37:12
| 248,976,484
| 0
| 0
| null | false
| 2020-06-12T13:37:13
| 2020-03-21T12:58:54
| 2020-03-26T06:17:30
| 2020-06-12T13:37:12
| 1,288
| 0
| 0
| 0
|
Python
| false
| false
|
import csv
def init(file_name):
    # open in 'w' mode (and close immediately) to truncate any previous contents
    f = open(file_name, 'w', encoding='utf-8', newline='')
    f.close()
def write(file_name, row):  # renamed from 'list', which shadowed the builtin
    # append a single CSV row; 'row' is a list of cell values
    f = open(file_name, 'a+', encoding='utf-8', newline='')
    wr = csv.writer(f)
    wr.writerow(row)
    f.close()
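# Editor's sketch (usage, not in the original module):
#   init('out.csv')                      # truncate/create the file
#   write('out.csv', ['step', 'value'])  # header row
#   write('out.csv', [1, 3.14])          # data row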
|
UTF-8
|
Python
| false
| false
| 253
|
py
| 31
|
none_rt_out_csv.py
| 21
| 0.58498
| 0.577075
| 0
| 11
| 22.090909
| 59
|
shubhamchandak94/LDPC_DNA_storage
| 3,779,571,247,515
|
7f36be1166cb4800ce13065f5d8434781cdcfc31
|
0591f0b651235e77c2161c89cebdf11e25e533f9
|
/util/analysis/Raptor_BCH/sample_generation_raptor_BSC.py
|
7f3fb2090d757d8f3c28533e3293e7a2c28c97aa
|
[
"MIT"
] |
permissive
|
https://github.com/shubhamchandak94/LDPC_DNA_storage
|
be6da10d53b4fde16f9c8e42df1d7c96c97f42df
|
ae69da8696f71c1afc7f2dac8756b9d2392ac052
|
refs/heads/master
| 2021-06-29T18:32:20.365051
| 2020-09-05T15:08:24
| 2020-09-05T15:08:24
| 189,745,376
| 9
| 3
| null | false
| 2020-01-29T13:32:43
| 2019-06-01T14:58:24
| 2019-10-04T17:17:40
| 2020-01-29T13:32:42
| 3,922
| 0
| 1
| 0
|
C
| false
| false
|
import numpy as np
import random
import argparse
import json
def get_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--output_file', type=str, default="output.txt")
parser.add_argument('--sample_file', type=str, default="sample.txt")
parser.add_argument('--coverage', type=float, default=1.0)
parser.add_argument('--num_chunks', type=int, default=100)
parser.add_argument('--eps', type=float, default=0.0)
return parser
def randomly_sample_reads(input_file, sample_file, num_sample_reads, eps):
    f_input = open(input_file, "r")
    f_sample = open(sample_file, "w")
    input_data = json.load(f_input)
    output_data = dict(input_data)
    output_data['symbols'] = []
    print(num_sample_reads)
    for i in range(num_sample_reads):
        read = list(random.choice(input_data['symbols']))
        l = [c for c in read[1]]
        for j in range(len(l)):
            if np.random.random() < eps:
                l[j] = '0' if (read[1][j] == '1') else '1'  # flip each bit with probability eps (BSC)
        read[1] = ''.join(l)
        output_data['symbols'].append(read)
    print("Number of unique reads", len(set([s[0] for s in output_data['symbols']])))
    # the original passed sort_keys='False', a truthy string that sorted keys anyway
    f_sample.write(json.dumps(output_data, sort_keys=False, indent=2, separators=(',', ': ')))
def main():
parser = get_argument_parser()
config = parser.parse_args()
num_samples = int(config.coverage*config.num_chunks)
    randomly_sample_reads(config.output_file, config.sample_file, num_samples, config.eps)
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false
| false
| 1,472
|
py
| 31
|
sample_generation_raptor_BSC.py
| 20
| 0.660326
| 0.650136
| 0
| 40
| 35.8
| 95
|
teaking/Python-exe
| 10,153,302,699,110
|
b0ad8439cc372740398251975d8d37b874fb5387
|
e7b274111a324b28871ac23d7120710645488bae
|
/ex39_2.py
|
1beb136306f3b0cf183f042789bd3c078f78f462
|
[] |
no_license
|
https://github.com/teaking/Python-exe
|
5bbfd8c228af7ad80bd6aa850134a32d7d96bf5c
|
f3c17d7399f99f4f9e0f7ec790a90089d2bdd166
|
refs/heads/master
| 2021-01-25T00:10:26.329655
| 2018-04-22T00:40:55
| 2018-04-22T00:40:55
| 123,290,095
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
'''Dictionary Data Structure example'''
#create a mapping of state to abbreviation
states = {
'Oregon': 'OR',
'Florida':'FL',
'California':'CA',
'New York': 'NY',
'Michigan':'MI'
}
cities = {
    'CA': 'San Francisco',
'MI': 'Detroit',
'FL':'Jacksonville'
}
#add some more cities
cities['NY'] = "New York"
cities['OR'] = 'Portland'
#print out some cities
print '_' * 10
print "NY State has: ", cities['NY']
print "OR State has: ", cities['OR']
#do it by using the state then cities dict
print '_' * 10
print "Michigan has: ", cities[states['Michigan']]
print "Florida has: ", cities[states['Florida']]
#print every state abbreviation
print '_' * 10
for state, abbre in states.items():
print "%s is abbreviated %s" % (state, abbre)
print '_' * 10
for abbrev, city in cities.items():
print "%s has the city %s" %(abbrev, city)
print '_' * 10
for state, abbre in states.items():
    print "%s state is abbreviated %s and has city %s" % (state, abbre, cities[abbre])
print '_' * 10
# safely get an abbreviation for a state that might not be there
state = states.get('Texas')
if not state:
print "false"
else:
print "true"
#get a city if not output the default value
city = cities.get('OR', 'Does Not Exist')
print "The city for the state 'OR' is: %s" % city
city = cities.get('TX', 'Does Not Exist')
print "The city for the state 'TX' is: %s" % city
print cities
print cities.pop('OR')
print cities
|
UTF-8
|
Python
| false
| false
| 1,424
|
py
| 48
|
ex39_2.py
| 47
| 0.652388
| 0.643961
| 0
| 74
| 18.243243
| 82
|
floriangardin/musiclang
| 12,524,124,648,140
|
6b9c7514d82f5ae96dbc2432081e791558bcacc5
|
7127cc2ab5eb59bfe93d872bbc3705a865bf0269
|
/musiclang/script/actions/counterpoint.py
|
7f13a49d8b5824cf0b4c27dd7a33edc9d356eea1
|
[
"MIT"
] |
permissive
|
https://github.com/floriangardin/musiclang
|
f117b5c6f8acec33d239d4250629563a64bab3a4
|
4d593263c697065938772a212c0c063b2567a4c3
|
refs/heads/master
| 2022-10-29T00:25:08.050505
| 2022-10-24T18:58:14
| 2022-10-24T18:58:14
| 101,918,359
| 2
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from ..operators import *
from ..utils.chord_utils import project_on_one_chord, reproject_on_multiple_chords
class CounterpointPart(ChordAction):
"""
Mutate to create an acceptable counterpoint from a voice
"""
def __init__(self, subject_parts, counterpoint_part):
self.subject_parts = subject_parts
self.counterpoint_part = counterpoint_part
    def get_part(self, chord, part):
        # resolve a positional index to a part name, then fetch its melody
        if isinstance(part, int):
            part = chord.parts[part]
        return chord.score[part], part
    def action(self, chord, **kwargs):
        subjects = [self.get_part(chord, part)[0] for part in self.subject_parts]
        counterpoints, counterpoints_part = self.get_part(chord, self.counterpoint_part)
        counterpoints_result = create_counterpoint([subjects], [counterpoints])[0]
        new_chord = chord.copy()
        # assign to the copy (the original mutated the input chord and returned the untouched copy)
        new_chord.score[counterpoints_part] = counterpoints_result.copy()
        return new_chord
class CounterpointAfterChords(ChordSerieAction):
def __init__(self, subject_parts, counterpoint_parts):
self.subject_parts = subject_parts
self.counterpoint_parts = counterpoint_parts
    def get_part(self, chord, part):
        # resolve a positional index to a part name, then fetch its melody
        if isinstance(part, int):
            part = chord.parts[part]
        return chord.score[part], part
def action(self, chord_serie: ChordSerie, **kwargs):
chord, idx_stops, offsets, melodies, chords = project_on_one_chord(chord_serie)
new_chord = CounterpointParts(subject_parts=self.subject_parts, counterpoint_parts=self.counterpoint_parts)(chord)
new_score = reproject_on_multiple_chords(chords, new_chord, idx_stops, offsets)
return new_score
class CounterpointParts(ChordAction):
"""
Mutate to create an acceptable counterpoint from a voice
"""
def __init__(self, subject_parts, counterpoint_parts):
"""
:param subject_part: int|str : part acting as subject
:param counterpoint_parts: list[int|str] : list of parts on which to apply counterpoint from subject
"""
self.subject_parts = subject_parts
self.counterpoint_parts = counterpoint_parts
    def get_part(self, chord, part):
        # resolve a positional index to a part name, then fetch its melody
        if isinstance(part, int):
            part = chord.parts[part]
        return chord.score[part], part
def action(self, chord, **kwargs):
subjects = [self.get_part(chord, part)[0] for part in self.subject_parts]
subjects_parts = [self.get_part(chord, part)[1] for part in self.subject_parts]
counterpoints_parts = self.counterpoint_parts
if self.counterpoint_parts is None:
counterpoints_parts = [part for part in chord.parts if part not in subjects_parts]
counterpoints = [self.get_part(chord, part)[0] for part in counterpoints_parts]
counterpoints_parts = [self.get_part(chord, part)[1] for part in counterpoints_parts]
counterpoints_result = create_counterpoint(subjects, counterpoints)
new_chord = chord.copy()
for counterpoint, part in zip(counterpoints_result, counterpoints_parts):
new_chord.score[part] = counterpoint.copy()
return new_chord
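# --- Editor's sketch (not in the original module): applying the action the way
# CounterpointAfterChords does internally (see its action method above).
#
#   action = CounterpointParts(subject_parts=['piano__0'],
#                              counterpoint_parts=['piano__1'])
#   new_chord = action(chord)   # ChordAction instances are callable on a chord
#
# The part names above are assumptions; integer indices into chord.parts also work.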
|
UTF-8
|
Python
| false
| false
| 3,632
|
py
| 111
|
counterpoint.py
| 104
| 0.660518
| 0.658315
| 0
| 95
| 37.210526
| 122
|
AndySer37/semantic_labels_sys
| 8,117,488,237,493
|
6c6857e9167e02bfeb855e35e5342ae6c3b638ed
|
451e809753c2833bf510f9f41c37c7d804d6a039
|
/catkin_ws/src/text_recognize/moran_text_recog/src/text_recognize.py
|
8e38a4f54cf55fa0150015df29fa72a221072b85
|
[] |
no_license
|
https://github.com/AndySer37/semantic_labels_sys
|
91de213bb03e5142702e5165c9f633ab75544fad
|
074ea8bf015900bba053eb6ad8e789f627504808
|
refs/heads/master
| 2020-08-05T00:49:43.136415
| 2020-03-19T13:56:31
| 2020-03-19T13:56:31
| 212,338,077
| 2
| 2
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import numpy as np
import cv2
import roslib
import rospy
import tf
import struct
import math
import time
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo, CompressedImage
from geometry_msgs.msg import PoseArray, PoseStamped
from visualization_msgs.msg import Marker, MarkerArray
import rospkg
from cv_bridge import CvBridge, CvBridgeError
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import os
import message_filters
from text_msgs.msg import text_detection_msg, text_detection_array, int_arr
from text_msgs.srv import *
from PIL import Image as Im
import tools.utils as utils
import tools.dataset as dataset
from models.moran import MORAN
from collections import OrderedDict
class text_recognize(object):
def __init__(self):
r = rospkg.RosPack()
self.path = r.get_path('moran_text_recog')
self.prob_threshold = 0.90
self.cv_bridge = CvBridge()
self.commodity_list = []
self.read_commodity(r.get_path('text_msgs') + "/config/commodity_list.txt")
self.alphabet = '0:1:2:3:4:5:6:7:8:9:a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:q:r:s:t:u:v:w:x:y:z:$'
self.means = (0.485, 0.456, 0.406)
self.stds = (0.229, 0.224, 0.225)
self.bbox_thres = 1500
self.color_map = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(255,255,255)] # 0 90 180 270 noise
self.objects = []
self.is_compressed = False
self.cuda_use = torch.cuda.is_available()
if self.cuda_use:
cuda_flag = True
self.network = MORAN(1, len(self.alphabet.split(':')), 256, 32, 100, BidirDecoder=True, CUDA=cuda_flag)
self.network = self.network.cuda()
else:
self.network = MORAN(1, len(self.alphabet.split(':')), 256, 32, 100, BidirDecoder=True, inputDataType='torch.FloatTensor', CUDA=cuda_flag)
model_name = "moran.pth"
print "Moran Model Parameters number: " + str(self.count_parameters(self.network))
if self.cuda_use:
state_dict = torch.load(os.path.join(self.path, "weights/", model_name))
else:
state_dict = torch.load(os.path.join(self.path, "weights/", model_name), map_location='cpu')
MORAN_state_dict_rename = OrderedDict()
for k, v in state_dict.items():
name = k.replace("module.", "") # remove `module.`
MORAN_state_dict_rename[name] = v
self.network.load_state_dict(MORAN_state_dict_rename)
self.converter = utils.strLabelConverterForAttention(self.alphabet, ':')
self.transformer = dataset.resizeNormalize((100, 32))
for p in self.network.parameters():
p.requires_grad = False
self.network.eval()
#### Publisher
self.image_pub = rospy.Publisher("~predict_img", Image, queue_size = 1)
self.mask = rospy.Publisher("~mask", Image, queue_size = 1)
self.img_bbox_pub = rospy.Publisher("~predict_bbox", Image, queue_size = 1)
#### Service
self.predict_ser = rospy.Service("~text_recognize_server", text_recognize_srv, self.srv_callback)
image_sub1 = rospy.Subscriber('/text_detection_array', text_detection_array, self.callback, queue_size = 1)
### msg filter
# image_sub = message_filters.Subscriber('/camera/color/image_raw', Image)
# depth_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image)
# ts = message_filters.TimeSynchronizer([image_sub, depth_sub], 10)
# ts.registerCallback(self.callback)
print "============ Ready ============"
def read_commodity(self, path):
for line in open(path, "r"):
line = line.rstrip('\n')
self.commodity_list.append(line)
print "Node (text_recognize): Finish reading list"
def count_parameters(self, model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def callback(self, msg):
try:
if self.is_compressed:
np_arr = np.fromstring(msg.image, np.uint8)
cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
else:
cv_image = self.cv_bridge.imgmsg_to_cv2(msg.image, "bgr8")
except CvBridgeError as e:
print(e)
predict_img, mask = self.predict(msg, cv_image)
img_bbox = cv_image.copy()
try:
self.image_pub.publish(self.cv_bridge.cv2_to_imgmsg(predict_img, "bgr8"))
self.img_bbox_pub.publish(self.cv_bridge.cv2_to_imgmsg(img_bbox, "bgr8"))
self.mask.publish(self.cv_bridge.cv2_to_imgmsg(mask, "8UC1"))
except CvBridgeError as e:
print(e)
def srv_callback(self, req):
resp = text_recognize_srvResponse()
try:
if self.is_compressed:
np_arr = np.fromstring(req.data.image, np.uint8)
cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
else:
cv_image = self.cv_bridge.imgmsg_to_cv2(req.data.image, "bgr8")
except CvBridgeError as e:
resp.state = e
print(e)
predict_img, mask = self.predict(req.data, cv_image, req.direct)
img_bbox = cv_image.copy()
try:
self.image_pub.publish(self.cv_bridge.cv2_to_imgmsg(predict_img, "bgr8"))
self.img_bbox_pub.publish(self.cv_bridge.cv2_to_imgmsg(img_bbox, "bgr8"))
resp.mask = self.cv_bridge.cv2_to_imgmsg(mask, "8UC1")
self.mask.publish(self.cv_bridge.cv2_to_imgmsg(mask, "8UC1"))
except CvBridgeError as e:
resp.state = e
print(e)
return resp
def predict(self, msg, img, rot=0):
# # Preprocessing
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(rows, cols, channels) = img.shape
mask = np.zeros([rows, cols], dtype = np.uint8)
for text_bb in msg.text_array:
if (text_bb.box.ymax - text_bb.box.ymin) * (text_bb.box.xmax - text_bb.box.xmin) < self.bbox_thres:
continue
start = time.time()
image = gray[text_bb.box.ymin:text_bb.box.ymax, text_bb.box.xmin:text_bb.box.xmax]
image = Im.fromarray(image)
image = self.transformer(image)
if self.cuda_use:
image = image.cuda()
image = image.view(1, *image.size())
image = Variable(image)
text = torch.LongTensor(1 * 5)
length = torch.IntTensor(1)
text = Variable(text)
length = Variable(length)
max_iter = 20
t, l = self.converter.encode('0'*max_iter)
utils.loadData(text, t)
utils.loadData(length, l)
output = self.network(image, length, text, text, test=True, debug=True)
preds, preds_reverse = output[0]
demo = output[1]
_, preds = preds.max(1)
_, preds_reverse = preds_reverse.max(1)
sim_preds = self.converter.decode(preds.data, length.data)
sim_preds = sim_preds.strip().split('$')[0]
sim_preds_reverse = self.converter.decode(preds_reverse.data, length.data)
sim_preds_reverse = sim_preds_reverse.strip().split('$')[0]
# print('\nResult:\n' + 'Left to Right: ' + sim_preds + '\nRight to Left: ' + sim_preds_reverse + '\n\n')
print "Text Recognize Time : {}".format(time.time() - start)
_cont = []
for p in text_bb.contour:
point = []
point.append(p.point[0])
point.append(p.point[1])
_cont.append(point)
_cont = np.array(_cont, np.int32)
if sim_preds in self.commodity_list:
cv2.rectangle(img, (text_bb.box.xmin, text_bb.box.ymin),(text_bb.box.xmax, text_bb.box.ymax), self.color_map[rot], 3)
cv2.putText(img, sim_preds, (text_bb.box.xmin, text_bb.box.ymin), 0, 1, (0, 255, 255),3)
pix = self.commodity_list.index(sim_preds) + rot*len(self.commodity_list)
if pix in np.unique(mask):
cv2.fillConvexPoly(mask, _cont, pix + 4*len(self.commodity_list))
else:
cv2.fillConvexPoly(mask, _cont, pix)
else:
correct, conf, _bool = self.conf_of_word(sim_preds)
# print conf
if _bool:
cv2.putText(img, correct + "{:.2f}".format(conf), (text_bb.box.xmin, text_bb.box.ymin), 0, 1, (0, 255, 255),3)
cv2.rectangle(img, (text_bb.box.xmin, text_bb.box.ymin),(text_bb.box.xmax, text_bb.box.ymax), (255, 255, 255), 2)
pix = self.commodity_list.index(correct) + rot*len(self.commodity_list)
if pix in np.unique(mask):
cv2.fillConvexPoly(mask, _cont, pix + 4*len(self.commodity_list))
else:
cv2.fillConvexPoly(mask, _cont, pix)
# else:
# cv2.putText(img, sim_preds, (text_bb.box.xmin, text_bb.box.ymin), 0, 1, (0, 0, 0),3)
# cv2.rectangle(img, (text_bb.box.xmin, text_bb.box.ymin),(text_bb.box.xmax, text_bb.box.ymax), (0, 0, 0), 2)
return img, mask
def conf_of_word(self, target):
### Edit distance
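        # Levenshtein distance via dynamic programming: matrix[x, y] is the
        # minimum number of single-character edits turning the first x chars
        # of a commodity word into the first y chars of the target. The score
        # below normalizes it to (len + 1 - distance) / (len + 1), so 1.0 is
        # a perfect match and 0.77 is the acceptance threshold used at return.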
# print target
_recheck = False
total = np.zeros(len(self.commodity_list))
for i in range(1, len(self.commodity_list)):
size_x = len(self.commodity_list[i]) + 1
size_y = len(target) + 1
            matrix = np.zeros((size_x, size_y))
            for x in xrange(size_x):
                matrix[x, 0] = x
            for y in xrange(size_y):
                matrix[0, y] = y
            for x in xrange(1, size_x):
                for y in xrange(1, size_y):
                    if self.commodity_list[i][x-1] == target[y-1]:
                        matrix[x, y] = min(
                            matrix[x-1, y] + 1,
                            matrix[x-1, y-1],
                            matrix[x, y-1] + 1
                        )
                    else:
                        matrix[x, y] = min(
                            matrix[x-1, y] + 1,
                            matrix[x-1, y-1] + 1,
                            matrix[x, y-1] + 1
                        )
# print (matrix)
total[i] = (size_x - matrix[size_x-1, size_y-1]) / float(size_x)
if self.commodity_list[i] == "kleenex" and 0.3 < total[i] < 0.77:
_list = ["kloonex", "kloonox","kleeper", "killer", "kleem", "kleers", "kluting", "klates",\
"kleams", "kreamer", "klea", "kleas", "kletter","keenier","vooney", "wooner", "whonex"]
_recheck = True
elif self.commodity_list[i] == "andes" and 0.3 < total[i] < 0.77:
_list = ["anders", "findes","windes"] # "andor",
_recheck = True
elif self.commodity_list[i] == "vanish" and 0.3 < total[i] < 0.77:
_list = ["varish"]
_recheck = True
# elif self.commodity_list[i] == "crayola" and 0.3 < total[i] < 0.77:
# _list = ["casions"]
# _recheck = True
if _recheck == True:
for _str in _list:
size_x = len(_str) + 1
size_y = len(target) + 1
                matrix = np.zeros((size_x, size_y))
                for x in xrange(size_x):
                    matrix[x, 0] = x
                for y in xrange(size_y):
                    matrix[0, y] = y
                for x in xrange(1, size_x):
                    for y in xrange(1, size_y):
                        if _str[x-1] == target[y-1]:
                            matrix[x, y] = min(
                                matrix[x-1, y] + 1,
                                matrix[x-1, y-1],
                                matrix[x, y-1] + 1
                            )
                        else:
                            matrix[x, y] = min(
                                matrix[x-1, y] + 1,
                                matrix[x-1, y-1] + 1,
                                matrix[x, y-1] + 1
                            )
score_temp = (size_x - matrix[size_x-1, size_y-1]) / float(size_x)
if total[i] < score_temp:
total[i] = score_temp
if 0.77 > total[i] > 0.68:
total[i] = 0.77
_recheck = False
# print target, total[i], self.commodity_list[i]
return self.commodity_list[np.argmax(total)], np.max(total), np.max(total) >= 0.77 ## 0.66
### old method
# total = np.zeros(len(self.commodity_list))
# for i in range(1, len(self.commodity_list)):
# # if self.commodity_list[i] != "raisins":
# # continue
# err = 0 ## error
# _len = len(self.commodity_list[i])
# arr = -10 * np.ones(_len)
# for j in range(len(target)):
# index = self.commodity_list[i].find(target[j])
# if index == -1:
# err += 1
# else:
# upper = arr[index+1] if index != _len - 1 else -10
# if arr[index] == -10 and upper == -10:
# arr[index] = j
# else:
# index = self.commodity_list[i].find(target[j], index + 1)
# while index != -1:
# lower = arr[index-1] if index != 0 else -10
# upper = arr[index+1] if index != _len - 1 else -10
# if (arr[index] - lower) == 1 or (upper - arr[index]) == 1:
# index = self.commodity_list[i].find(target[j], index + 1)
# else:
# arr[index] = j
# break
# score = 0 # score for word
# for j in range(_len - 1):
# if arr[j+1] - arr[j] == 1:
# score += 1
# total[i] = float(score) / (_len + err - 1)
# # print score, _len, err, arr
# return self.commodity_list[np.argmax(total)], np.max(total), np.max(total) >= 0.5
def onShutdown(self):
rospy.loginfo("Shutdown.")
if __name__ == '__main__':
rospy.init_node('text_recognize',anonymous=False)
text_recognize = text_recognize()
rospy.on_shutdown(text_recognize.onShutdown)
rospy.spin()
|
UTF-8
|
Python
| false
| false
| 12,007
|
py
| 45
|
text_recognize.py
| 21
| 0.626135
| 0.598484
| 0
| 355
| 32.822535
| 141
|
Dogeek/pyprocessing
| 14,525,579,445,790
|
137b65a81a8637f012d6e51cead6e5af481aa816
|
4a2952ebb5e6a199b4949088f1695812457e5bc6
|
/pyprocessing/runner.py
|
087ee7d74477addbd0826b219ca23cf954f22569
|
[
"MIT"
] |
permissive
|
https://github.com/Dogeek/pyprocessing
|
a7b4eb0ebde872f521e2d8964e22d21c7cd42766
|
14e832cd64384426ab29c5be35b2be6c56e6e04a
|
refs/heads/master
| 2023-02-04T22:44:37.736020
| 2020-12-23T15:42:08
| 2020-12-23T15:42:08
| 296,716,131
| 23
| 13
|
MIT
| false
| 2020-09-26T10:46:55
| 2020-09-18T19:49:35
| 2020-09-26T07:07:41
| 2020-09-26T10:46:54
| 91
| 17
| 9
| 2
|
Python
| false
| false
|
import importlib.util
import tempfile
from pyprocessing import PyProcessing
from pyprocessing.renderer import TkRenderer
class Runner:
renderers_mapping = {
'TkRenderer': TkRenderer,
}
@classmethod
def from_sketch_path(cls, sketch_path, **kwargs):
spec = importlib.util.spec_from_file_location(
"module.name", sketch_path,
)
sketch = importlib.util.module_from_spec(spec)
spec.loader.exec_module(sketch)
return Runner(sketch, **kwargs)
@classmethod
def from_sketch_source(cls, sketch_source, **kwargs):
with tempfile.NamedTemporaryFile('w') as tf:
tf.write(sketch_source)
return cls.from_sketch_path(tf.name, **kwargs)
def __init__(self, sketch, renderers=None, logging_level=40):
self.sketch = sketch
if renderers is not None:
renderers = [self.renderers_mapping[r] for r in renderers]
if not renderers:
renderers = [TkRenderer]
self.renderers = renderers
self.pp = PyProcessing()
self.pp.logger.setLevel(logging_level)
for renderer_class in self.renderers:
self.pp.attach_renderer(renderer_class)
def run(self):
if 'setup' in dir(self.sketch):
self.sketch.setup()
self.pp.windows.setup()
if 'draw' in dir(self.sketch):
draw = self.sketch.draw
else:
def draw():
return
callables = {
func_name: getattr(self.sketch, func_name)
for func_name in dir(self.sketch)
if callable(getattr(self.sketch, func_name))
}
self.pp.draw_fn = draw
self.pp.callables.update(callables)
self.pp.start()
|
UTF-8
|
Python
| false
| false
| 1,770
|
py
| 37
|
runner.py
| 30
| 0.59887
| 0.59774
| 0
| 60
| 28.5
| 70
|
boyko11/ML1-SupervisedLearning
| 12,498,354,844,282
|
c5c2c4658fbcf8b1594c86c6838a3d678950d431
|
40a1991796073db107aeb070c53cae781389eb91
|
/boosting.py
|
9fad7575768b54432edb915b6289fdcee130918d
|
[] |
no_license
|
https://github.com/boyko11/ML1-SupervisedLearning
|
80c521a971560aed5c89bf83d7ca63b4ede0da7e
|
f5d6771bce5d7eed7f3b4bdcd70bbc94f1ee573c
|
refs/heads/master
| 2020-03-28T18:52:13.189670
| 2019-01-21T04:45:25
| 2019-01-21T04:45:25
| 148,921,219
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from sklearn import ensemble
from learner import Learner
from sklearn import tree
class BoostingLearner(Learner):
def __init__(self, n_estimators=50, max_depth=1, class_weight=None):
self.max_depth = max_depth
#self.estimator = ensemble.GradientBoostingClassifier()
self.estimator = ensemble.AdaBoostClassifier(
base_estimator=tree.DecisionTreeClassifier(max_depth=self.max_depth, class_weight=class_weight),
n_estimators=n_estimators)
#self.estimator = ensemble.AdaBoostClassifier()
def fit_predict_score(self, x_train, y_train, x_test, y_test):
return super(BoostingLearner, self).fit_predict_score(self.estimator, x_train, y_train, x_test, y_test)
def display_trees(self):
print(self.estimator.estimators_)
# for i, one_tree_in_the_forrest in enumerate(self.estimator.estimators_):
# print(one_tree_in_the_forrest.tree_.node_count)
# print(one_tree_in_the_forrest.tree_.feature[0])
# print('----------------------')
# tree.export_graphviz(one_tree_in_the_forrest, out_file='tree{0}.dot'.format(i+1))
|
UTF-8
|
Python
| false
| false
| 1,151
|
py
| 16
|
boosting.py
| 15
| 0.659427
| 0.654214
| 0
| 28
| 40.107143
| 111
|
maelsin/Project-Euler
| 7,645,041,828,248
|
769ad171f26f105d9b23c9f3863f48c3b6daf03d
|
b36ce9c02cfbc0989ede45088534e85958f0ae8f
|
/Problem 6.py
|
d7755a938e2e16750322b95afc925d769f8d25b3
|
[] |
no_license
|
https://github.com/maelsin/Project-Euler
|
d868069561a815e286652af08056e40296ee6d49
|
ed570d299cf1082a69902c5cfbc2cb5e13e49b30
|
refs/heads/main
| 2023-06-25T16:49:12.702571
| 2021-07-11T15:43:46
| 2021-07-11T15:43:46
| 384,986,266
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
def SquareDifference(n):
sumOfSquare = sum([i ** 2 for i in range(1, n+1)])
squareOfSum = sum(range(1, n+1)) ** 2
return squareOfSum - sumOfSquare
print(SquareDifference(100))
|
UTF-8
|
Python
| false
| false
| 189
|
py
| 9
|
Problem 6.py
| 9
| 0.661376
| 0.613757
| 0
| 7
| 26.142857
| 54
|
ashish717744/100dayscodingchallenge
| 16,484,084,505,933
|
242fa370f59fdfd9e907e739405d6018692d7fa8
|
33d5078db9fef4e4db28210febb654ddcc824ba3
|
/code5.py
|
e458b0b907d163fb86fd5b8790dec0578fcbe7c8
|
[] |
no_license
|
https://github.com/ashish717744/100dayscodingchallenge
|
b462f40e59ebc8b06bd17464a6b582f7a2805f70
|
2afdf1b124cd9f1d057c84e86e211c7ec9abeb87
|
refs/heads/master
| 2020-05-07T13:34:44.599224
| 2019-04-11T15:32:01
| 2019-04-11T15:32:01
| 180,555,365
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#Question: Complete the script so that it prints out a list slice containing letters a, c, e, g, and i.
letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
#Expected output:
#['a', 'c', 'e', 'g', 'i']
keyword =''
for i in letters:
if i =='a' or i=='c' or i=='e' or i=='g' or i=='i':
keyword+=i
print(list(keyword))
|
UTF-8
|
Python
| false
| false
| 351
|
py
| 8
|
code5.py
| 8
| 0.504274
| 0.504274
| 0
| 10
| 33.1
| 104
|
frr717/pythonProjects
| 2,199,023,301,920
|
d079a68a120818a56de053d76dbb7d614a8e370a
|
8be1b6b9ae64c696ec057331b89155954dbb3e1d
|
/chap14/calc.py
|
e24a4782e482eebd43a52400fa609e6dbfb5c993
|
[] |
no_license
|
https://github.com/frr717/pythonProjects
|
d4969ede129b1c54991f133dbd7d7317195b353c
|
eb5afd17bc20331ba8ab40dac11b3c0ea70c924e
|
refs/heads/main
| 2023-05-07T16:40:03.368384
| 2021-05-11T02:08:47
| 2021-05-11T02:08:47
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Editor: aaaasubing
# DevelopmentTime: 2021/4/29 9:13
def add(a, b):
return a + b
def div(a, b):
return a / b
# How to import a custom module
|
UTF-8
|
Python
| false
| false
| 156
|
py
| 120
|
calc.py
| 118
| 0.610294
| 0.536765
| 0
| 11
| 11.363636
| 33
|
MTNDStudiosXD/Xenonite-Bot
| 13,838,384,638,726
|
ce104392799cab10cd0c2767fc13c852dbd3d9d3
|
5d35c0e146e6c71a5ce81dcb16b048fce7bfd1a7
|
/main.py
|
95ba68073e356f721aed1d74ba076b1fb3afca05
|
[] |
no_license
|
https://github.com/MTNDStudiosXD/Xenonite-Bot
|
f6a9b435fa6d044edcebd58c8b91d6290c5fb6b2
|
46425116d99f3be071e60d2928af34dba4c03697
|
refs/heads/master
| 2020-12-31T09:41:18.969541
| 2020-02-07T17:16:33
| 2020-02-07T17:16:33
| 238,983,313
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import discord
from discord.ext import commands
import asyncio
import keep_alive
import os
#------------------------------------
from discord.ext import *
from discord.ext import commands
#-------------------------------------
print("Bot loading...")
# bot = commands.Bot(command_prefix='x!') #prefix (dead assignment: immediately overridden below)
bot = commands.Bot(command_prefix='oof') #prefix
bot.remove_command('help')
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Streaming(name="wtf do u want", url="https://www.youtube.com/watch?v=WtKWtY5cyiU"))
print(f"Bot is online!") # Shows on the console that the bot is working.
@bot.command(pass_text=True)
async def gay(ctx):
await ctx.send('no u')
print ("Typing no u....")
@bot.command(pass_text=True)
@commands.has_permissions(administrator=True)
async def clear(ctx, amount=5):
await ctx.channel.purge(limit=amount)
print ("Purging Channel....")
await ctx.send ('Cleared.')
@bot.command(pass_text=True)
@commands.has_permissions(manage_messages=True)
async def nuke(ctx, amount=100000000000):
await ctx.channel.purge(limit=amount)
print ("Nuking Channel....")
await ctx.send ('Channel Nuked.')
@bot.command(pass_text=True)
async def ban(ctx, member):
await ctx.send ("**This Command Has Been ``||Permenantely||`` Removed, It is NOT Working and It has plenty of Errors.**")
@bot.command(pass_text=True)
async def kick(ctx, member):
await ctx.send ("**This Command Has Been ``||Permenantely||`` Removed, It is NOT Working and It has plenty of Errors.**")
@bot.command(pass_text=True)
async def say(ctx, message):
await ctx.message.delete()
await ctx.send(message)
keep_alive.keep_alive()
token = os.environ.get("BOT_TOKEN_HERE")
bot.run(token, bot=True, reconnect=True)
|
UTF-8
|
Python
| false
| false
| 1,816
|
py
| 6
|
main.py
| 2
| 0.664097
| 0.656388
| 0
| 57
| 29.877193
| 128
|
visw2290/Sel-python
| 25,769,805,486
|
0531ac5dda4601a987d611512df1f830d1885db9
|
7904467d003db1a9e482235e1967e215b7363f50
|
/working_with_elements/scroll_yahoo.py
|
b87826224b0bd72984e4b6c934c0a024bd95da7c
|
[] |
no_license
|
https://github.com/visw2290/Sel-python
|
fd4661b2530458032d6429a6f0c6f6d6b45dee18
|
ea0075bf8b160a183686d8db4f54b7411c3530fa
|
refs/heads/master
| 2021-07-07T14:00:07.437111
| 2017-10-03T07:04:02
| 2017-10-03T07:04:02
| 105,622,417
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from selenium import webdriver
import time
class Scrollyahoo():
def test_scroll_yahoo(self):
url = 'https://yahoo.com'
driver = webdriver.Firefox()
driver.maximize_window()
driver.get(url)
driver.implicitly_wait(3)
driver.execute_script("window.scrollBy(0,2000);")
driver.execute_script("window.scrollBy(0,-2000);")
element = driver.find_element_by_xpath("//h2[contains(text(),'MMA Bangalore, Karnataka')]")
driver.execute_script("arguments[0].scrollIntoView(true);",element)
driver.execute_script("window.scrollBy(0,-250);")
time.sleep(4)
driver.execute_script("window.scrollBy(0,-2000);")
location = element.location_once_scrolled_into_view
print('Location = ' + str(location))
time.sleep(2)
driver.quit()
sy = Scrollyahoo()
sy.test_scroll_yahoo()
|
UTF-8
|
Python
| false
| false
| 888
|
py
| 52
|
scroll_yahoo.py
| 51
| 0.635135
| 0.608108
| 0
| 29
| 29.655172
| 99
|
ongbt/VisionaireDiscord
| 7,559,142,450,544
|
a477e095e355d5ed0fd54fd5985d23c1a75d12eb
|
c25d523e01b371a12595ea222aaea44137935cb2
|
/src/com/squeakysnail/visionairediscord/bot.py
|
f8e8efc2a4e2c2680b5a9281efd24e99c863f796
|
[] |
no_license
|
https://github.com/ongbt/VisionaireDiscord
|
f7e3dc7161ffe1efdc51fb360927643f8715a120
|
53e35a899f28d74a724e989327e42cdbe8e47a9b
|
refs/heads/master
| 2021-06-23T05:11:37.309564
| 2017-08-06T09:09:54
| 2017-08-06T09:09:54
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on 5 Aug 2017
@author: bengt
'''
import pandas
import discord
import asyncio
from bs4 import BeautifulSoup
import requests
import re
import datetime
if __name__ == '__main__':
print('Visionaire Discord Bot')
url = "http://www.mysg-property.com/the-visionaire-ec.html"
lastCheckedTime = datetime.datetime.now()
unitCount = ""
checkInterval = 8
client = discord.Client()
topDate = datetime.datetime(2018, 10, 31, 0,0,0)
data = ""
lastSoldUnit = ""
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.event
async def on_message(message):
global lastCheckedTime
global unitCount
global lastSoldUnit
global data
currentTime = datetime.datetime.now()
timeDelta = currentTime - lastCheckedTime
hours, remainder = divmod(timeDelta.seconds, 3600)
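        # The page is re-scraped only when more than checkInterval hours have
        # passed since the last fetch; otherwise the cached HTML in `data` is
        # reused. (Note: timedelta.seconds ignores whole days, so a cache
        # older than a day can still look fresh here.)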
if message.content == '!help':
await client.send_message(message.channel, "!units:\t\tUnits Left.\n!top:\t\tTime to TOP\n!sold:\t\tLast Sold Unit.")
if message.content == '!v' or message.content == '!units':
isCached = "!"
if hours>checkInterval or data=="":
lastCheckedTime = datetime.datetime.now()
await client.send_message(message.channel, 'Checking Visionaire units...')
r = requests.get(url)
data = r.text
isCached = ""
soup = BeautifulSoup(data)
for link in soup.find_all(text=re.compile("Last(.*)units")):
unitCount = link
await client.send_message(message.channel, unitCount + isCached)
if message.content == '!sold':
if hours>checkInterval or data=="":
lastCheckedTime = datetime.datetime.now()
await client.send_message(message.channel, 'Checking Visionaire units...')
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data)
for link in soup.find_all(text=re.compile("Recently SOLD units:")):
lastSoldUnit = link.parent.parent.nextSibling.nextSibling.nextSibling + ""
await client.send_message(message.channel, lastSoldUnit)
if message.content.startswith('!cache'):
#timeToNextCheck = lastCheckedTime -timeDelta + checkInterval
await client.send_message(message.channel, "Last checked: "+ str(lastCheckedTime) +"]" + " ["+ str(checkInterval-hours) +" hours before next check.]")
if message.content == '!cache!':
data = ""
await client.send_message(message.channel, "Cache invalidated")
if message.content == '!top':
timeToTop = topDate - currentTime
await client.send_message(message.channel, "Time to TOP: "+ str(timeToTop.days) +" days!\nEstimated TOP: " + str(topDate))
#client.run('MzQzMjQ0MDA0MDU5NjQzOTA2.DGgAOA.p41VGt3_wqKfkkD-acv432i2nIE')
client.run('MzQzMjk4NDY0NDI5Mzc1NDk5.DGcJJw.Bv_hjLODcpnJ8WtczjrPcApjxMs')
|
UTF-8
|
Python
| false
| false
| 2,976
|
py
| 5
|
bot.py
| 1
| 0.646169
| 0.633065
| 0
| 82
| 35.292683
| 158
|
atom-chen/Tools-2
| 10,239,202,033,913
|
52bf5978823e91232850ea3f0bccdb164ecd04ee
|
45b32ffcdc7ac3864c0b810b61deeee136616554
|
/ProtocolAnalysis/src/ProtocolAnalysis/UnitTest/TestFuncPtr.py
|
79e3f370a522c883130d2a7fca5254ce87d93b51
|
[] |
no_license
|
https://github.com/atom-chen/Tools-2
|
812071cf6ab3e5a22fb13e4ffdc896ac03de1c68
|
0c41e12bd7526d2e7bd3328b82f11ea1b4a93938
|
refs/heads/master
| 2020-11-29T10:05:24.253448
| 2017-07-12T06:05:17
| 2017-07-12T06:05:17
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
class TestFuncPtr(object):
'''
classdocs
    Test function pointers
'''
def __init__(self):
'''
Constructor
'''
def funcHandle(self, hello):
print("funcHandle called")
@staticmethod
def staticFuncHandle(self, hello):
print("hello")
class TestFuncObj(object):
def __init__(self):
self.m_obj = None
self.m_funcPtr = None
self.m_sFuncPtr = None
def run(self):
self.m_obj = TestFuncPtr()
self.m_funcPtr = self.m_obj.funcHandle
#self.m_funcPtr(self.m_obj, "hello")
self.m_sFuncPtr = TestFuncPtr.staticFuncHandle
self.m_sFuncPtr(self.m_obj, "hello")
|
UTF-8
|
Python
| false
| false
| 796
|
py
| 903
|
TestFuncPtr.py
| 644
| 0.497449
| 0.496173
| 0
| 42
| 17.571429
| 54
|
rodolfoksveiga/web-player
| 1,554,778,169,345
|
0008cc980d972b5a3d6076e30cd86f295206fefc
|
d167b6205a5f5ccf66ac6669ee99dbeab33a9316
|
/videos/views.py
|
0d96880f515878411629d403d31bb0786614b553
|
[] |
no_license
|
https://github.com/rodolfoksveiga/web-player
|
f31b8070a4106064fb365f47ec663be79e3a1398
|
911768442baad932c8a40fe2c38858ffdde09a1c
|
refs/heads/main
| 2023-06-19T21:38:05.245606
| 2021-07-07T16:39:01
| 2021-07-07T16:39:01
| 358,970,130
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
import requests
import vimeo
from .models import Video
from .forms import VideoForm
def videos_view(request, *args, **kwargs):
queryset = Video.objects.all()
context = {
'obj_list': queryset
}
return render(request, 'video/videos.html', context)
def play_video_view(request, id):
client = vimeo.VimeoClient(
key='',
secret=''
)
vimeo_authorization_url = client.auth_url(
scope=['public', 'private'],
redirect='http://localhost:8000/videos/',
state='state'
)
try:
code = request.GET.get('code')
token, user, scop = client.exchange_code(
code,
'http://localhost:8000/videos/'
)
print(token)
    except Exception:
print('BAD!')
# get video's iframe
# response = client.get('/videos/538327228')
# iframe = response.json().get('embed').get('html')
# update video's information
# client.patch(
# '/videos/538327228',
# data={
# 'name': 'NEW Test Video 1',
# 'description': 'NEW This is a test.'
# }
# )
obj = get_object_or_404(Video, id=id)
context = {
'obj': obj,
'auth': vimeo_authorization_url,
}
return render(request, 'video/play_video.html', context)
@login_required(login_url='login')
def create_video_view(request, *args, **kwargs):
form = VideoForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('../')
context = {
'form': form
}
return render(request, 'video/create_video.html', context)
@login_required(login_url='login')
def update_video_view(request, id, *args, **kwargs):
obj = get_object_or_404(Video, id=id)
values = {
'title': obj.title,
'url': obj.url
}
form = VideoForm(request.POST or None, initial=values, instance=obj)
if form.is_valid():
form.save()
return redirect('../')
context = {
'form': form
}
return render(request, 'video/update_video.html', context)
@login_required(login_url='login')
def delete_video_view(request, id, *args, **kwargs):
obj = get_object_or_404(Video, id=id)
if request.method == 'POST':
obj.delete()
return redirect('../../')
context = {
'obj': obj
}
return render(request, 'video/delete_video.html', context)
|
UTF-8
|
Python
| false
| false
| 2,516
|
py
| 15
|
views.py
| 5
| 0.584658
| 0.569157
| 0
| 97
| 24.938144
| 72
|
rishabdesai7/CollegeConnect
| 10,814,727,683,718
|
2c6ea8e5001561d95a30fee54a6b9738cce90130
|
1d0fef589acc5ffe2969e6763a224bc97da74104
|
/colcon/migrations/0003_auto_20200210_1804.py
|
2ff43a9b24abfc5bd76a3dc1c80f86520148f754
|
[] |
no_license
|
https://github.com/rishabdesai7/CollegeConnect
|
ae3c5f35259f2fa844fbc9031029401f0ecf08e9
|
204480bffb981da9f400112e72e918c8a4537fdd
|
refs/heads/master
| 2022-04-12T04:01:07.547950
| 2020-03-03T15:49:29
| 2020-03-03T15:49:29
| 229,413,010
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 2.2.10 on 2020-02-10 12:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('colcon', '0002_auto_20200210_1603'),
]
operations = [
migrations.RenameField(
model_name='userdetails',
old_name='mather_phno',
new_name='mother_phno',
),
migrations.RemoveField(
model_name='userdetails',
name='password',
),
]
|
UTF-8
|
Python
| false
| false
| 488
|
py
| 22
|
0003_auto_20200210_1804.py
| 21
| 0.557377
| 0.491803
| 0
| 22
| 21.181818
| 48
|
vjcitn/BiocHookChecks
| 10,625,749,133,108
|
3a610c92134ee7790bfbe650c3e258f7498e3795
|
a0a6a68bbdf25a353bf8c50ef73ea4d0bf84b58c
|
/inst/repo-specific/git_hook_utilities.py
|
a48879489ff41f07d6290d3245ad081bde3fee0b
|
[
"MIT"
] |
permissive
|
https://github.com/vjcitn/BiocHookChecks
|
6c0fc7530af58793ff3d61f94df9dc04d7fee450
|
175d3fd0ea4acdf60bbe9d92e9ca1f12a38e3404
|
refs/heads/main
| 2023-08-24T21:22:09.136068
| 2021-10-19T13:14:35
| 2021-10-19T13:14:35
| 418,923,350
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
Bioconductor hook utilities
"""
import subprocess
from os import path
HOOKS_CONF = "file:///home/git/repositories/admin/hook_maintainer.git"
LOCAL_HOOKS_CONF = "file:////Users/ni41435_lca/Documents/bioc/hook_maintainer.git"
def indent_xml(elem, level=0):
"""
Recursive function to indent xml entry in RSS feed.
"""
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
# Recurse (aka leap of faith)
for elem in elem:
indent_xml(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def get_hooks_conf():
"""This function does a simple 'git archive' clone process of
hooks.conf.
It clones the file in the /tmp directory. This function ignores
the '#' characters in the file.
"""
# NOTE: Change to HOOKS_CONF to LOCAL_HOOKS_CONF when testing
cmd = "git archive --remote=" + HOOKS_CONF + " HEAD hooks.conf | tar -x"
subprocess.check_output(cmd, shell=True, cwd="/tmp")
if path.exists("/tmp/hooks.conf"):
with open("/tmp/hooks.conf") as f:
txt = f.read()
txt = txt.splitlines()
# Ignore '#' in the file
conf = "\n".join([line for line in txt
if not line.startswith("#")])
return conf
|
UTF-8
|
Python
| false
| false
| 1,557
|
py
| 11
|
git_hook_utilities.py
| 9
| 0.584457
| 0.579961
| 0
| 52
| 28.942308
| 82
|
JoaoFiorelli/ExerciciosCV
| 14,310,831,061,579
|
13d55b822e11f1cf9f42a03644650bea05611b5b
|
47020f6145548dbd1bef6bd9a79d9b473375d567
|
/Ex077.py
|
8c0d03892a7c1db70a4507cdc4d0f1a9c146467e
|
[] |
no_license
|
https://github.com/JoaoFiorelli/ExerciciosCV
|
59ebd0556e5cee6787ac3a2fd476affc854eaac6
|
f6eba3c0a79f1bf249d48c0d07a2653bf16f0513
|
refs/heads/master
| 2023-03-12T04:03:10.811352
| 2021-03-04T00:07:45
| 2021-03-04T00:07:45
| 335,791,211
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
palavras = ("casa", "queijo", "sapeca",
"politica", "armazenar", "amaciante",
"louro", "idiota", "intenso")
for p in palavras:
print(f"\nNa palavra {p} temos as vogais ", end = "")
for letra in p:
if letra in "aeiou":
print(f"{letra} ", end = "")
|
UTF-8
|
Python
| false
| false
| 301
|
py
| 101
|
Ex077.py
| 100
| 0.511628
| 0.511628
| 0
| 10
| 29.1
| 57
|
City-of-Helsinki/open-city-profile
| 1,408,749,310,661
|
4b3b091aaafdca0d695aafc481ba609daa42e24b
|
9cf7a03963e0db10d73930322331be2754ed5e2e
|
/profiles/tests/conftest.py
|
bac02a8966d019e40064e470208ea98dcb0dc513
|
[
"MIT"
] |
permissive
|
https://github.com/City-of-Helsinki/open-city-profile
|
b338fbea74dea90034f75b9dd26172f968e752f9
|
b26bb4a7e1c7fc940e52a4868f03d71131819f95
|
refs/heads/develop
| 2023-06-11T05:34:02.486215
| 2023-06-06T10:37:42
| 2023-06-06T10:54:24
| 133,320,223
| 8
| 10
|
MIT
| false
| 2023-06-06T12:04:46
| 2018-05-14T07:15:50
| 2023-01-15T12:47:53
| 2023-06-06T12:03:01
| 1,593
| 7
| 8
| 6
|
Python
| false
| false
|
from datetime import timedelta
import pytest
from django.utils import timezone as django_timezone
from pytest_factoryboy import register
from open_city_profile.tests.conftest import * # noqa
from profiles.models import _default_temporary_read_access_token_validity_duration
from profiles.schema import profile_updated
from profiles.tests.factories import (
AddressDataDictFactory,
EmailDataDictFactory,
PhoneDataDictFactory,
ProfileDataDictFactory,
ProfileFactory,
SensitiveDataFactory,
TemporaryReadAccessTokenFactory,
VerifiedPersonalInformationFactory,
)
from services.tests.factories import (
ServiceClientIdFactory,
ServiceConnectionFactory,
ServiceFactory,
)
@pytest.fixture
def profile():
return ProfileFactory(
user=UserFactory() # noqa: F405 Name may be defined from star imports
)
@pytest.fixture
def profile_with_sensitive_data():
return SensitiveDataFactory().profile
@pytest.fixture
def profile_with_verified_personal_information():
return VerifiedPersonalInformationFactory().profile
@pytest.fixture
def profile_data():
return ProfileDataDictFactory()
@pytest.fixture
def email_data(primary=True):
return EmailDataDictFactory(primary=primary)
@pytest.fixture
def phone_data(primary=False):
return PhoneDataDictFactory(primary=primary)
@pytest.fixture
def address_data(primary=False):
return AddressDataDictFactory(primary=primary)
@pytest.fixture
def profile_updated_listener(mocker):
profile_updated_listener = mocker.MagicMock()
profile_updated.connect(profile_updated_listener)
return profile_updated_listener
# Register factory fixtures
register(ServiceFactory)
register(ServiceConnectionFactory)
register(ServiceClientIdFactory)
@pytest.fixture
def profile_service():
return ServiceFactory(name="profile-service", is_profile_service=True)
@pytest.fixture(autouse=True)
def disable_audit_log(settings):
settings.AUDIT_LOG_TO_LOGGER_ENABLED = False
settings.AUDIT_LOG_TO_DB_ENABLED = False
VERIFIED_PERSONAL_INFORMATION_ADDRESS_FIELD_NAMES = {
"permanent_address": ["street_address", "postal_code", "post_office"],
"temporary_address": ["street_address", "postal_code", "post_office"],
"permanent_foreign_address": [
"street_address",
"additional_address",
"country_code",
],
}
VERIFIED_PERSONAL_INFORMATION_ADDRESS_TYPES = (
VERIFIED_PERSONAL_INFORMATION_ADDRESS_FIELD_NAMES.keys()
)
class TemporaryProfileReadAccessTokenTestBase:
def create_expired_token(self, profile):
over_default_validity_duration = (
_default_temporary_read_access_token_validity_duration()
+ timedelta(seconds=1)
)
expired_token_creation_time = (
django_timezone.now() - over_default_validity_duration
)
token = TemporaryReadAccessTokenFactory(
profile=profile, created_at=expired_token_creation_time
)
return token
|
UTF-8
|
Python
| false
| false
| 2,998
|
py
| 207
|
conftest.py
| 180
| 0.741494
| 0.74016
| 0
| 114
| 25.298246
| 82
|
nbonamy/downtobox
| 2,637,109,928,161
|
1c77256403f6d7219b3993d7ee70a4bae55c832f
|
7944faf34531f3710f6284fd62de39f7e4c552f5
|
/src/test.py
|
aa7a0a2ec1aebce31794b5a042f8a1d06e172288
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/nbonamy/downtobox
|
c285281397a80a46f6cb40c026d6d1d3d21e45cc
|
1e78654c8e729b58809036a64ed1e0e8dafa98e7
|
refs/heads/master
| 2023-08-21T22:03:45.715687
| 2023-07-22T02:21:25
| 2023-07-22T02:21:25
| 208,615,478
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
import unittest
import utils
# test data
filenames = {
'Interstellar.mkv': 'Interstellar',
'Interstellar.avi': 'Interstellar',
'Interstellar.720p.mkv': 'Interstellar',
'Interstellar.MULTI.mkv': 'Interstellar',
'Interstellar.VOSTFR.mkv': 'Interstellar',
'Interstellar.VOST.mkv': 'Interstellar',
'Interstellar.2016.mkv': 'Interstellar',
'Interstellar.MULTI.1080p.mkv': 'Interstellar',
'Interstellar.2016.1080p.mkv': 'Interstellar',
'Interstellar.2016.MULTI.mkv': 'Interstellar',
'Blade.Runner.mkv': 'Blade Runner',
'Blade.Runner.avi': 'Blade Runner',
'Blade.Runner.720p.mkv': 'Blade Runner',
'Blade.Runner.MULTI.mkv': 'Blade Runner',
'Blade.Runner.VOSTFR.mkv': 'Blade Runner',
'Blade.Runner.VOST.mkv': 'Blade Runner',
'Blade.Runner.1982.mkv': 'Blade Runner',
'Blade.Runner.MULTI.1080p.mkv': 'Blade Runner',
'Blade.Runner.1982.1080p.mkv': 'Blade Runner',
'Blade.Runner.1982.MULTI.mkv': 'Blade Runner',
    'Blade.Runner.1982.MULTI.1080p.mkv': 'Blade Runner',
'Blade.Runner.1982.1982.mkv': 'Blade Runner 1982',
'Blade.Runner.1982.2019.mkv': 'Blade Runner 1982',
'Blade.Runner.1982.2049.mkv': 'Blade Runner 1982',
'Blade.Runner.2049.mkv': 'Blade Runner 2049',
'Blade.Runner.2049.MULTI.1080p.mkv': 'Blade Runner 2049',
'Blade.Runner.2049.1080p.mkv': 'Blade Runner 2049',
'Blade.Runner.2049.2017.MULTI.1080p.mkv': 'Blade Runner 2049',
'Foundation.S01E01.4K.MULTI.2160p.HDR.WEB.H265-EXTREME.mkv': 'Foundation S01E01',
'C\'est.comme.ça.2018.FRENCH.720p.mkv': 'C\'est comme ça',
'L\'élève.Ducobu.TRUEFRENCH.720p.mkv': 'L\'élève Ducobu',
}
# run test
class TestParseFilenameMethod(unittest.TestCase):
def testFilename(self):
for filename in filenames:
title = utils.extractTitle(filename)
self.assertEqual(filenames[filename], title)
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false
| false
| 1,928
|
py
| 15
|
test.py
| 10
| 0.703434
| 0.613424
| 0
| 50
| 37.44
| 83
|
gteu/AtcoderCheatsheet
| 19,198,503,826,075
|
9937af98d8af293d52c5d4536fe5350539536f3a
|
ed9e7329a61151418d16c30111ac3a2eb26afa06
|
/problems/abc/abc246/f.py
|
49a54e064472441d42d986f255d0c145533351c4
|
[] |
no_license
|
https://github.com/gteu/AtcoderCheatsheet
|
e6667ee822b3dd54f8304e70b8efa9b2cfd8047d
|
ec77adb14f5e71de705f284d2af2a6b79ab69e8c
|
refs/heads/main
| 2022-11-13T14:37:31.088246
| 2022-07-04T03:27:51
| 2022-07-04T03:27:51
| 260,360,907
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
MOD = 998244353
N, L = map(int, input().split())
S = []
for _ in range(N):
n = 0
for s in input():
n += 2 ** (ord(s) - ord('a'))
S.append(n)
ans = 0
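# Inclusion-exclusion over all non-empty subsets of the N strings: `cur` is
# the bitmask of letters common to every chosen string, so pow(cnt, L) counts
# the length-L strings typable using only those shared letters. Odd-sized
# subsets are added and even-sized subsets subtracted, yielding the size of
# the union, i.e. strings typable with at least one of the N rows.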
for i in range(1, 2 ** N):
cur = 2 ** 26 - 1
c = 0
for j in range(N):
if i >> j & 1:
cur &= S[j]
c += 1
cnt = bin(cur).count('1')
if c % 2 == 0:
ans -= pow(cnt, L, MOD)
else:
ans += pow(cnt, L, MOD)
ans %= MOD
print(ans)
|
UTF-8
|
Python
| false
| false
| 468
|
py
| 313
|
f.py
| 312
| 0.401709
| 0.350427
| 0
| 25
| 17.72
| 37
|
sonsus/albert_paraphrase
| 13,159,779,823,950
|
772e58c6dd6fa03f4364afbb0d76a1e15debe14e
|
ce981b62c016f3df63f0b3365a30b55e50b43fed
|
/data/prepdata_test.py
|
9dd9a50271f4d8c9e3f67aa4d4b302b6f2f4316b
|
[] |
no_license
|
https://github.com/sonsus/albert_paraphrase
|
7a928ff26b161c57d148c650b5ec415144ff66b0
|
50950c1183b7bc0d53bf95896f7ab9ef73e0b0dc
|
refs/heads/main
| 2023-02-10T08:37:45.847067
| 2021-01-06T15:17:47
| 2021-01-06T15:17:47
| 312,601,905
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from fire import Fire
import csv
import jsonlines as jsl
import json
from collections import defaultdict
from munch import Munch
def open_vocab(vocabpath):
vocab= Munch(json.load(open(vocabpath)))
return vocab
def tokenize(vocab, inputs):
return [vocab.stoi[t] for t in inputs]
def preptest(datapath='./test.csv', vocabpath='./vocab.json'):
processed = defaultdict(list)
vocab = open_vocab(vocabpath)
with open(datapath) as f, jsl.open('test.jsonl', 'w') as tjsl:
# open csv, jsonl files for splits
rawt= csv.reader(f)
processedt = [ (int(id), s1.split(), s2.split()) for (id,s1,s2) in rawt if id != 'id' ]
for (id,s1,s2) in processedt:
record = {'id': id, 's1': tokenize(vocab, s1), 's2': tokenize(vocab, s2)}
tjsl.write(record)
if __name__ == '__main__':
Fire(preptest)
'''
python prepdata_test.py
converts test.csv --> test.jsonl, tokenizing s1/s2 with vocab.json
'''
|
UTF-8
|
Python
| false
| false
| 968
|
py
| 27
|
prepdata_test.py
| 8
| 0.627066
| 0.616736
| 0
| 37
| 25.162162
| 95
|
GabrielWallace/ics140-prog-fundamentals
| 14,121,852,515,620
|
8f213d93642437a092e564f12b0434309cc257ec
|
3fe936b4d5880339627259e29a843d86b12a4d7a
|
/Programming Fundamentals/bug_collector.py
|
304329f0bab897714c2b31a12b5db28835e32a9a
|
[] |
no_license
|
https://github.com/GabrielWallace/ics140-prog-fundamentals
|
c7703003cbfbc61a092ecfe4d8b4af28d1f2acda
|
125eb5b600c465160c521d49155be61400f31584
|
refs/heads/master
| 2021-01-23T10:55:09.212707
| 2017-06-01T22:43:06
| 2017-06-01T22:43:06
| 93,105,312
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
bug_count = 0
bug_sum = 0
for i in range(5):
    user_input = int(input("Enter number of bugs for the day "))  # int() instead of eval(): eval on raw input is unsafe
bug_count = bug_count + user_input
print(bug_count)
|
UTF-8
|
Python
| false
| false
| 171
|
py
| 36
|
bug_collector.py
| 34
| 0.643275
| 0.625731
| 0
| 6
| 26.833333
| 65
|
kjjeong104/MD_kjjeong
| 10,565,619,559,478
|
b04be2312bdc34fd108b588594fa43e8421ccb68
|
2246e52853c710a72f28a81a81093e3ae3b3dfae
|
/py_development/data_process/sapt_dimer/fit_NCCO_pe_test.py
|
813d5b6915fd29c69982a91028103c14c4ae9c4b
|
[] |
no_license
|
https://github.com/kjjeong104/MD_kjjeong
|
39867e22a7cedcce6a47b90ddd8c6769e3e2f5b8
|
2963031e96926bc7c469e81fee7ff996fdcfaee3
|
refs/heads/master
| 2022-12-28T20:19:50.952020
| 2020-10-10T12:04:35
| 2020-10-10T12:04:35
| 302,845,641
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import print_function
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
from time import gmtime, strftime
from datetime import datetime
import numpy as np
from scipy.optimize import curve_fit
# load fake pdb file. Later, new modeller will be defined, and positions will be updated
# do single point energy calculation for mm-relaxed geometries, to calculate residual PE
temperature=300*kelvin
pdbtemp="qm_HCCO_0.pdb"
ffin="ff_onlyNCCO.xml" #ff with only HCCO dihedral
pdb = PDBFile(pdbtemp)
strdir = ''
#infile=sys.argv[1]
#odata=numpy.loadtxt(infile)
#x=odata[:,0]
#targety=odata[:,1]
integ_md = LangevinIntegrator(temperature, 1/picosecond, 0.001*picoseconds)
pdb.topology.loadBondDefinitions('sapt_residues_choline.xml')
pdb.topology.createStandardBonds();
modeller = Modeller(pdb.topology, pdb.positions)
forcefield = ForceField(ffin)
modeller.addExtraParticles(forcefield)
system = forcefield.createSystem(modeller.topology, constraints=None, rigidWater=True)
torsion = [f for f in [system.getForce(i) for i in range(system.getNumForces())] if type(f) == RBTorsionForce][0]
for i in range(system.getNumForces()):
f = system.getForce(i)
type(f)
f.setForceGroup(i)
totmass = 0.*dalton
for i in range(system.getNumParticles()):
totmass += system.getParticleMass(i)
simmd = Simulation(modeller.topology, system, integ_md)
for a in range(0,360,3):
uppdb = PDBFile("mm_NCCO_"+str(a)+".pdb")
modeller2=Modeller(modeller.topology,uppdb.positions)
simmd.context.setPositions(modeller2.positions)
state = simmd.context.getState(getEnergy=True,getForces=True,getPositions=True)
#position = state.getPositions()
print('NCCO dih angle :',a,' PE contrib : '+str(state.getPotentialEnergy()))
#test changing torsion parameters on-the-fly
print(torsion.getNumTorsions())
c0, c1, c2, c3, c4, c5 = 8.4, 1.0, 1.0, 1.0, 1.0, 1.0
p1, p2, p3, p4 = 0, 13, 16, 19
torsion.setTorsionParameters(0,p1,p2,p3,p4,c0,c1,c2,c3,c4,c5)
torsion.updateParametersInContext(simmd.context)
for a in range(0,360,3):
uppdb = PDBFile("mm_NCCO_"+str(a)+".pdb")
modeller2=Modeller(modeller.topology,uppdb.positions)
simmd.context.setPositions(modeller2.positions)
state = simmd.context.getState(getEnergy=True,getForces=True,getPositions=True)
#position = state.getPositions()
print('NCCO dih angle :',a,' PE contrib : '+str(state.getPotentialEnergy()))
#def function(x,c0,c1,c2,c3,c4,c5):
# y=#PES as function of x, using parameters c0~c5
# return y
#new_coeffs,cov=curve_fit(function,x,targety,method='lm')
#print(new_coeffs)
print('Done!')
exit()
|
UTF-8
|
Python
| false
| false
| 2,619
|
py
| 362
|
fit_NCCO_pe_test.py
| 177
| 0.752577
| 0.723559
| 0
| 78
| 32.576923
| 113
|
Groestlcoin/c-lightning-plugin-collection
| 377,957,145,361
|
bd54e1d141b5ba863e4f72d801acb842ab075e9a
|
edb1c2683b031fbcb51f514dbeba27c2e62473c9
|
/simpleFundsOverview/funds.py
|
f3f2be4854e10e061b059e3a2c2d0865bb4b8941
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
https://github.com/Groestlcoin/c-lightning-plugin-collection
|
d9e760414aceeab69b9ac76e034888a5b1946632
|
a2cb8b4ca49bc2112be69b9b2a4bb5521e0bb62d
|
refs/heads/master
| 2020-04-16T11:28:15.945997
| 2019-01-14T11:30:12
| 2019-01-14T11:30:12
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
""" This plugin gives you a nicer overview of the funds that you own.
Instead of calling listfunds and adding all outputs and channels
this plugin does that for you.
Activate the plugin with:
`lightningd --plugin=PATH/TO/LIGHTNING/contrib/plugins/funds/funds.py`
Call the plugin with:
`lightning-cli funds`
The standard unit to depict the funds is set to gros.
The unit can be changed by an argument after `lightning-cli funds`
on each call. It is also possible to change the standard unit when
starting lightningd just pass `--funds_display_unit={unit}` where
unit can be gro for gro, groestls for groestls, mGRS for milliGroestlcoin and GRS for GRS.
"""
import json
from lightning.lightning import LightningRpc
from lightning.plugin import Plugin
from os.path import join
rpc_interface = None
plugin = Plugin(autopatch=True)
unit_aliases = {
"groestlcoin": "GRS",
"grs": "GRS",
"gro": "gro",
"gros": "gro",
"groestl": "groestls",
"groestls": "groestls",
"milli": "mGRS",
"mgrs": "mGRS",
"milligrs": "mGRS",
"GRS": "GRS",
"GRO": "gro",
"m": "mGRS",
}
unit_divisor = {
"gro": 1,
"groestls": 100,
"mGRS": 100*1000,
"GRS": 100*1000*1000,
}
@plugin.method("funds")
def funds(unit=None, plugin=None):
"""Lists the total funds the lightning node owns off- and onchain in {unit}.
{unit} can take the following values:
gro, GRO, gros to depict gro
groestls, groestl to depict groestls
mGRS, mgrs, milli, milligrs, m to depict mGRS
GRS, groestlcoin, grs to depict GRS
When not using gros (default) the comma values are rounded off."""
plugin.log("call with unit: {}".format(unit), level="debug")
if unit is None:
unit = plugin.get_option("funds_display_unit")
if unit != "G":
unit = unit_aliases.get(unit.lower(), "gro")
else:
unit = "GRS"
div = unit_divisor.get(unit, 1)
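    # Integer division below floors any sub-unit remainder, which is the
    # "rounded off" behaviour the docstring describes for non-gro units.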
funds = rpc_interface.listfunds()
onchain_value = sum([int(x["value"]) for x in funds["outputs"]])
offchain_value = sum([int(x["channel_sat"]) for x in funds["channels"]])
total_funds = onchain_value + offchain_value
return {
'total_'+unit: total_funds//div,
'onchain_'+unit: onchain_value//div,
'offchain_'+unit: offchain_value//div,
}
@plugin.method("init")
def init(options, configuration, plugin):
global rpc_interface
plugin.log("start initialization of the funds plugin", level="debug")
basedir = configuration['lightning-dir']
rpc_filename = configuration['rpc-file']
path = join(basedir, rpc_filename)
plugin.log("rpc interface located at {}".format(path))
rpc_interface = LightningRpc(path)
plugin.log("Funds Plugin successfully initialezed")
plugin.log("standard unit is set to {}".format(
plugin.get_option("funds_display_unit")), level="debug")
# set the standard display unit to gros
plugin.add_option('funds_display_unit', 's',
'pass the unit which should be used by default for the simple funds overview plugin')
plugin.run()
|
UTF-8
|
Python
| false
| false
| 3,111
|
py
| 1
|
funds.py
| 1
| 0.666024
| 0.658309
| 0
| 107
| 28.074766
| 103
|
TardC/UDP_Chatroom
| 5,695,126,651,787
|
bf2f52f15e9820adca6c890f4ca3653b3bf5701a
|
6350899085abea5195835baab2fe1e96b7ee3f19
|
/client/client_c.py
|
5eaa3e7fbef9a3873b789aa263b4c7c11c2e8330
|
[] |
no_license
|
https://github.com/TardC/UDP_Chatroom
|
8bdaa5e499a655a9144a751cc87962620fbf22ec
|
bef2f777b7f67dca0f1fd4afb77b56d61c7ad271
|
refs/heads/master
| 2021-01-20T13:56:43.251321
| 2017-05-07T15:43:36
| 2017-05-07T15:43:36
| 90,542,209
| 2
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
from socket import *
def init():
client_c_name = gethostname()
client_c_ip = gethostbyname(client_c_name)
client_c_port = 8886
client_c_address = (client_c_ip, client_c_port)
client_c = socket(AF_INET, SOCK_DGRAM)
client_c.bind(client_c_address)
return client_c
def main():
client_c = init()
server_ip = '192.168.131.1'
server_port = 8888
server_address = (server_ip, server_port)
while True:
message = raw_input("Message>> ")
client_c.sendto(message, server_address)
if message == '-q':
break
client_c.close()
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false
| false
| 663
|
py
| 4
|
client_c.py
| 3
| 0.586727
| 0.558069
| 0
| 33
| 19.090909
| 51
|
fabric-testbed/CredentialManager
| 16,441,134,812,886
|
84eacb7d1073e13ac0d412068c823b8da41f97c2
|
3538fa5a4523e309292a6f2afd214d8fb74b252d
|
/fabric_cm/__init__.py
|
9d72f7b512d291c023105487aa05df7cca14c51f
|
[
"MIT"
] |
permissive
|
https://github.com/fabric-testbed/CredentialManager
|
81c16e6edea549854cbf6dad7d7ec5c8f261ec9e
|
0c211e7b1efeead371100019da5574c6d8c770ea
|
refs/heads/master
| 2023-09-01T19:53:36.443917
| 2023-06-12T18:42:55
| 2023-06-12T18:42:55
| 242,810,981
| 1
| 0
|
MIT
| false
| 2023-09-11T20:07:01
| 2020-02-24T18:26:43
| 2022-04-26T19:54:17
| 2023-09-11T20:07:00
| 2,792
| 1
| 0
| 3
|
Python
| false
| false
|
__version__ = "1.5.0"
__API_REFERENCE__ = "https://github.com/fabric-testbed/CredentialManager"
|
UTF-8
|
Python
| false
| false
| 96
|
py
| 53
|
__init__.py
| 40
| 0.6875
| 0.65625
| 0
| 2
| 47
| 73
|
NurymKenzh/ELake
| 9,818,295,279,907
|
3596e7420c1253e20abea9766e9dbc18c9f831c3
|
40b31a6b1aa0e73e50f7f150c287f76ae58928ac
|
/ELake/Python/MergeShpLayers.py
|
22e2a864daffa481559b75eb5bc3315464c75140
|
[] |
no_license
|
https://github.com/NurymKenzh/ELake
|
5ef98c70e3f765e470c51c0e0eb25761681be933
|
27160ec4a4cc6b1bf2f5a5ac96a5e598bd395f52
|
refs/heads/master
| 2020-03-19T11:00:43.521421
| 2019-05-15T06:47:46
| 2019-05-15T06:47:46
| 136,421,158
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import glob
import shapefile
folder = raw_input()
outfile = raw_input()
files = glob.glob(folder + "/*.shp")
w = shapefile.Writer()
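# Note (added): the no-argument Writer(), the private w._shapes, and w.save()
# follow the pyshp 1.x API; pyshp 2.x would instead take the target path up
# front, e.g. shapefile.Writer(outfile), and write the files on close().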
for f in files:
r = shapefile.Reader(f)
w._shapes.extend(r.shapes())
w.records.extend(r.records())
w.fields = list(r.fields)
w.save(outfile)
|
UTF-8
|
Python
| false
| false
| 278
|
py
| 154
|
MergeShpLayers.py
| 9
| 0.690647
| 0.690647
| 0
| 14
| 18.857143
| 36
|
atliSig/isfrost-django
| 9,474,697,875,456
|
1d2674767d2f5cb4f84ae7024ce535cc60dbd6f5
|
4afb48a927ad70765fd100d800d979b529ca7f4b
|
/isfrost_app/migrations/0023_auto_20171218_1709.py
|
b8b9b187421dda84073d9cf98b8040b300a78142
|
[] |
no_license
|
https://github.com/atliSig/isfrost-django
|
305f5ca73573387af56c3adf9d53ff08c5bee753
|
c94ab7d6a84cee0f425fea8a9ac6a9a6f35b47ab
|
refs/heads/master
| 2021-09-10T21:22:41.959655
| 2018-01-12T01:29:45
| 2018-01-12T01:29:45
| 113,074,471
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 2.0 on 2017-12-18 17:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('isfrost_app', '0022_auto_20171218_1650'),
]
operations = [
migrations.AlterModelOptions(
name='company',
options={'verbose_name': 'Fyrirtæki', 'verbose_name_plural': 'Fyrirtækið'},
),
migrations.AddField(
model_name='company',
name='name',
field=models.CharField(default='Ísfrost', max_length=200, verbose_name='nafn'),
),
migrations.AlterField(
model_name='company',
name='phone_number',
field=models.CharField(max_length=8, verbose_name='símanúmer'),
),
]
|
UTF-8
|
Python
| false
| false
| 782
|
py
| 66
|
0023_auto_20171218_1709.py
| 44
| 0.57732
| 0.533505
| 0
| 27
| 27.740741
| 91
|
AlexandreSenpai/video-hub
| 16,140,487,129,506
|
d95889797dbed75331439133aa13bb69941e1c4b
|
37d1e9069fb1f4fbfe46eb075a3bf628c2b5ca53
|
/__init__.py
|
f830ca8a5e8796dda52e76d09d45a17792bb9c29
|
[
"MIT"
] |
permissive
|
https://github.com/AlexandreSenpai/video-hub
|
97e66f54e2a10b31e20e535201083b52ecf2479d
|
d20b580e136d40c742398e27a59748706d09cc81
|
refs/heads/master
| 2020-08-02T15:46:37.193893
| 2019-11-17T18:31:30
| 2019-11-17T18:31:30
| 211,416,012
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, render_template, request, redirect, session
from utils.download_file import Upload
from utils.storage_utils import Storage
from utils.tk_generator import id_gen
from datetime import datetime
from utils.firebase_utils import firebase
import os
app = Flask(__name__, template_folder='./pages/')
stor = Storage()
upload = Upload()
db = firebase()
@app.route('/', methods=['GET'])
def main_page():
log_msg = session['upload_log'] if 'upload_log' in session.keys() else None
if 'upload_log' in session.keys():
del session['upload_log']
videos = db.limit_videos(48)
recent = db.limit_videos(6)
most_viewed = db.limit_videos(6, u'views')
return render_template('index.html', videos=videos, recent=recent, most_viewed=most_viewed, log={"msg":log_msg})
@app.route('/b/<video>', methods=['GET'])
def dinamic_route(video):
video = db.get_video(video)
another = db.limit_videos(24)
db.update_video(video)
return render_template('player.html', video=video, another=another)
@app.route('/upload_page', methods=['GET'])
def redirect_to_upload_page():
return render_template('upload.html')
@app.route('/upload', methods=['POST'])
def upload_videos():
def manage_data(files):
for file_name in files:
file_url = stor.upload_file(obj.get('id'), file_name)
            file_type = file_name.split('.')[-1]
if file_type in upload.ext.get('video'):
obj['content'] = file_url
else:
obj['thumbnail'] = file_url
db.new_video(obj.get('id'), obj)
session['upload_log'] = 'O upload terminou com sucesso.\n' + str(len(files)) + ' arquivo(s) sucedidos.'
return redirect('/')
def delete_files():
for file in os.listdir('./static/download'):
os.remove('./static/download/'+file)
data = request.files
id = id_gen()
files = []
error = []
ext = []
obj = {
"id": id.generate_id(),
"views": 0,
"name": '',
"thumbnail": 'https://firebasestorage.googleapis.com/v0/b/video-26857.appspot.com/o/resources%2Fno-thumb.jpg?alt=media&token=712f39b4-0220-4ee5-aa99-f4f0df798b00',
"content": '',
"date": datetime.today(),
"creator": ""
}
obj['name'] = request.form.get('vid_name') if request.form.get('vid_name') != '' else 'sem título'
for file in list(data):
file_blob = data.get(file)
file_name = file_blob.filename
file_type = file_name.split('.')[-1]
if file_type not in set(ext):
ext.append(file_type)
response = upload.download_file(file_blob, file_name)
if response == None:
if file_name not in set(files) and file_name != '':
files.append(file_name)
else:
error.append(file_name)
else:
error.append(file_name)
has_vid = [True for f in ext if f in upload.ext['video']]
if error == [] and True in has_vid:
if len(files) < 2 and ext[0] in upload.ext['video']:
manage_data(files)
delete_files()
else:
manage_data(files)
delete_files()
else:
delete_files()
session['upload_log'] = 'O upload terminou com falha.\nLog: 1 ou mais arquivos com erro.'
return redirect('/')
return redirect('/')
@app.route('/join', methods=['GET'])
def join_us_page():
return render_template('join.html')
@app.route('/create_user', methods=['POST'])
def create_user():
data = dict(request.form)
user_email = data.get('email')
user_pass = data.get('password')
db.create_user(user_email, user_pass)
return redirect('/')
if __name__ == '__main__':
app.secret_key = 'nasakuki20'
app.config['SESSION_TYPE'] = 'filesystem'
app.run(debug=True, use_reloader=True)
|
UTF-8
|
Python
| false
| false
| 3,922
|
py
| 14
|
__init__.py
| 5
| 0.592706
| 0.581994
| 0
| 109
| 34.981651
| 171
|
yaront/BMI-MutSig
| 3,135,326,136,798
|
551b64fd09730fb55845b6390397522fb6705630
|
12f74e3cefc01cae4d07155b9915a2b5f2b605d1
|
/scripts/Emdometrial/Statistics/t_test_avg_bmi.py
|
05f4da6dbd260652d22c2c139ed51a172bf4981f
|
[] |
no_license
|
https://github.com/yaront/BMI-MutSig
|
211f684998006104b77e1e8557201b384455b768
|
456dc793ab2dbd955b5cef098fd14539d428de0b
|
refs/heads/master
| 2022-02-22T23:25:16.062334
| 2019-10-06T18:53:47
| 2019-10-06T18:53:47
| 151,335,574
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 7 22:19:05 2018
@author: tomer
"""
#%%
# =================================================
# # T-test for the average BMI of mutated VS wt
# =================================================
import numpy as np
import pandas as pd
import scipy.stats as st
from itertools import islice
#%%
gene_bmi_mut = pd.read_table('./../../../databases/Endometrial/mutation_gene_bmi/UCEC_bmi_gene_mut.txt', sep = '\t', index_col = 0)
p_value = []
wt_avg = []
mut_avg = []
bmi = gene_bmi_mut.loc['BMI'][:-1]
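# islice skips the first two rows (presumably header/BMI rows, not genes);
# each remaining row is a gene's 0/1 mutation vector across samples. Samples
# are split into wild-type (0) and mutated (1) and their mean BMIs compared
# with an independent two-sample t-test (ttest_ind, equal variances assumed
# by scipy's default).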
for index, row in islice(gene_bmi_mut.iterrows(), 2, None):
mut = row[:-1]
wt_bmi = bmi[mut == 0].values
mut_bmi = bmi[mut == 1].values
wt_avg.append(wt_bmi.mean())
mut_avg.append(mut_bmi.mean())
p_value.append(st.ttest_ind(wt_bmi, mut_bmi).pvalue)
#%%
gene_bmi_mut['WT_AVG'] = ['-','-'] + wt_avg
gene_bmi_mut['MUT_AVG'] = ['-','-'] + mut_avg
gene_bmi_mut['P_VALUE'] = ['-','-'] + p_value
gene_bmi_mut = gene_bmi_mut.sort_values(by='P_VALUE')
gene_bmi_mut.to_csv('./../output/bmi_mut_p_value.txt', sep = '\t')
|
UTF-8
|
Python
| false
| false
| 1,108
|
py
| 11
|
t_test_avg_bmi.py
| 9
| 0.546931
| 0.529783
| 0
| 42
| 25.380952
| 131
|
UMNLibraries/experts_dw
| 11,038,065,991,507
|
1029e567e04e15d6081822f5c702d10dcc3489c0
|
9d73e270f44b8893c11d1aafb4c15a0d5b359bae
|
/legacy/populate_umn_dept_pure_org_uuid.py
|
b54f25feb8d1d80b8ec9d0e6b4b75e4153a8e4c9
|
[] |
no_license
|
https://github.com/UMNLibraries/experts_dw
|
55011ee5afda65ac26b57507aec07dd171b85217
|
897c5442fb6a92336247c1372e5387ee7dc60510
|
refs/heads/main
| 2023-04-27T19:08:38.045074
| 2023-04-27T18:34:55
| 2023-04-27T18:34:55
| 81,886,376
| 2
| 0
| null | false
| 2022-04-12T20:03:01
| 2017-02-14T00:28:00
| 2022-01-13T19:08:00
| 2022-04-12T20:03:00
| 2,254
| 2
| 0
| 7
|
Python
| false
| false
|
# One-off to populate umn_dept_pure_org.pure_org_uuid, assumed to be empty.
import db
session = db.session('hotel')
from models import UmnDeptPureOrg, PureOrg
for umn_dept_pure_org in session.query(UmnDeptPureOrg).all():
pure_org = (
session.query(PureOrg)
.filter(PureOrg.pure_id == umn_dept_pure_org.pure_org_id)
.one_or_none()
)
if pure_org is not None:
umn_dept_pure_org.pure_org_uuid = pure_org.pure_uuid
session.add(umn_dept_pure_org)
else:
# Should never happen:
print(umn_dept_pure_org)
session.commit()
|
UTF-8
|
Python
| false
| false
| 550
|
py
| 289
|
populate_umn_dept_pure_org_uuid.py
| 267
| 0.696364
| 0.696364
| 0
| 19
| 27.947368
| 75
|
EduardoSanglard/ProjetoEstudos
| 15,676,630,658,659
|
5083a9c96cb3f375f823125d4618e152240fa1d9
|
50637f643fa6630e42a4e3cbf0e6ed62e24c63f8
|
/EstruturaSequencial/ex2.py
|
443a4c2d139f8a7bd736428ae2829824e005fadf
|
[] |
no_license
|
https://github.com/EduardoSanglard/ProjetoEstudos
|
75f59b86b0c60b82d6d36e9c5c035157f8259936
|
d810c5ddbeb67768189e0783af70245f6d066484
|
refs/heads/master
| 2020-04-13T09:25:20.054921
| 2020-02-16T01:46:42
| 2020-02-16T01:46:42
| 163,110,550
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
numero = input('Informe o numero: ')
try:
print('O numero informado foi ', int(numero))
except ValueError:
print('Voce nao digitou um numero')
|
UTF-8
|
Python
| false
| false
| 151
|
py
| 47
|
ex2.py
| 43
| 0.688742
| 0.688742
| 0
| 6
| 24.333333
| 49
|
akshat343/Python-Programming
| 5,952,824,684,724
|
4aa4df60d88482f8426199d9d4b461528e279a7f
|
a93cfeed4c2a2833f1896bf5f39aa31955f21efe
|
/Algorithms/Greedy Algorithms/Huffman_coding.py
|
c1a5219d01a5ffd77f8a95485af49eb5d149fd23
|
[] |
no_license
|
https://github.com/akshat343/Python-Programming
|
ae83d05408fb67d51d388df22492dfe743596b2a
|
f5a1540770388e49d65536352ce1816c406d5229
|
refs/heads/master
| 2023-08-05T19:58:40.293770
| 2021-10-07T07:25:46
| 2021-10-07T07:25:46
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Author : Robin Singh
Implementation Of Huffman Coding
Huffman coding is a lossless data compression algorithm. In this algorithm, a variable-length code is
assigned to input different characters. The code length is related to how frequently characters are used
Most frequent characters have the smallest codes and longer codes for least frequent characters
Time Complexity : O(nLogn)
"""
class NodeTree:
def __init__(self, left=None, right=None):
self.left = left
self.right = right
def children(self):
return self.left, self.right
def nodes(self):
return self.left, self.right
    def __str__(self):
        # __str__ must return a string, not a tuple
        return '(%s, %s)' % (self.left, self.right)
def huffman_coding_greedy(node, left=True, string=''):
if type(node) is str:
return {node: string}
(l, r) = node.children()
d = dict()
d.update(huffman_coding_greedy(l, True, string + '0'))
d.update(huffman_coding_greedy(r, False, string + '1'))
return d
freq = {}
string = input("Entre Your String")
for c in string:
if c in freq:
freq[c] += 1
else:
freq[c] = 1
freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)
nodes = freq
while len(nodes) > 1:
(key1, c1) = nodes[-1]
(key2, c2) = nodes[-2]
nodes = nodes[:-2]
node = NodeTree(key1, key2)
nodes.append((node, c1 + c2))
nodes = sorted(nodes, key=lambda x: x[1], reverse=True)
if __name__ == '__main__':
Code = huffman_coding_greedy(nodes[0][0])
print("Huffman Encoding\nLetter\tFrequency\tCode")
    for (char, frequency) in freq:
        print(f" {char}\t\t{frequency}\t\t\t{Code[char]}")
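Since the loop above re-sorts the whole node list after every merge, the stated O(n log n) bound really calls for a priority queue. A minimal sketch of that variant using only the standard library; the integer tie-breaker is an addition of this sketch so that heapq never has to compare NodeTree instances:
import heapq
from collections import Counter
def huffman_codes_heap(text):
    # heap entries are (frequency, tie_breaker, node_or_char)
    heap = [(f, k, ch) for k, (ch, f) in enumerate(Counter(text).items())]
    heapq.heapify(heap)
    tie = len(heap)
    while len(heap) > 1:
        f1, _, n1 = heapq.heappop(heap)
        f2, _, n2 = heapq.heappop(heap)
        heapq.heappush(heap, (f1 + f2, tie, NodeTree(n1, n2)))
        tie += 1
    return huffman_coding_greedy(heap[0][2])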
|
UTF-8
|
Python
| false
| false
| 1,663
|
py
| 84
|
Huffman_coding.py
| 81
| 0.630186
| 0.616356
| 0
| 61
| 26.245902
| 104
|
amithasarath/django-vastrakala
| 18,047,452,584,102
|
54b5771b0d33ae4add44cd71914ae9a0f48a9ce2
|
75cb5dc378d91de9abeb7bd2b95d34f35d6abd35
|
/vastrakala/accounts/forms.py
|
c05f0c3e8644405734977eb03fc6bbaa9a4770e3
|
[] |
no_license
|
https://github.com/amithasarath/django-vastrakala
|
3633b6ca4eb8ca268ebb3bcad43a578a7d4ee11d
|
72959f623e2a4f47a09ad3d87bbed22b209f6201
|
refs/heads/master
| 2020-03-29T05:08:13.574918
| 2018-11-12T09:29:05
| 2018-11-12T09:29:05
| 117,645,180
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import forms
from . models import Dealer,Reseller,Customer,SalesOrder
from django.utils.translation import gettext_lazy as _
from django.conf import settings
class DealerForm(forms.ModelForm):
class Meta:
model = Dealer
fields = '__all__'
widgets = {
'dealer_code': forms.TextInput(attrs={
'class': "form-control",
'placeholder' : "Enter Dealer Code"
}),
'dealer_name': forms.TextInput(attrs={
'class': "form-control",
'placeholder': "Enter Dealer Name"
})
}
class ResellerForm(forms.ModelForm):
class Meta:
model = Reseller
fields = '__all__'
widgets = {
'reseller_name': forms.TextInput(attrs={
'class': "form-control"
})
}
class CustomerForm(forms.ModelForm):
class Meta:
model = Customer
fields = '__all__'
widgets = {
'customer_name': forms.TextInput(attrs={
'class': "form-control"
})
}
class SalesOrderForm(forms.ModelForm):
# booking_date = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS)
# def clean(self):
# cleaned_data = super(SalesOrderForm,self).clean()
# client_type = cleaned_data.get("client_type")
# # print client_type
# reseller = cleaned_data.get("reseller")
# # print reseller
# if "Reseller" in client_type and not reseller:
# raise forms.ValidationError(
# "Please Enter the Reseller Name"
# )
def __init__(self, *args, **kwargs):
"""django form dropdown default value remove --------"""
super(SalesOrderForm, self).__init__(*args, **kwargs)
self.fields['client_type'].empty_label = ""
self.fields['reseller'].empty_label = ""
self.fields['customer'].empty_label = ""
self.fields['type'].empty_label = "<Select Item Type>"
self.fields['dealer'].empty_label = "" #None
class Meta:
model = SalesOrder
fields = '__all__'
# appointment_date = forms.DateField(
# widget=forms.DateInput(format='%m/%d/%Y'),
# input_formats=('%m/%d/%Y',)
# )
# fields = ['reseller','customer','dealer_code','cost_price','selling_price','order_status']
# exclude = ['title']
# widgets = {
# 'name': forms.Textarea(attrs={'cols': 80, 'rows': 20}),
# }
        widgets = {
'client_type': forms.Select(attrs={
'style': 'margin-bottom: 15px;',
# 'placeholder': 'Write your name here'
'class':"form-control"
}),
'order_status': forms.Select(attrs={
'style': 'margin-bottom: 15px;',
# 'placeholder': 'Write your name here'
'class': "form-control"
}),
'reseller': forms.Select(attrs={
'style': 'margin-bottom: 15px;',
# 'placeholder': 'Write your name here'
'class':"form-control"
}),
'customer': forms.Select(attrs={
'style': 'margin-bottom: 15px;',
# 'placeholder': 'Write your name here'
'class':"form-control"
}),
'type': forms.Select(attrs={
'style': 'margin-bottom: 15px;',
# 'placeholder': 'Write your name here'
'class':"form-control"
}),
'qty': forms.NumberInput(attrs={
'style': 'margin-bottom: 15px;',
'min':1,
'step':1,
# 'placeholder': 'Write your name here'
'class':"form-control"
}),
# 'booking_date': forms.DateInput(format='%d-%m-%Y',attrs={
'booking_date': forms.DateInput(format=settings.DATE_INPUT_FORMATS,attrs={
'style': 'margin-bottom: 15px;',
'input_formats' : settings.DATE_INPUT_FORMATS,
# 'input_formats' : "%d-%m-%Y",
'type':'date',
# 'placeholder': 'Write your name here'
'class':"form-control"
}),
'cost_price': forms.NumberInput(attrs={
'style': 'margin-bottom: 15px;',
'step' : 1,
'min':0,
# 'placeholder': 'Write your name here'
'class':"form-control"
}),
'selling_price': forms.NumberInput(attrs={
'style': 'margin-bottom: 15px;',
'step': 1,
'min': 0,
# 'placeholder': 'Write your name here'
'class':"form-control"
}),
'dealer': forms.Select(attrs={
'style': 'margin-bottom: 15px;',
# 'placeholder': 'Write your name here'
'class':"form-control"
}),
'tracking_id': forms.TextInput(attrs={
'style': 'margin-bottom: 15px;',
# 'placeholder': 'Write your name here'
'class':"form-control"
})
}
labels = {
'order_status': _('Status of Order'),
}
help_texts = {
'dealer_code': _('Short code of the Dealer.'),
'client_type': _('Type of the client.'),
'reseller': _('Name of Reseller.'),
}
error_messages = {
'dealer_code': {
'max_length': _("This code's name is too long."),
},
}
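For context, a minimal sketch of wiring SalesOrderForm into a view; the view name, url name, and template path are placeholders, not part of this app:
from django.shortcuts import render, redirect
from .forms import SalesOrderForm
def create_sales_order(request):  # hypothetical view
    form = SalesOrderForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('order-list')  # hypothetical url name
    return render(request, 'salesorder_form.html', {'form': form})  # hypothetical template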
|
UTF-8
|
Python
| false
| false
| 5,727
|
py
| 67
|
forms.py
| 26
| 0.478435
| 0.472848
| 0
| 157
| 35.477707
| 100
|
aleMartinelli04/ultimo4lifebot
| 19,602,230,744,723
|
a2b365b181101cac279881f43d91057e626e3102
|
24d71850f14329d34cb118794f3d2ec773b0f5ed
|
/plugins/api.py
|
ca649a3de0f4c28d1938f71991576173accab137
|
[] |
no_license
|
https://github.com/aleMartinelli04/ultimo4lifebot
|
2dc78e5bad25ba9e98f579192129b1b0bab48cda
|
742adab95bc919c4aa60f6bfa9456a178ed48125
|
refs/heads/master
| 2023-04-12T19:38:09.756806
| 2021-05-08T08:56:14
| 2021-05-08T08:56:14
| 324,736,304
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from random import randint
from pyrogram import Client, filters, emoji
import requests
import config
PREFIXES = ["Ultimo ", "ultimo "]
@Client.on_message(filters.command("gatto", prefixes=PREFIXES))
async def on_cat_api(_, message):
message = message.reply_to_message or message
r = requests.get('https://api.thecatapi.com/v1/images/search')
cat = r.json()[0]["url"]
try:
await message.reply_photo(cat)
except AttributeError:
await message.reply_animation(cat)
@Client.on_message(filters.command("tenor", prefixes=PREFIXES))
async def on_tenor_gif(_, message):
api_key = config.api_key
limit = 10
gif_to_search = ' '.join(message.command[1:])
message = message.reply_to_message or message
if gif_to_search == "":
await message.reply_text("Che gif devo cercare?")
return
params = {"q": gif_to_search, "key": api_key, "limit": limit}
r = requests.get("https://api.tenor.com/v1/search", params=params)
if r.status_code == 200:
try:
url = r.json()["results"][randint(0, limit-1)]["url"]
await message.reply_animation(url)
        except (IndexError, KeyError):
            # "except A or B" only catches A; a tuple catches both
            await message.reply_text(f"No gif found {emoji.FACE_WITH_MONOCLE}")
    else:
        await message.reply_text(f"Tenor returned status code {r.status_code}")
|
UTF-8
|
Python
| false
| false
| 1,344
|
py
| 18
|
api.py
| 13
| 0.645089
| 0.634673
| 0
| 48
| 27
| 86
|
deepsleeping/dev
| 4,114,578,695,434
|
559dff4746485ab035220e322304e895fa8d1afa
|
c42c21ca1d17595f0163097d521252c95d4ba08e
|
/hardgame.py
|
f0d267d88123c3d6eea3d7a11a85b20de124b1cc
|
[] |
no_license
|
https://github.com/deepsleeping/dev
|
913892b985df2287806cae6c2051aef6eaf07bf7
|
3c8d9a3034da6dad9cbbcbffa5224cf76cbbc37e
|
refs/heads/master
| 2021-01-13T17:24:06.996392
| 2017-02-13T11:45:29
| 2017-02-13T11:45:29
| 81,791,861
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
print("안녕 나는 게임파이썬이야")
point=100
rand=random.randrange(1,101)
box=[]
while(point>0):
i=input("1부터100까지중 원하는 숫자를 골라서 적어:")
if(rand>int(i)):
point = point -1
box.append(i)
print("그 숫자보단 커")
elif(rand<int(i)):
point = point -1
box.append(i)
print("그 숫자보단 작아")
else:
print("축하해")
print("네 점수는 {}점 이야".format(point))
for a in box:
print("네가 고른 틀린 숫자들은:{}".format(a))
break
|
UTF-8
|
Python
| false
| false
| 531
|
py
| 9
|
hardgame.py
| 9
| 0.625304
| 0.591241
| 0
| 22
| 17.681818
| 38
|
brutalic/pynet_brutal
| 1,563,368,145,786
|
b087eaf20f79de1b1280bb59d174641d11bf55af
|
6c752a0f0182872f3f1af9d465963505f9fc859b
|
/class2/BrutalTelnetLib.py
|
a4567545497507241a1f150b89e087bfaeaa733d
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/brutalic/pynet_brutal
|
6802ff351788b354f5375c94751a2692a63127a6
|
2afb94430dc9a19eeaf075460494a44e93fab683
|
refs/heads/master
| 2020-05-22T04:39:37.878082
| 2016-11-28T22:01:46
| 2016-11-28T22:01:46
| 64,899,880
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import telnetlib
import time
import yaml
IpDevice = '184.105.247.70'
TelnetPort = 23
TelnetTimeout = 5
User = 'pyclass'
Pass = '88newclass'
#Initiating a Telnet session
RemoteTelnet = telnetlib.Telnet(IpDevice, TelnetPort, TelnetTimeout)
#Login portion
TelnetUser = RemoteTelnet.read_until("sername: ", TelnetTimeout)
print TelnetUser
RemoteTelnet.write(User + "\n")
TelnetPass = RemoteTelnet.read_until("assword: ", TelnetTimeout)
print TelnetPass
RemoteTelnet.write(Pass + "\n")
time.sleep(1)
RemoteTelnet.read_very_eager()
#Adjusting the terminal length to zero
RemoteTelnet.write("term length 0" + "\n")
time.sleep(1)
RemoteTelnet.read_very_eager()
#Executing and displaying "show ip interface brief" command
RemoteTelnet.write("show ip interface brief" + "\n")
time.sleep(1)
ShowIpOutput = RemoteTelnet.read_very_eager()
print ShowIpOutput
RemoteTelnet.close()
print "\nPrinting output to a yaml file..."
yaml_output = 'ShowIp.yml'
with open(yaml_output, "w") as f:
f.write(yaml.dump(ShowIpOutput, default_flow_style=False))
time.sleep(1)
print "\nDone!\n"
|
UTF-8
|
Python
| false
| false
| 1,098
|
py
| 88
|
BrutalTelnetLib.py
| 39
| 0.752277
| 0.733151
| 0
| 48
| 21.833333
| 68
|
kalpeshshardul/CS-5700-Fundamentals-of-Computer-Networking
| 12,970,801,268,681
|
62d4162e93857252592ab6dc9a7d5f8826b7db12
|
555d38d7e9ceef18cf007295b0dc902cf9fea785
|
/FCN_projects/project5_CDN/dnsserver.py
|
ffa0db325bb0ef3cbddd7c62cd8be3571380f11d
|
[] |
no_license
|
https://github.com/kalpeshshardul/CS-5700-Fundamentals-of-Computer-Networking
|
52024b93c5d4df4139278ba0dfe35858155bfe69
|
041266fad00a9a8a9001f6035cfa410e8f8e2755
|
refs/heads/master
| 2021-05-02T10:16:26.592986
| 2018-02-08T18:46:49
| 2018-02-08T18:46:49
| 120,792,920
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import socket,sys,threading,struct,math,collections,urllib,urllib2,json
from thread import *
###############################################################HEADER DETAILS#######################################################
global dns_response
def head_construct(ident): #Constructing header
flag = '\x81\x80'
qd = q_header[2]
an = 1
ns = 0
ar = 0
Head = struct.pack('!H', ident) + flag + struct.pack('!4H', qd, an, ns, ar)
return Head
def ans_construct(best_replica): #Construction of answer
name = 0xc00c
type = 0x0001
clas = 0X0001
ttl = 60
rd_length = 0x0004
r_data = socket.inet_aton(best_replica)
Ans = struct.pack('!HHHLH4s', name, type, clas, ttl, rd_length, r_data)
return Ans
##########################################################EC2 REPLICAS USING GEO-LOCAION#################################################
def search_best_replica(address): #Searching the best replica out of the nine replicas for the requesting clients with the help of Geo Location
EC2_Hosts_IP = ['54.233.152.60','52.90.80.45','52.28.249.79','52.215.87.82','52.62.198.57','52.192.64.163','54.70.111.57','54.169.10.54','54.183.23.203']
coordinates = collections.OrderedDict() # Inserting EC2_Hosts latitude and longitude values in an ordered dictionary
results=list()
Value=((urllib2.urlopen('http://api.ipinfodb.com/v3/ip-city/?key=d681cbf298b459b62d2313e03b1333355f38ebb46e5f742896b144b4c385802f&ip=' + address)).read()).split(';')
x_cord = (90.0 - float(Value[8]))*0.0174532925199 #Latitude value for client
y_cord = (0.0174532925199*(float(Value[9]))) #Longitube value for client
coordinates['Sao_Paulo'] = [-46.6361, -23.5475]
coordinates['N.Virginia'] = [-77.4875, 39.0437]
coordinates['Frankfrut'] = [8.6841, 50.1155]
coordinates['Ireland'] = [-6.2671, 53.3439]
coordinates['Sydney'] = [151.2073, -33.8678]
coordinates['Tokyo'] = [139.6917, 35.6895]
coordinates['Oregon'] = [-122.6762, 45.5235]
coordinates['Singapore'] = [103.8500, 1.2896]
coordinates['N.California'] = [-122.4194, 37.7749]
for item in coordinates:
distance = (math.sin(0.0174532925199*(90.0 - coordinates[item][1]))*math.sin(x_cord)*math.cos(0.0174532925199*(coordinates[item][0])-y_cord)+math.cos(0.0174532925199*(90.0 - coordinates[item][1]))*math.cos(x_cord))
        arc_distance = (math.acos(distance))*3959 # Spherical law of cosines (not Haversine) gives the central angle; multiplying by Earth's radius in miles (3959) converts it to an arc distance
results.append(arc_distance)
print "The best ip is", EC2_Hosts_IP[results.index(min(results))] #Prints the nearest replica for the requesting client
return EC2_Hosts_IP[results.index(min(results))]
def quest_construct(Q): # DNS Question Header
initial = list()
name =''
start=12
diff=ord(Q[start])
while diff!=0: # Extracting the Hostname
var=Q[start+1:start+diff+1]
initial.append(var)
start=start+diff+1
diff=ord(Q[start])
for item in initial:
name += item + '.'
return Q[12:start+5],name[:-1]
def execute(query,sock):
while True:
DQ = query[0]
global q_header
q_header = struct.unpack('!HHHHHH', DQ[:12]) # Extract Header Question in repect to the generated ID number
global ident
ident = q_header[0]
question,var = quest_construct(DQ)
dns_response = head_construct(ident) + question + ans_construct(search_best_replica(query[1][0]))
sock.sendto(dns_response,query[1]) # Sending the DNS response to the client
sys.exit()
###################################################THE MAIN FUNCTION#######################################################
def Main(): # Defining main function
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Socket creation
sock.bind(('',p_num))
while True: # Handling client request
incoming_request = sock.recvfrom(62000)
new_request = threading.Thread(target=execute,args=(incoming_request,sock)) # Creating thread for each new request
new_request.start()
if sys.argv[1]=='-p' and sys.argv[3]=='-n': # Check Args are correct or not on command line
try:
if int(sys.argv[2])>= 40000 and int(sys.argv[2])<=65535: # Checking for the valid port number
p_num = int(sys.argv[2])
else:
print "Port range should be between 40000 to 65535"
except ValueError:
print "Wrong Port Number"
sys.exit()
else:
print "Wrong arguments:Follow format: ./dnsserver -p <port> -n <name>"
sys.exit()
if __name__=='__main__': # Calling on main function
Main()
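For reference, the inline distance expression above extracted into a standalone helper that takes latitude/longitude directly instead of colatitude (the function name is an invention of this note):
import math
EARTH_RADIUS_MILES = 3959  # the same constant the server multiplies by
def great_circle_miles(lat1, lon1, lat2, lon2):
    # spherical law of cosines, algebraically equivalent to the inline formula
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dlon = math.radians(lon2 - lon1)
    c = math.sin(p1)*math.sin(p2) + math.cos(p1)*math.cos(p2)*math.cos(dlon)
    return math.acos(max(-1.0, min(1.0, c))) * EARTH_RADIUS_MILES
# e.g. Frankfurt (50.1155, 8.6841) to Dublin (53.3439, -6.2671) comes out near 675 miles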
|
UTF-8
|
Python
| false
| false
| 5,413
|
py
| 8
|
dnsserver.py
| 5
| 0.535747
| 0.45908
| 0.000369
| 115
| 46.078261
| 230
|
ipipip1735/PythonBasics
| 7,413,113,599,480
|
81564b1add53843870b53a0cfc53d013c2899b20
|
d354ddcd86dc83b0ecaeda0e5fd7e33d0036c319
|
/buildIn/DataTypes/collectionsModule.py
|
d61d1aed9912f6e290c6d141b933ec1ac8d8bfc8
|
[] |
no_license
|
https://github.com/ipipip1735/PythonBasics
|
1eff2fa8c750f6f9ffff855fdffcfec5dd5fbfa1
|
b52e2850cb0f30573eaadd00ffc48f5c5c723fb6
|
refs/heads/master
| 2022-05-01T17:57:50.103372
| 2022-03-15T21:45:04
| 2022-03-15T21:45:04
| 251,281,482
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import collections
# '''
# the collections module: ChainMap
# '''
# dictOne = {'one':111, 'two':222}
# dictTwo = {'two':999, 'three':333}
# chainMap = collections.ChainMap(dictOne, dictTwo)
# list = list(chainMap)  # export the keys
# print(list)
# dict = dict(chainMap)  # export as a plain dict
# print(dict)
# copying (new_child)
# dictOne = {'one':111, 'two':222}
# dictTwo = {'two':999, 'three':333}
# chainMap = collections.ChainMap(dictOne, dictTwo)
# print(chainMap)
#
# s = chainMap.new_child({'four':444})
# print(s)
# print(chainMap)
# parent maps
# chainMap = collections.ChainMap({'one':111}, {'two':222, 'three':333}, {'four':444})
# # chainMap = collections.ChainMap({'one':111}, {'two':222, 'three':333}, {'four':444})
# print(chainMap.parents) # returns every dict except the first
# updates propagate
# dictOne = {'one':111, 'two':222}
# dictTwo = {'two':999, 'three':333}
# chainMap = collections.ChainMap(dictOne, dictTwo)
# print(chainMap)
# d = dictOne.update({'one':888})
# print(chainMap)
# converting to a list
# dictOne = {'one':111, 'two':222}
# dictTwo = {'two':999, 'three':333}
# chainMap = collections.ChainMap(dictOne, dictTwo)
#
# list = chainMap.maps #the maps attribute is the list view of this object
# print(chainMap.maps)
# print(list)
#
# dictOne.update({'four':444})
#
# print(chainMap.maps)
# print(list)
# '''
# the collections module: deque
# '''
# q = collections.deque([11])
# q.append(12)
# print(q)
#
# q.extend([23,28])
# print(q)
#
# q.extendleft([31,32])
# print(q)
#
# print(q.pop())
# print(q)
# print(q.popleft())
# print(q)
# q.rotate(1)
# print(q)
# q.rotate(2)
# print(q)
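Since every demo above is commented out, a small runnable recap of the behaviours the notes describe (first-match lookup, live updates, new_child, and deque rotation):
if __name__ == '__main__':
    d1 = {'one': 111, 'two': 222}
    d2 = {'two': 999, 'three': 333}
    cm = collections.ChainMap(d1, d2)
    print(cm['two'])                            # 222 -- the first mapping wins
    d1['two'] = 555
    print(cm['two'])                            # 555 -- updates show through
    print(cm.new_child({'four': 444})['four'])  # 444
    dq = collections.deque([11])
    dq.extendleft([31, 32])                     # deque([32, 31, 11])
    dq.rotate(1)
    print(dq)                                   # deque([11, 32, 31])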
|
UTF-8
|
Python
| false
| false
| 1,567
|
py
| 23
|
collectionsModule.py
| 21
| 0.63618
| 0.572293
| 0
| 79
| 17.835443
| 89
|
atmani-massinissa/UD2OIE
| 12,687,333,406,807
|
7ad174fecef071abdd735095de45d6dc94e83dfb
|
364c771b7d74d598a193cea9c4383a6de92bc84b
|
/train.py
|
e54737a9e3f2c0ec517b34f78887e5417177d02e
|
[] |
no_license
|
https://github.com/atmani-massinissa/UD2OIE
|
5513c9cfe4bdd992a1bff46365ca52d47dabdee0
|
4c972cdf8cdcb8b10abfb295b61b3c0f624ea4a8
|
refs/heads/master
| 2022-03-08T18:54:46.530384
| 2022-02-09T16:58:03
| 2022-02-09T16:58:03
| 346,315,750
| 3
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import warnings
import os
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore')
warnings.filterwarnings('ignore')
def warn(*args, **kwargs):
pass
warnings.warn = warn
#warnings.filterwarnings("error")
logging.getLogger('tensorflow').setLevel(logging.INFO)
logging.getLogger().setLevel(logging.INFO)
from UM import PreProcessing
pp = PreProcessing()
pp.preprocess()
pp.train()
#pp.predict(os.path.join("Multi_en_Arg/pb"),os.path.join("Multi_en_Pred/pb"))
|
UTF-8
|
Python
| false
| false
| 563
|
py
| 107
|
train.py
| 17
| 0.765542
| 0.763766
| 0
| 18
| 30.277778
| 77
|
bonoron/Atcoder
| 8,211,977,486,511
|
c0f12e782a41549cdb57001b2c1f42f89eff4f66
|
cba8f623e613cfb0cdba73fb373bec68f7bbfdcb
|
/ARC051A.py
|
871c7455726b0fd557e093b392cc81300573b42b
|
[] |
no_license
|
https://github.com/bonoron/Atcoder
|
7d0af77a12b40ce2bdebf5ab5a76462629a03ea5
|
e8c0d1ed7d113a0ea23e30d20e8d9993ba1430fa
|
refs/heads/master
| 2022-12-24T20:00:32.702741
| 2020-09-24T07:03:48
| 2020-09-24T07:03:48
| 271,685,972
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
def dis(x1, y1, x2, y2):
return ((x2-x1)**2 + (y2-y1)**2)**0.5
x1,y1,r=map(int,input().split())
x2,y2,x3,y3=map(int,input().split())
print("NO" if (x2<=x1-r)and(x1+r<=x3)and(y2<=y1-r)and(y1+r<=y3) else "YES")
print("NO" if (r>=dis(x1,y1,x2,y2))and(r>=dis(x1,y1,x2,y3))and(r>=dis(x1,y1,x3,y2))and(r>=dis(x1,y1,x3,y3)) else "YES")
|
UTF-8
|
Python
| false
| false
| 343
|
py
| 221
|
ARC051A.py
| 221
| 0.568513
| 0.446064
| 0
| 9
| 36.333333
| 119
|
lcrepet/AdventOfCode2018
| 13,554,916,786,470
|
2ff2154d0c219b376edde03951980723091a58e1
|
0ea3e4698791964bb0ac8c3d07252d818dfabedc
|
/python/day_3/tests/test_overlapped_claims.py
|
d411161421021dead59bdd16992591e5441f44f5
|
[] |
no_license
|
https://github.com/lcrepet/AdventOfCode2018
|
2a1c5229c1d445c10587748330db8074e7b47c17
|
7efa1483ed21eb1c30bc01157a7129cf538bf6a2
|
refs/heads/master
| 2020-04-10T06:27:35.332561
| 2019-07-19T06:55:54
| 2019-07-19T06:55:54
| 160,854,976
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from overlapped_claims import *
import fabric
def test_count_overlapped_claims():
claims = [fabric.Claim('1', 1, 3, 4, 4),
fabric.Claim('2', 3, 1, 4, 4),
fabric.Claim('3', 5, 5, 2, 2)]
assert count_overlapped_claims(claims, 7, 7) == 4
def test_count_overlapped_claims_with_no_claim():
assert count_overlapped_claims([], 0, 0) == 0
|
UTF-8
|
Python
| false
| false
| 374
|
py
| 46
|
test_overlapped_claims.py
| 28
| 0.601604
| 0.545455
| 0
| 12
| 30.166667
| 53
|
danwillscott/pyFun
| 7,181,185,345,416
|
e477f8f6fd48c2bb7f221004aa9e3000f6c95004
|
d564c631423350dbf9ea55b7e6f28620b6b74290
|
/avg.py
|
cbebc49deb56d47dda5c2aff7ec8f3bf3eddd1a6
|
[] |
no_license
|
https://github.com/danwillscott/pyFun
|
32759f9fe9e0f31f2510149865d25f6a6d11a093
|
eada1f29a42cf5cb96aa4b5e3b2573af1098ecd3
|
refs/heads/master
| 2021-01-12T02:04:13.260364
| 2017-01-10T18:44:26
| 2017-01-10T18:44:26
| 78,465,028
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
print "This will give you the average of a list!"
def avgOfList(a_list):
list_len = len(a_list)
list_sum = 0
for num in a_list:
list_sum = list_sum + num
list_avg = list_sum / list_len
print list_avg
a = [1, 2, 5, 10, 255, 3]
avgOfList(a)
|
UTF-8
|
Python
| false
| false
| 248
|
py
| 13
|
avg.py
| 13
| 0.645161
| 0.604839
| 0
| 13
| 18.153846
| 49
|
MartinToilet/vector-field
| 12,403,865,595,314
|
1bdc6b66c1b94d0ae9e7473f4cba36e2df24e80b
|
d25c7a586d8b8c6c7a033d247f53219cbb96df24
|
/game.py
|
d19464fe82d660a94e860443a7eb9bbc313bbf76
|
[] |
no_license
|
https://github.com/MartinToilet/vector-field
|
54e87c0c3f9387125fa41c11203835d15d2b5c32
|
b3218b2b456a507516f4d45e60fc5c9615b25a5f
|
refs/heads/master
| 2022-10-19T01:10:40.804568
| 2020-06-12T07:20:53
| 2020-06-12T07:20:53
| 271,456,538
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import pygame
import random
import math
import os
import sys
def resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
width = 1600
height = 1000
window = pygame.display.set_mode((width, height))
class Dot:
image = pygame.image.load(resource_path('images/lolguy.png')).convert_alpha()
image = pygame.transform.scale(image, (100, 100))
def __init__(self, x, y):
self.x = x
self.y = y
self.vx = 0
self.vy = 0
    def tick(self):
        # (rx, ry): position of this dot relative to the mouse cursor
        rx = self.x - pygame.mouse.get_pos()[0]
        ry = self.y - pygame.mouse.get_pos()[1]
        if not pygame.mouse.get_pressed()[0]:
            # Euler step, then sample the vector field at (rx, ry)
            self.x += self.vx
            self.y += self.vy
            self.vx = 0.0001 * (10 + rx + -ry ** 2)
            self.vy = 0.0001 * (2 * ry - rx ** 2)
        else:
            # while the left button is held, dots accelerate away from the cursor
            self.vx = 0.1 * (self.x - pygame.mouse.get_pos()[0])
            self.vy = 0.1 * (self.y - pygame.mouse.get_pos()[1])
self.x += self.vx
self.y += self.vy
if math.sqrt(rx ** 2 + ry ** 2) > width * 2:
self.x = random.randint(0, width)
self.y = random.randint(0, height)
self.vx = 0
self.vy = 0
window.blit(Dot.image, (int(self.x), int(self.y)))
pygame.display.set_caption("Vector Field")
crashed = False
clock = pygame.time.Clock()
entities = []
for i in range(120):
entities.append(Dot(random.randint(0, width), random.randint(0, height)))
while not crashed:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
window.fill((150, 100, 150))
for e in entities:
e.tick()
pygame.display.update()
clock.tick(60)
pygame.quit()
quit()
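For readability, the field sampled in Dot.tick can be factored into a pure function of the mouse-relative position; a sketch (the name `field` is an invention of this note):
def field(rx, ry):
    # the same polynomial field Dot.tick samples each frame
    return (0.0001 * (10 + rx + -ry ** 2),
            0.0001 * (2 * ry - rx ** 2))
# inside Dot.tick: self.vx, self.vy = field(rx, ry)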
|
UTF-8
|
Python
| false
| false
| 1,848
|
py
| 1
|
game.py
| 1
| 0.550866
| 0.516775
| 0
| 81
| 21.814815
| 81
|
darkcoders321/Torikus-p
| 8,289,286,888,136
|
247af585606b77449b06aa76b72a247a9de7411e
|
e3b5611d61c009023a5664745e8d9c78995fab18
|
/python/lyn/pow.py
|
733a6a54288446ce4c38b6fdc6cc9e5d15eab410
|
[] |
no_license
|
https://github.com/darkcoders321/Torikus-p
|
9d10bf5d205855d7fde2c4a827757e0509f56616
|
be84b40d220410eb0316f054558354ae3e625b9f
|
refs/heads/master
| 2021-01-09T13:56:53.484014
| 2020-02-23T01:15:33
| 2020-02-23T01:15:33
| 242,328,088
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
chanceoftails = 0.5
inarowtails = 3
print(pow(chanceoftails, inarowtails))
chanceofone = .167
inarowone = 2
print(pow(chanceofone, inarowone))
|
UTF-8
|
Python
| false
| false
| 144
|
py
| 51
|
pow.py
| 50
| 0.770833
| 0.722222
| 0
| 7
| 19.571429
| 38
|
Hender-hs/Image-DB-in-Flask
| 15,058,155,368,316
|
13600239d2b4b0f34ba66b20de98e6279a3a7eca
|
63c2ba0e46b479331b045e71c567b711129e000f
|
/app/modules/especific_file.py
|
33ae3e54e84fadf96b2d1bf46064019d2ba1d577
|
[] |
no_license
|
https://github.com/Hender-hs/Image-DB-in-Flask
|
fd0065a5a93abd17aaef49da3adff5b9b7a598ea
|
333a8a6bbe977094af19f2563ac54abca8b6ef7e
|
refs/heads/master
| 2023-07-23T13:33:36.624899
| 2021-09-05T21:34:37
| 2021-09-05T21:34:37
| 397,964,102
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
from os import walk
def get_especific_file_to_download(file_name: str, UPLOAD_FOLDER: str) -> tuple :
def getting_current_file_dir() -> list :
current_dir_files = []
for item in walk(UPLOAD_FOLDER):
dir_files_list = item[2]
if len(dir_files_list) == 0:
continue
current_dir_files.extend(dir_files_list)
return current_dir_files
current_dir_files = getting_current_file_dir()
file_to_download = ''.join(list(filter(lambda item: item.lower() == file_name.lower(), current_dir_files)))
    def find_extension_of_file_to_download() -> str :
        # split('.')[-1] yields the file extension, not a path
        return file_to_download.split('.')[-1]
    file_extension = find_extension_of_file_to_download()
    return (file_to_download, file_extension)
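A minimal usage sketch; the file name and upload directory below are placeholders:
# hypothetical call: look up 'report.pdf' somewhere under /tmp/uploads
name, extension = get_especific_file_to_download('report.pdf', '/tmp/uploads')
print(name, extension)  # ('report.pdf', 'pdf') if the file exists, ('', '') otherwise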
|
UTF-8
|
Python
| false
| false
| 818
|
py
| 16
|
especific_file.py
| 14
| 0.581907
| 0.57824
| 0
| 36
| 21.722222
| 111
|
jonespm/etc-jupyterlab-telemetry
| 8,942,121,934,677
|
71a3b822f9b15818d435a640465182c34fdaf87e
|
546b409e81d456c7592893ff8bee95e453f91449
|
/etc-jupyterlab-telemetry/handlers.py
|
91aea543dfc2a075621a1d8d379c8f43ba5b0699
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/jonespm/etc-jupyterlab-telemetry
|
c48cecdd97e4701ed349d3aed5511d2e1cdc5424
|
af296bf8401c4e0ca5ce2b73a6399cb61163fc29
|
refs/heads/main
| 2023-04-24T06:01:07.972684
| 2021-05-12T19:23:32
| 2021-05-12T19:23:32
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import os
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
import tornado
class RouteHandler(APIHandler):
# The following decorator should be present on all verb methods (head, get, post,
# patch, put, delete, options) to ensure only authorized user can request the
# Jupyter server
@tornado.web.authenticated
def get(self, resource):
if resource == 'id':
workspace_id = os.getenv("WORKSPACE_ID") if os.getenv("WORKSPACE_ID") is not None else "UNDEFINED"
result = json.dumps(workspace_id)
self.finish(result)
if resource == 'config':
with open(os.path.join(os.path.dirname(__file__), 'config.json'), 'r') as config_json:
self.finish(config_json.read())
def setup_handlers(web_app):
host_pattern = ".*$"
base_url = web_app.settings["base_url"]
route_pattern = url_path_join(base_url, "etc-jupyterlab-telemetry", "(.*)")
handlers = [(route_pattern, RouteHandler)]
web_app.add_handlers(host_pattern, handlers)
|
UTF-8
|
Python
| false
| false
| 1,103
|
py
| 9
|
handlers.py
| 5
| 0.657298
| 0.657298
| 0
| 30
| 35.766667
| 110
|
rscnt/etl-pipeline
| 609,885,382,423
|
82d00099a1b6b00839fb9115dc20ac2c12dca516
|
4de59ac9d699f1ca1dc6f8253c7d44ea5aa880a4
|
/pipeline/pipeline/tasks/cities_metrics_v1.py
|
c4013375eb1da84ded4beeac8c28d629449db50a
|
[] |
no_license
|
https://github.com/rscnt/etl-pipeline
|
73435807a531e98e860b54f0c1bab410c269f1bf
|
2e9aecdc6600e9fea24256664ca6091b5655b0ab
|
refs/heads/master
| 2023-01-03T16:35:21.925822
| 2020-10-18T11:07:25
| 2020-10-18T11:07:25
| 289,469,955
| 1
| 0
| null | true
| 2021-11-18T20:43:46
| 2020-08-22T10:50:19
| 2020-10-18T11:07:28
| 2020-10-18T12:29:50
| 5,744
| 0
| 0
| 0
|
Jupyter Notebook
| false
| false
|
"""Contains the code written for the data_pipeline as Luigi tasks"""
import datetime
import json
from datetime import date, timedelta
import luigi
import requests
import pandas as pd
from pipeline.extract_history_v1 import extract_history_command
from pipeline.calculate_metrics_v1 import (
calculate_city_states_without_hospitalizations,
calculate_city_states_hospitalizations,
reset_hospitalization_percentages,
calculate_city_stats_with_hospitalizations,
)
from .dropbox import dropbox_target, textio2stringio
def hospitalization_csv_path(city_name, date):
return f"/data/metrics/{city_name}/{date}-hospitalization.csv"
class FetchCovid19IndiaDataTask(luigi.Task):
url = luigi.Parameter(
default="https://raw.githubusercontent.com/covid19india/api/gh-pages/v4/data-all.json"
)
date = luigi.DateParameter(default=date.today())
def requires(self):
return None
def run(self):
response = requests.get(self.url)
with self.output().open("w") as output:
output.write(response.text)
def output(self):
return dropbox_target(f"/data/covid19api/{self.date}.json")
class ExtractHistoryTask(luigi.Task):
date = luigi.DateParameter(default=date.today())
states_and_districts = luigi.DictParameter()
def requires(self):
return FetchCovid19IndiaDataTask(date=self.date)
def output(self):
return dropbox_target(f"/data/history/{self.date}.csv")
def run(self):
with self.input().open("r") as input_file, self.output().open(
"w"
) as output_file:
extract_history_command(
textio2stringio(input_file), self.states_and_districts, output_file
)
class CalculateMetricsWithoutHospitalizationTask(luigi.Task):
date = luigi.DateParameter(default=date.today())
states_and_districts = luigi.DictParameter()
city_name = luigi.Parameter()
def requires(self):
return ExtractHistoryTask(
date=self.date, states_and_districts=self.states_and_districts
)
def output(self):
return dropbox_target(
f"/data/metrics/{self.city_name}/{self.date}-metrics-without-hospitalization.csv"
)
def run(self):
with self.input().open("r") as input_file, self.output().open(
"w"
) as output_file:
calculate_city_states_without_hospitalizations(
textio2stringio(input_file), output_file, self.city_name
)
class CreateDefaultHosptializationTask(luigi.Task):
date = luigi.DateParameter()
metrics_date = luigi.DateParameter()
states_and_districts = luigi.DictParameter()
city_name = luigi.Parameter()
default_hospitalization_path = luigi.Parameter(
default="/data/metrics/hospitalization.csv"
)
def requires(self):
return CalculateMetricsWithoutHospitalizationTask(
date=self.metrics_date,
states_and_districts=self.states_and_districts,
city_name=self.city_name,
)
def output(self):
return dropbox_target(self.default_hospitalization_path)
def run(self):
with self.output().open("w") as output_file, self.input().open(
"r"
) as metrics_input_file:
reset_hospitalization_percentages(
textio2stringio(metrics_input_file), output_file, self.city_name
)
class FetchHospitalizationTask(luigi.Task):
date = luigi.DateParameter()
metrics_date = luigi.DateParameter()
city_name = luigi.Parameter()
states_and_districts = luigi.DictParameter()
def requires(self):
return CreateDefaultHosptializationTask(
date=self.date,
metrics_date=self.metrics_date,
states_and_districts=self.states_and_districts,
city_name=self.city_name,
)
def output(self):
return dropbox_target(hospitalization_csv_path(self.city_name, self.date))
def run(self):
with (self.input().open("r")) as previous_hospitalization_file, (
self.output().open("w")
) as output_file:
output_file.write(previous_hospitalization_file.read())
class CreateHospitalizationTask(luigi.Task):
date = luigi.DateParameter()
metrics_date = luigi.DateParameter()
states_and_districts = luigi.DictParameter()
city_name = luigi.Parameter()
def requires(self):
return CalculateMetricsWithoutHospitalizationTask(
date=self.metrics_date,
states_and_districts=self.states_and_districts,
city_name=self.city_name,
)
def output(self):
return dropbox_target(hospitalization_csv_path(self.city_name, self.date))
def run(self):
yesterday_hospitalization = yield FetchHospitalizationTask(
date=self.date - timedelta(days=1),
metrics_date=self.metrics_date,
city_name=self.city_name,
states_and_districts=self.states_and_districts,
)
with (yesterday_hospitalization.open("r")) as previous_hospitalization_file, (
self.input().open("r")
) as metrics_file, (self.output().open("w")) as output_file:
calculate_city_states_hospitalizations(
textio2stringio(previous_hospitalization_file),
textio2stringio(metrics_file),
output_file,
self.city_name,
)
class CalculateMetricsTask(luigi.Task):
date = luigi.DateParameter(default=date.today())
states_and_districts = luigi.DictParameter()
city_name = luigi.Parameter()
def requires(self):
return CalculateMetricsWithoutHospitalizationTask(
date=self.date,
states_and_districts=self.states_and_districts,
city_name=self.city_name,
)
def output(self):
return dropbox_target(
f"/data/metrics/{self.city_name}/{self.date}-metrics-with-hospitalization.csv"
)
def run(self):
hospitalization = yield CreateHospitalizationTask(
date=self.date,
metrics_date=self.date,
city_name=self.city_name,
states_and_districts=self.states_and_districts,
)
with (self.input().open("r")) as metrics_without_hosptialization, (
hospitalization.open("r")
) as hospitalization_data, (self.output().open("w")) as output_file:
calculate_city_stats_with_hospitalizations(
textio2stringio(metrics_without_hosptialization),
textio2stringio(hospitalization_data),
output_file,
self.city_name,
)
if __name__ == "__main__":
luigi.run()
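Besides luigi.run(), a specific task can be built in-process; a sketch where the city name and the shape of states_and_districts are placeholders, not real pipeline config:
def run_locally():
    luigi.build(
        [CalculateMetricsTask(
            city_name="Mumbai",                        # hypothetical
            states_and_districts={"MH": ["Mumbai"]},   # hypothetical shape
        )],
        local_scheduler=True,
    )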
|
UTF-8
|
Python
| false
| false
| 6,779
|
py
| 51
|
cities_metrics_v1.py
| 21
| 0.642425
| 0.639475
| 0
| 209
| 31.435407
| 94
|
multikillerr/Hacking
| 6,356,551,619,523
|
e16b16c17fd17850b0e05db85353ddc192ba0510
|
8818c1ae07d328c017cbd3596c67af444f1ba05f
|
/file.py
|
34adae560073a8ce8179a9cc134caf67dca866cd
|
[] |
no_license
|
https://github.com/multikillerr/Hacking
|
d275241f3d6a9b42406f3d4ff802d8ed06514219
|
8339df4478f6644a4eea3fd8a033d3f500467d0d
|
refs/heads/master
| 2021-07-24T08:46:14.077091
| 2019-01-20T04:26:05
| 2019-01-20T04:26:05
| 153,230,112
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# Python 2 script (raw_input); under Python 3 use input() instead.
filename = raw_input("Enter the file name: ")
def copy_columns(src_name, dst_name, start, end):
    # Copy one fixed column slice of every line in src_name into dst_name.
    with open(src_name, "r") as src, open(dst_name, "a") as dst:
        for line in src:
            dst.write(line[start:end])
            dst.write("\n")
copy_columns(filename, "Test1.txt", 0, 8)
copy_columns(filename, "Test2.txt", 9, 49)
copy_columns(filename, "Test3.txt", 51, 66)
|
UTF-8
|
Python
| false
| false
| 674
|
py
| 16
|
file.py
| 12
| 0.52819
| 0.501484
| 0
| 29
| 22.241379
| 43
|
nedagarmo/Libraries
| 6,846,177,892,959
|
8b94cf5f66a7a8f96a5ea613483f791ff9927809
|
b27842867322b13b6e9d4d91f58d14d29c53ce88
|
/src/application/exceptions.py
|
73fb3deb2508ea0c39d954c8743e7bbe397fd0ce
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/nedagarmo/Libraries
|
50591e02b9720c5f7ab1571435e72e96dbfc6ad9
|
7618bd329593684d475a9e64a38409ffb30697df
|
refs/heads/main
| 2023-05-27T18:46:11.509015
| 2021-06-21T01:33:13
| 2021-06-21T01:33:13
| 376,500,863
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
class WrongDataException(Exception):
def __init__(self, data=None):
self.data = data or {}
def to_dict(self):
return self.data
|
UTF-8
|
Python
| false
| false
| 153
|
py
| 34
|
exceptions.py
| 29
| 0.601307
| 0.601307
| 0
| 7
| 20.857143
| 36
|
saeedeMasoudinejad/BootCamp_Projects
| 12,807,592,522,171
|
040b54eaac62ea4457252f04c0a5d84ef6bf01ce
|
f4cf658f4bb4c981a8485d17d95b26cfbd239f86
|
/Online_shop/OnlineShopProject/loginapp/urls.py
|
3b0efc0606ecd0cc81e457fdb6ebf51a49cba322
|
[] |
no_license
|
https://github.com/saeedeMasoudinejad/BootCamp_Projects
|
968867c856b14dea5311e2d16ddc7b2f441b4007
|
bbe483ca3993382a320762a916893801b62924c0
|
refs/heads/master
| 2023-04-27T10:23:19.514915
| 2020-03-22T14:21:04
| 2020-03-22T14:21:04
| 231,568,924
| 1
| 0
| null | false
| 2023-04-21T20:45:02
| 2020-01-03T10:54:31
| 2020-03-23T14:29:24
| 2023-04-21T20:45:02
| 33,308
| 1
| 0
| 2
|
Python
| false
| false
|
from django.urls import path, include
from . import views
from .views import SignUp, Profile
from django.views.decorators.csrf import csrf_exempt
# from OnlineShopProject.contentapp import urls
urlpatterns = [
path('home/', views.load_main_page, name='home'),
path('signup/', SignUp.as_view(), name='register'),
path('accounts/', include('django.contrib.auth.urls')),
path('profile/', csrf_exempt(Profile.as_view()), name='profile'),
]
|
UTF-8
|
Python
| false
| false
| 452
|
py
| 84
|
urls.py
| 64
| 0.714602
| 0.714602
| 0
| 12
| 36.75
| 69
|
chagan1985/PythonByExample
| 2,645,699,864,805
|
2fa66245a2138d024d216f420435a23056fa1624
|
48cb0bd14a0f83e9f7ec58a76ac2bc6b029ff6f5
|
/ex046.py
|
b1fce77e211364a21bf31ef11af4d03eacbf0174
|
[] |
no_license
|
https://github.com/chagan1985/PythonByExample
|
c8aa81f9d90c8f5dde3bac6357880b3b080b9443
|
db1d7614542fda09dfdd6d04aa90bdf3e181b671
|
refs/heads/master
| 2022-12-31T11:21:57.787270
| 2020-10-20T19:39:49
| 2020-10-20T19:39:49
| 271,625,654
| 0
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#####
# Python By Example
# Exercise 046
# Christopher Hagan
#####
num = 0
while num < 5:
num = int(input('Enter a number: '))
print('The last number entered was {}'.format(num))
|
UTF-8
|
Python
| false
| false
| 185
|
py
| 150
|
ex046.py
| 147
| 0.621622
| 0.594595
| 0
| 11
| 15.818182
| 51
|
hanwufeng/Programs
| 1,529,008,357,649
|
272346b59113688a7c2d469625b8b38b5df09a17
|
e5a9d6a5d8901416ead1712e85478087f05a0a3c
|
/program/switch_run.py
|
4060c808baf04d3f2f92d785744fa5cfb5aae6a1
|
[] |
no_license
|
https://github.com/hanwufeng/Programs
|
58076d1edbc723709d143975142a5f8b9c2defcd
|
625f3bbddd2dcb70e705a38b5990c56e086c7f66
|
refs/heads/master
| 2020-05-29T19:25:30.157002
| 2019-05-30T02:18:00
| 2019-05-30T02:18:00
| 189,329,785
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# !/usr/bin/python
import paramiko
import threading
import time
import os
from conf import *
# 拿到cmd.txt文件中的命令
with open('./cmd.txt', 'r') as f:
cmd_line = f.readlines()
cmd = []
for c in cmd_line:
cmd.append(c)
# 定义连接与操作
def ssh2(ip, username, passwd, cmd):
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, 22, username, passwd, timeout=5)
ssh_shell = ssh.invoke_shell()
for m in cmd:
res = ssh_shell.sendall(m)
time.sleep(float(1))
print
ssh_shell.recv(1024)
ssh.close()
except:
print
'%s\tError\n' % (ip)
if __name__ == '__main__':
print
"Begin......"
for key in swip:
ip = swip[key]
a = threading.Thread(target=ssh2, args=(ip, username, passwd, cmd))
a.start()
|
UTF-8
|
Python
| false
| false
| 937
|
py
| 18
|
switch_run.py
| 15
| 0.560088
| 0.54796
| 0
| 41
| 21.121951
| 75
|
amito/rpi-surv-cam
| 14,147,622,302,169
|
7785efd90d9834eb355d77ae0ee0f9944de28096
|
07eb6608c25cde5894432a79f4c37ac1616cd2fa
|
/servo/maestropacket.py
|
4c7e50535289cb2016afd9cfbd5d49237fb1a0d3
|
[] |
no_license
|
https://github.com/amito/rpi-surv-cam
|
ceaaec9f03d8a4b20004d88378ac79c9135e41e1
|
30d5604e2834f39fc5b2b1c8bd6fc01f2126803d
|
refs/heads/master
| 2021-01-17T15:31:24.689196
| 2016-05-27T08:08:38
| 2016-05-27T08:08:38
| 48,494,390
| 0
| 1
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import struct
import logging
from analogpacket import AnalogPacket
logger = logging.getLogger("servoLogger")
class MaestroPacket(AnalogPacket):
_HEADER = struct.pack('BB', 0xaa, 0x0c)
#################################
#### Maestro Channel Packets ####
#################################
# Maestro protocol packets that act on a specific motor channel.
class MaestroChannelPacket(MaestroPacket):
"""General packet structure for the Pololu Maestro for commands which act
on a specific channel, using the Pololu protocol."""
def __init__(self, channel, **kwargs):
super(MaestroChannelPacket, self).__init__(channel=channel, **kwargs)
def _prepare_field(self):
raise NotImplementedError(
"_prepare_field must be implemented in inheriting classes.")
class MaestroChannelSetPacket(MaestroChannelPacket):
"""Packets which are used to set different attributes, and don't expect
a response."""
# Command format is given as 4 bytes in Python struct format:
# - 1st byte is for the command indicator
# - 2nd byte is for the channel indicator
# - 3rd byte is for the command value LSB
# - 4th byte is for the command value MSB
_COMMAND_FORMAT = '4B'
__QUARTER_US_MULTIPLIER = 4
def __init__(self, channel, **kwargs):
super(MaestroChannelSetPacket, self).__init__(
channel=channel, **kwargs)
def _compile(self):
return struct.pack(
self._COMMAND_FORMAT,
self._COMMAND_BYTE,
self.channel,
self._prepare_field() & 0x7f,
(self._prepare_field() >> 7) & 0x7f)
def _prepare_field(self):
field = getattr(self, self._COMMAND_FIELD)
return field * self.__QUARTER_US_MULTIPLIER
def __repr__(self):
return "{0}(channel={1}, {2}={3})".format(
self._CLASS_NAME, self.channel,
self._COMMAND_FIELD, getattr(self, self._COMMAND_FIELD))
class MaestroChannelGetPacket(MaestroChannelPacket):
"""Packets which are used to get different attributes, hence expect a
response."""
# Command format is given as 2 bytes in a Python struct format:
# - 1st byte is for the command indicator
# - 2nd byte is for the channel indicator
_COMMAND_FORMAT = '2B'
# Reply format is given as 2 bytes in a Python struct format:
# - 1st byte is for the reply value LSB
# - 2nd byte is for the reply value MSB
_REPLY_FORMAT = '2B'
def __init__(self, channel):
super(MaestroChannelGetPacket, self).__init__(channel=channel)
def _compile(self):
return struct.pack(
self._COMMAND_FORMAT, self._COMMAND_BYTE, self.channel)
def _get_answer(self, channel):
raise NotImplementedError(
"_get_answer should be implemented in inhereting classes.")
def __repr__(self):
return "{0}(channel={1})".format(
self._CLASS_NAME, self.channel)
class MaestroSetTarget(MaestroChannelSetPacket):
_CLASS_NAME = 'MaestroSetTarget'
_COMMAND_BYTE = 0x04
_COMMAND_FIELD = 'target'
def __init__(self, channel, target):
super(MaestroSetTarget, self).__init__(channel=channel, target=target)
class MaestroSetSpeed(MaestroChannelSetPacket):
_CLASS_NAME = 'MaestroSetSpeed'
_COMMAND_BYTE = 0x07
_COMMAND_FIELD = 'speed'
def __init__(self, channel, speed):
super(MaestroSetSpeed, self).__init__(channel=channel,
speed=speed)
class MaestroSetAcceleration(MaestroChannelSetPacket):
_CLASS_NAME = 'MaestroSetAcceleration'
_COMMAND_BYTE = 0x09
_COMMAND_FIELD = 'acceleration'
def __init__(self, channel, acceleration):
super(MaestroSetAcceleration, self).__init__(channel=channel,
acceleration=acceleration)
class MaestroGetPosition(MaestroChannelGetPacket):
_CLASS_NAME = 'MaestroGetPosition'
_COMMAND_BYTE = 0x10
def __init__(self, channel):
super(MaestroGetPosition, self).__init__(channel=channel)
def _get_answer(self, channel):
        # We expect a 2-byte response:
lsb, = struct.unpack('B', channel.receive())
msb, = struct.unpack('B', channel.receive())
position = ((msb << 8) + lsb) / 4
logger.debug("Got position {0} from channel {1}".format(
position, self.channel))
return position
################################
#### Maestro Go Home Packet ####
################################
# Maestro protocol packet which sends all servos and outputs to their home
# positions.
class MaestroGoHome(MaestroPacket):
_CLASS_NAME = 'MaestroGoHome'
# Just for documentation:
_COMMAND_BYTE = 0x22
def _compile(self):
return chr(0x22)
def __repr__(self):
return "MaestroGoHome()"
################################
#### Maestro Script Packets ####
################################
# Maestro protocol packets that work with Maestro scripts.
class MaestroScriptPacket(MaestroPacket):
def __init__(self, **kwargs):
super(MaestroScriptPacket, self).__init__(**kwargs)
class MaestroRunScriptSubroutine(MaestroScriptPacket):
_CLASS_NAME = 'MaestroRunScriptSubroutine'
_COMMAND_FORMAT = '2B'
_COMMAND_BYTE = 0x27
_COMMAND_FIELD = 'sub_number'
def __init__(self, sub_number):
super(MaestroRunScriptSubroutine, self).__init__(sub_number=sub_number)
def _compile(self):
return struct.pack(
self._COMMAND_FORMAT,
self._COMMAND_BYTE,
getattr(self, self._COMMAND_FIELD))
def __repr__(self):
return "{0}({1}={2})".format(
self._CLASS_NAME,
self._COMMAND_FIELD,
getattr(self, self._COMMAND_FIELD))
class MaestroRunScriptSubroutineWithParam(MaestroRunScriptSubroutine):
_CLASS_NAME = 'MaestroRunScriptSubroutineWithParam'
_COMMAND_FORMAT = '4B'
_COMMAND_BYTE = 0x28
def __init__(self, sub_number, param):
self.param = param
super(MaestroRunScriptSubroutineWithParam, self).__init__(
sub_number=sub_number)
def _compile(self):
return struct.pack(
self._COMMAND_FORMAT,
self._COMMAND_BYTE,
self.sub_number,
self.param & 0x7f,
(self.param >> 7) & 0x7f)
def __repr__(self):
return "{0}({1}={2}, {3}={4})".format(
self._CLASS_NAME,
'sub_number', self.sub_number,
'param', self.param)
class MaestroStopScriptSubroutine(MaestroScriptPacket):
_CLASS_NAME = 'MaestroStopScriptSubroutine'
_COMMAND_FORMAT = 'B'
_COMMAND_BYTE = 0x24
def __init__(self):
super(MaestroStopScriptSubroutine, self).__init__()
def _compile(self):
return struct.pack(
self._COMMAND_FORMAT,
self._COMMAND_BYTE)
def __repr__(self):
return "{0}()".format(self._CLASS_NAME)
|
UTF-8
|
Python
| false
| false
| 6,998
|
py
| 42
|
maestropacket.py
| 32
| 0.61046
| 0.599457
| 0
| 214
| 31.700935
| 79
|
RamonBomfim/lab-de-prog2
| 11,347,303,601,749
|
57062e3996d7fa65be0921d23eb70b607e1a0624
|
06c962f8d2a7c40ec00a0e11c1d17a2d9080beb0
|
/aula-20-10/teste-conexao.py
|
48a04497c177ab70f74f108029227293ce861334
|
[
"MIT"
] |
permissive
|
https://github.com/RamonBomfim/lab-de-prog2
|
ccdcc9c2ba9896bc76a5a37e6a52d1bb58e031f5
|
fdc054cf092cd1a06d34a2bdc0edcb5826f79775
|
refs/heads/master
| 2023-01-11T05:04:44.673743
| 2020-11-13T23:58:19
| 2020-11-13T23:58:19
| 301,205,879
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
import psycopg2
conn = psycopg2.connect(database='empresa',
user='postgres',
password='941215',
host='localhost',
port='5433')
print("Sucesso na conexão ao Banco")
cursor = conn.cursor()
cursor.execute("SELECT * FROM empregado")
conn.commit()
rows = cursor.fetchall()
for row in rows:
print(row[1])
cursor.close()
conn.close()
|
UTF-8
|
Python
| false
| false
| 434
|
py
| 29
|
teste-conexao.py
| 26
| 0.554273
| 0.524249
| 0
| 21
| 19.619048
| 43
|
harikishoremail/Python
| 14,027,363,222,679
|
2cec75ec3cde59f71b46c6dc3e26500418e1d45c
|
701cc8e58832eeca86a963110a445ef30c1bf294
|
/pattern.py
|
02af26ae61f55b258dc2af3c2cf77671e6ece51f
|
[] |
no_license
|
https://github.com/harikishoremail/Python
|
37ad4bbdd0605c1fea8f1b01ea1161ac4305f625
|
73aa2154b40878a7bdab9b86f3f29709f534768c
|
refs/heads/master
| 2021-01-13T13:48:11.464246
| 2016-12-13T04:43:11
| 2016-12-13T04:43:11
| 76,322,795
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
#!usr/bin/python
# Prints the sequence, then strips one more letter (G, F, ..., B) on each pass.
sequence = "A B C D E F G F E D C B A"
n = len(sequence)
y = sequence
print(sequence)
for i in range(71, 65, -1):  # ord('G') down to ord('B')
    x = ""
    j = 0
    while j <= n - 1:
        if ord(y[j]) == i:
            j += 1  # drop this letter; everything else is copied through
        else:
            x += y[j]
            j += 1
    print(x)
    y = x
    n = len(y)
|
UTF-8
|
Python
| false
| false
| 385
|
py
| 10
|
pattern.py
| 10
| 0.376623
| 0.350649
| 0
| 23
| 15.73913
| 39
|
walefmachado/atuaria
| 6,734,508,750,961
|
6cdf9d256dee3e94d65f1089f378b69993b32014
|
f071d933b721d1f993679624c89c50608e63b246
|
/atuaria/__init__.py
|
c52ea4dea201b721bdff933a9cec662308463020
|
[] |
no_license
|
https://github.com/walefmachado/atuaria
|
3b4627ecc4fd1e5b3d129661fdb9ccf86ceb1349
|
b2a57a6217476660db6e453ff467fc1a83c1fb6c
|
refs/heads/master
| 2020-03-30T03:14:00.031182
| 2018-09-28T00:02:46
| 2018-09-28T00:02:46
| 150,677,002
| 1
| 0
| null | true
| 2018-09-28T02:51:07
| 2018-09-28T02:51:07
| 2018-09-28T02:50:34
| 2018-09-28T00:02:50
| 6
| 0
| 0
| 0
| null | false
| null |
import pandas as pd
import numpy as np
tabuas = pd.read_csv('https://raw.githubusercontent.com/lincolnfrias/dados/master/tabuas-de-vida.csv')
# Whole-life insurance: actuarial present value (and variance) of a benefit b at age `idade`
def sv_vit(i, idade, b, qx):
n = tabuas.idade.max() - idade
px = 1 - qx.values
serie = np.arange(1, n+1)
v = 1/(i+1)**serie
vp2 = (1/(i+1)**2)**serie
qxx = qx[(idade):(idade+n)]
pxx = np.cumprod(px[(idade):(idade+n-1)])
pxx = np.insert(pxx, 0, 1)
Ax = b * np.sum(v*pxx*qxx)
Ax2 = b * np.sum(vp2*pxx*qxx)
Var = (Ax2 - Ax**2)*b
resultado = round(float(Ax), 1), round(float(Ax2), 1), round(float(Var), 1)
return resultado
# Term insurance: the same computation limited to n years
def sv_temp(i, idade, n, b, qx):
px = 1 - qx.values
serie = np.arange(1, n+1)
v = 1/(i+1)**serie
vp2 = (1/(i+1)**2)**serie
qxx = qx[(idade):(idade+n)]
pxx = np.cumprod(px[(idade):(idade+n-1)])
pxx = np.insert(pxx, 0, 1)
Ax = b * np.sum(v*pxx*qxx)
Ax2 = b * np.sum(vp2*pxx*qxx)
Var = (Ax2 - Ax**2)*b
resultado = round(float(Ax), 1), round(float(Ax2), 1), round(float(Var), 1)
return resultado
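Both functions compute the actuarial present value A_x = b * sum_k v^(k+1) * kp_x * q_(x+k). A minimal usage sketch; the mortality column name 'qx' is an assumption about the CSV layout:
qx = tabuas['qx']  # assumption: replace with the real column name in tabuas-de-vida.csv
print(sv_vit(i=0.06, idade=30, b=100000, qx=qx))
print(sv_temp(i=0.06, idade=30, n=20, b=100000, qx=qx))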
|
UTF-8
|
Python
| false
| false
| 1,056
|
py
| 4
|
__init__.py
| 2
| 0.564394
| 0.526515
| 0
| 33
| 31
| 102
|
o-x-y-g-e-n/Web-Scraping-With-Sentiment-Analysis
| 18,442,589,594,455
|
cc34670ea2aa62f183c7107596c3c69ea488a6b7
|
5f998fcb6e8248748526bef2bdfff9eb4c950d58
|
/src/movie_main.py
|
53084ac32e0a1b2077da314eb12b68deb6bc41fd
|
[
"MIT"
] |
permissive
|
https://github.com/o-x-y-g-e-n/Web-Scraping-With-Sentiment-Analysis
|
220f8d41c228fa49933f01fac5dbf6a60f72b21d
|
4f0faaefdbdc515165a58fd53942c2902c3e7372
|
refs/heads/master
| 2022-12-28T06:41:53.205559
| 2020-12-05T11:18:59
| 2020-12-05T11:18:59
| 155,549,376
| 3
| 2
|
MIT
| false
| 2022-07-06T20:56:20
| 2018-10-31T11:53:11
| 2021-06-14T07:30:25
| 2022-07-06T20:56:17
| 4,522
| 3
| 2
| 4
|
JavaScript
| false
| false
|
# imdb user reviews, imdb user ratings scraper
import os
import re
import time
import requests
import pandas as pd
from pandas import ExcelWriter
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from textblob import TextBlob
from xml.dom import minidom
from tqdm import tqdm
from imdb import *
from major import *
from lxml import etree  # used below as `etree`; previously presumably supplied by the star imports
import nltk             # used below for sent_tokenize / word_tokenize
def percentage(part, whole):
temp = 100 * float(part) / float(whole)
return format(temp, '.2f')
#movie_rev()
major_terms = find_majority_terms("movie_reviews")
print(major_terms)
total_count = [0]*len(major_terms)
positive = [0]*len(major_terms)
negative = [0]*len(major_terms)
neutral = [0]*len(major_terms)
positive_tweet = []
negative_tweet = []
neutral_tweet = []
total_tweets=[]
for i in range(0,len(major_terms)):
total_tweets.append([])
for z in total_tweets:
z.append([])
z.append([])
z.append([])
doc = etree.XMLParser(recover=True)
tree = etree.parse('movie_reviews.xml',doc)
for df in tree.xpath('//review'):
subfields = df.getchildren()
i=0
sentences = nltk.sent_tokenize(str(subfields[0].text))
for term in major_terms:
for sentence in sentences:
words = nltk.word_tokenize(sentence)
if term in words:
analysis = TextBlob(sentence)
if(analysis.sentiment.polarity == 0):
neutral[i] +=1
total_tweets[i][0].append(sentence)
elif(analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <=1):
positive[i] +=1
total_tweets[i][1].append(sentence)
elif(analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <=0):
negative[i]+=1
total_tweets[i][2].append(sentence)
total_count[i] +=1
i+=1
for i in range(0, len(major_terms)):
    print("------------" + major_terms[i] + "-------------\n")
    if total_count[i] == 0:
        # without this guard the percentages below would be undefined
        print("No sentences mentioned this term.\n")
        continue
    negativea = percentage(negative[i], total_count[i])
    neutrala = percentage(neutral[i], total_count[i])
    positivea = percentage(positive[i], total_count[i])
    print()
    print(str(positivea) + "% people thought it was positive")
    print(str(neutrala) + "% people thought it was neutral")
    print(str(negativea) + "% people thought it was negative")
    #print("--------------------------------------------\n")
    print("---------negative sentences-------------\n")  # index 2 holds negatives (see legend below)
    for sr in total_tweets[i][2]:
        print(str(sr) + "\n")
    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n")
print()
# total_tweets[i][0] --> neutral
# total_tweets[i][1] --> positive
# total_tweets[i][2] --> negative
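The buckets above hinge on TextBlob's polarity score in [-1, 1]; a quick standalone check of the same thresholds (the actual scores depend on TextBlob's lexicon, so the labels here are illustrative):
from textblob import TextBlob
for s in ["The acting was wonderful.", "The plot was terrible.", "The film runs two hours."]:
    p = TextBlob(s).sentiment.polarity
    label = "neutral" if p == 0 else ("positive" if p > 0 else "negative")
    print("%+.2f  %s  %s" % (p, label, s))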
|
UTF-8
|
Python
| false
| false
| 2,850
|
py
| 21
|
movie_main.py
| 16
| 0.665263
| 0.653684
| 0
| 94
| 29.329787
| 80
|
eblot/pyftdi
| 19,533,511,281,266
|
84fc9954a934ef41098be683a360accb31d76ff3
|
3c1a16e6c44c2272a11eeca2a1e48a247338b4f0
|
/pyftdi/ftdi.py
|
c6e147b6ec67a2a4c2371f9722e4548c42bef499
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/eblot/pyftdi
|
11859a8d68b6e4b3855cac809178a95cf97d495c
|
0d20ccd1ae4542d868a252c492ba6855b4e67906
|
refs/heads/master
| 2023-08-18T08:37:34.782085
| 2023-08-12T16:25:05
| 2023-08-12T16:25:28
| 1,162,758
| 445
| 208
|
NOASSERTION
| false
| 2023-08-12T14:55:52
| 2010-12-13T00:49:37
| 2023-08-12T10:37:45
| 2023-08-12T14:55:51
| 15,166
| 437
| 193
| 73
|
Python
| false
| false
|
# Copyright (c) 2010-2023 Emmanuel Blot <emmanuel.blot@free.fr>
# Copyright (c) 2016 Emmanuel Bouaziz <ebouaziz@free.fr>
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""FTDI core driver."""
from binascii import hexlify
from collections import OrderedDict
from enum import IntEnum, unique
from errno import ENODEV
from logging import getLogger, DEBUG
from struct import unpack as sunpack
from sys import platform
from typing import Callable, Optional, List, Sequence, TextIO, Tuple, Union
from usb.core import (Configuration as UsbConfiguration, Device as UsbDevice,
USBError)
from usb.util import (build_request_type, release_interface, CTRL_IN, CTRL_OUT,
CTRL_TYPE_VENDOR, CTRL_RECIPIENT_DEVICE)
from .misc import to_bool
from .usbtools import UsbDeviceDescriptor, UsbTools
#pylint: disable-msg=invalid-name
#pylint: disable-msg=too-many-arguments
#pylint: disable=too-many-arguments
#pylint: disable=too-many-branches
#pylint: disable=too-many-statements
#pylint: disable=too-many-nested-blocks
#pylint: disable=too-many-instance-attributes
#pylint: disable=too-many-public-methods
#pylint: disable=too-many-locals
#pylint: disable=too-many-lines
class FtdiError(IOError):
"""Base class error for all FTDI device"""
class FtdiFeatureError(FtdiError):
"""Requested feature is not available on FTDI device"""
class FtdiMpsseError(FtdiFeatureError):
"""MPSSE mode not supported on FTDI device"""
class FtdiEepromError(FtdiError):
"""FTDI EEPROM access errors"""
class Ftdi:
"""FTDI device driver"""
SCHEME = 'ftdi'
"""URL scheme for :py:class:`UsbTools`."""
FTDI_VENDOR = 0x403
"""USB VID for FTDI chips."""
VENDOR_IDS = {'ftdi': FTDI_VENDOR}
"""Supported vendors, only FTDI.
To add third parties vendors see :py:meth:`add_custom_vendor`.
"""
PRODUCT_IDS = {
FTDI_VENDOR: OrderedDict((
        # use an ordered dict so that the first occurrence of a PID takes
# precedence when generating URLs - order does matter.
('232', 0x6001),
('232r', 0x6001),
('232h', 0x6014),
('2232', 0x6010),
('2232c', 0x6010),
('2232d', 0x6010),
('2232h', 0x6010),
('4232', 0x6011),
('4232h', 0x6011),
('ft-x', 0x6015),
('230x', 0x6015),
('231x', 0x6015),
('234x', 0x6015),
('4232ha', 0x6048),
('ft232', 0x6001),
('ft232r', 0x6001),
('ft232h', 0x6014),
('ft2232', 0x6010),
('ft2232c', 0x6010),
('ft2232d', 0x6010),
('ft2232h', 0x6010),
('ft4232', 0x6011),
('ft4232h', 0x6011),
('ft230x', 0x6015),
('ft231x', 0x6015),
('ft234x', 0x6015),
('ft4232ha', 0x6048)))
}
"""Supported products, only FTDI officials ones.
To add third parties and customized products, see
:py:meth:`add_custom_product`.
"""
DEFAULT_VENDOR = FTDI_VENDOR
"""Default vendor: FTDI."""
DEVICE_NAMES = {
0x0200: 'ft232am',
0x0400: 'ft232bm',
0x0500: 'ft2232c',
0x0600: 'ft232r',
0x0700: 'ft2232h',
0x0800: 'ft4232h',
0x0900: 'ft232h',
0x1000: 'ft-x',
0x3600: 'ft4232ha'}
"""Common names of FTDI supported devices."""
# Note that the FTDI datasheets contradict themselves, so
# the following values may not be the right ones...
FIFO_SIZES = {
0x0200: (128, 128), # FT232AM: TX: 128, RX: 128
0x0400: (128, 384), # FT232BM: TX: 128, RX: 384
0x0500: (128, 384), # FT2232C: TX: 128, RX: 384
0x0600: (256, 128), # FT232R: TX: 256, RX: 128
0x0700: (4096, 4096), # FT2232H: TX: 4KiB, RX: 4KiB
0x0800: (2048, 2048), # FT4232H: TX: 2KiB, RX: 2KiB
0x0900: (1024, 1024), # FT232H: TX: 1KiB, RX: 1KiB
0x1000: (512, 512), # FT-X: TX: 512, RX: 512
0x3600: (2048, 2048), # FT4232HA: TX: 2KiB, RX: 2KiB
}
"""FTDI chip internal FIFO sizes
Note that 'TX' and 'RX' are inverted with the datasheet terminology:
Values here are seen from the host perspective, whereas datasheet
values are defined from the device perspective
"""
@unique
class BitMode(IntEnum):
"""Function selection."""
RESET = 0x00 # switch off alternative mode (default to UART)
BITBANG = 0x01 # classical asynchronous bitbang mode
MPSSE = 0x02 # MPSSE mode, available on 2232x chips
SYNCBB = 0x04 # synchronous bitbang mode
MCU = 0x08 # MCU Host Bus Emulation mode,
OPTO = 0x10 # Fast Opto-Isolated Serial Interface Mode
CBUS = 0x20 # Bitbang on CBUS pins of R-type chips
SYNCFF = 0x40 # Single Channel Synchronous FIFO mode
# MPSSE Commands
WRITE_BYTES_PVE_MSB = 0x10
WRITE_BYTES_NVE_MSB = 0x11
WRITE_BITS_PVE_MSB = 0x12
WRITE_BITS_NVE_MSB = 0x13
WRITE_BYTES_PVE_LSB = 0x18
WRITE_BYTES_NVE_LSB = 0x19
WRITE_BITS_PVE_LSB = 0x1a
WRITE_BITS_NVE_LSB = 0x1b
READ_BYTES_PVE_MSB = 0x20
READ_BYTES_NVE_MSB = 0x24
READ_BITS_PVE_MSB = 0x22
READ_BITS_NVE_MSB = 0x26
READ_BYTES_PVE_LSB = 0x28
READ_BYTES_NVE_LSB = 0x2c
READ_BITS_PVE_LSB = 0x2a
READ_BITS_NVE_LSB = 0x2e
RW_BYTES_PVE_NVE_MSB = 0x31
RW_BYTES_NVE_PVE_MSB = 0x34
RW_BITS_PVE_NVE_MSB = 0x33
RW_BITS_NVE_PVE_MSB = 0x36
RW_BYTES_PVE_NVE_LSB = 0x39
RW_BYTES_NVE_PVE_LSB = 0x3c
RW_BITS_PVE_NVE_LSB = 0x3b
RW_BITS_NVE_PVE_LSB = 0x3e
WRITE_BITS_TMS_PVE = 0x4a
WRITE_BITS_TMS_NVE = 0x4b
RW_BITS_TMS_PVE_PVE = 0x6a
RW_BITS_TMS_PVE_NVE = 0x6b
RW_BITS_TMS_NVE_PVE = 0x6e
RW_BITS_TMS_NVE_NVE = 0x6f
SEND_IMMEDIATE = 0x87
WAIT_ON_HIGH = 0x88
WAIT_ON_LOW = 0x89
READ_SHORT = 0x90
READ_EXTENDED = 0x91
WRITE_SHORT = 0x92
WRITE_EXTENDED = 0x93
# -H series only
DISABLE_CLK_DIV5 = 0x8a
ENABLE_CLK_DIV5 = 0x8b
# Modem status
MODEM_CTS = (1 << 4) # Clear to send
MODEM_DSR = (1 << 5) # Data set ready
MODEM_RI = (1 << 6) # Ring indicator
MODEM_RLSD = (1 << 7) # Carrier detect
MODEM_DR = (1 << 8) # Data ready
MODEM_OE = (1 << 9) # Overrun error
MODEM_PE = (1 << 10) # Parity error
MODEM_FE = (1 << 11) # Framing error
MODEM_BI = (1 << 12) # Break interrupt
MODEM_THRE = (1 << 13) # Transmitter holding register empty
MODEM_TEMT = (1 << 14) # Transmitter empty
MODEM_RCVE = (1 << 15) # Error in RCVR FIFO
# FTDI MPSSE commands
SET_BITS_LOW = 0x80 # Change LSB GPIO output
SET_BITS_HIGH = 0x82 # Change MSB GPIO output
GET_BITS_LOW = 0x81 # Get LSB GPIO output
GET_BITS_HIGH = 0x83 # Get MSB GPIO output
LOOPBACK_START = 0x84 # Enable loopback
LOOPBACK_END = 0x85 # Disable loopback
SET_TCK_DIVISOR = 0x86 # Set clock
# -H series only
ENABLE_CLK_3PHASE = 0x8c # Enable 3-phase data clocking (I2C)
DISABLE_CLK_3PHASE = 0x8d # Disable 3-phase data clocking
CLK_BITS_NO_DATA = 0x8e # Allows JTAG clock to be output w/o data
CLK_BYTES_NO_DATA = 0x8f # Allows JTAG clock to be output w/o data
CLK_WAIT_ON_HIGH = 0x94 # Clock until GPIOL1 is high
CLK_WAIT_ON_LOW = 0x95 # Clock until GPIOL1 is low
ENABLE_CLK_ADAPTIVE = 0x96 # Enable JTAG adaptive clock for ARM
DISABLE_CLK_ADAPTIVE = 0x97 # Disable JTAG adaptive clock
CLK_COUNT_WAIT_ON_HIGH = 0x9c # Clock byte cycles until GPIOL1 is high
CLK_COUNT_WAIT_ON_LOW = 0x9d # Clock byte cycles until GPIOL1 is low
# FT232H only
DRIVE_ZERO = 0x9e # Drive-zero mode
# USB control requests
REQ_OUT = build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
REQ_IN = build_request_type(CTRL_IN, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
# Requests
SIO_REQ_RESET = 0x0 # Reset the port
SIO_REQ_SET_MODEM_CTRL = 0x1 # Set the modem control register
SIO_REQ_SET_FLOW_CTRL = 0x2 # Set flow control register
SIO_REQ_SET_BAUDRATE = 0x3 # Set baud rate
SIO_REQ_SET_DATA = 0x4 # Set the data characteristics of the port
SIO_REQ_POLL_MODEM_STATUS = 0x5 # Get line status
SIO_REQ_SET_EVENT_CHAR = 0x6 # Change event character
SIO_REQ_SET_ERROR_CHAR = 0x7 # Change error character
SIO_REQ_SET_LATENCY_TIMER = 0x9 # Change latency timer
SIO_REQ_GET_LATENCY_TIMER = 0xa # Get latency timer
SIO_REQ_SET_BITMODE = 0xb # Change bit mode
SIO_REQ_READ_PINS = 0xc # Read GPIO pin value (or "get bitmode")
# Eeprom requests
SIO_REQ_EEPROM = 0x90
SIO_REQ_READ_EEPROM = SIO_REQ_EEPROM + 0 # Read EEPROM content
SIO_REQ_WRITE_EEPROM = SIO_REQ_EEPROM + 1 # Write EEPROM content
SIO_REQ_ERASE_EEPROM = SIO_REQ_EEPROM + 2 # Erase EEPROM content
# Reset arguments
SIO_RESET_SIO = 0 # Reset device
SIO_RESET_PURGE_RX = 1 # Drain USB RX buffer (host-to-ftdi)
SIO_RESET_PURGE_TX = 2 # Drain USB TX buffer (ftdi-to-host)
# Flow control arguments
SIO_DISABLE_FLOW_CTRL = 0x0
SIO_RTS_CTS_HS = (0x1 << 8)
SIO_DTR_DSR_HS = (0x2 << 8)
SIO_XON_XOFF_HS = (0x4 << 8)
SIO_SET_DTR_MASK = 0x1
SIO_SET_DTR_HIGH = (SIO_SET_DTR_MASK | (SIO_SET_DTR_MASK << 8))
SIO_SET_DTR_LOW = (0x0 | (SIO_SET_DTR_MASK << 8))
SIO_SET_RTS_MASK = 0x2
SIO_SET_RTS_HIGH = (SIO_SET_RTS_MASK | (SIO_SET_RTS_MASK << 8))
SIO_SET_RTS_LOW = (0x0 | (SIO_SET_RTS_MASK << 8))
# Parity bits
PARITY_NONE, PARITY_ODD, PARITY_EVEN, PARITY_MARK, PARITY_SPACE = range(5)
# Number of stop bits
STOP_BIT_1, STOP_BIT_15, STOP_BIT_2 = range(3)
# Number of bits
BITS_7, BITS_8 = [7+i for i in range(2)]
# Break type
BREAK_OFF, BREAK_ON = range(2)
# cts: Clear to send
# dsr: Data set ready
# ri: Ring indicator
# dcd: Data carrier detect
# dr: Data ready
# oe: Overrun error
# pe: Parity error
# fe: Framing error
# bi: Break interrupt
# thre: Transmitter holding register empty
# temt: Transmitter empty
# err: Error in RCVR FIFO
MODEM_STATUS = [('', '', '', '', 'cts', 'dsr', 'ri', 'dcd'),
('dr', 'overrun', 'parity', 'framing',
'break', 'thre', 'txe', 'rcve')]
ERROR_BITS = (0x00, 0x8E)
TX_EMPTY_BITS = 0x60
# Clocks and baudrates
BUS_CLOCK_BASE = 6.0E6 # 6 MHz
BUS_CLOCK_HIGH = 30.0E6 # 30 MHz
BAUDRATE_REF_BASE = int(3.0E6) # 3 MHz
BAUDRATE_REF_HIGH = int(12.0E6) # 12 MHz
BITBANG_BAUDRATE_RATIO_BASE = 16
BITBANG_BAUDRATE_RATIO_HIGH = 5
BAUDRATE_TOLERANCE = 3.0 # acceptable clock drift for UART, in %
FRAC_DIV_CODE = (0, 3, 2, 4, 1, 5, 6, 7)
# Latency
LATENCY_MIN = 1
LATENCY_MAX = 255
LATENCY_EEPROM_FT232R = 77
# EEPROM Properties
EXT_EEPROM_SIZES = (128, 256) # in bytes (93C66 seen as 93C56)
INT_EEPROMS = {
0x0600: 0x80, # FT232R: 128 bytes, 1024 bits
0x1000: 0x400 # FT230*X: 1KiB
}
def __init__(self):
self.log = getLogger('pyftdi.ftdi')
self._debug_log = False
self._usb_dev = None
self._usb_read_timeout = 5000
self._usb_write_timeout = 5000
self._baudrate = -1
self._readbuffer = bytearray()
self._readoffset = 0
self._readbuffer_chunksize = 4 << 10 # 4KiB
self._writebuffer_chunksize = 4 << 10 # 4KiB
self._max_packet_size = 0
self._interface = None
self._index = None
self._in_ep = None
self._out_ep = None
self._bitmode = Ftdi.BitMode.RESET
self._latency = 0
self._latency_count = 0
self._latency_min = self.LATENCY_MIN
self._latency_max = self.LATENCY_MAX
self._latency_threshold = None # disable dynamic latency
self._lineprop = 0
self._cbus_pins = (0, 0)
self._cbus_out = 0
self._tracer = None
# --- Public API -------------------------------------------------------
@classmethod
def create_from_url(cls, url: str) -> 'Ftdi':
"""Create an Ftdi instance from an URL
URL scheme: ftdi://[vendor[:product[:index|:serial]]]/interface
:param url: FTDI device selector
:return: a fresh, open Ftdi instance
"""
device = Ftdi()
device.open_from_url(url)
return device
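# Usage sketch (illustrative; 'ftdi://ftdi:232h/1' names a hypothetical
# FT232H, not a device guaranteed to be present):
#
#   ftdi = Ftdi.create_from_url('ftdi://ftdi:232h/1')
#   try:
#       print(ftdi.ic_name)   # e.g. 'ft232h'
#   finally:
#       ftdi.close()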
@classmethod
def list_devices(cls, url: Optional[str] = None) -> \
List[Tuple[UsbDeviceDescriptor, int]]:
"""List of URLs of connected FTDI devices.
:param url: a pattern URL to restrict the search
:return: list of (UsbDeviceDescriptor, interface)
"""
return UsbTools.list_devices(url or 'ftdi:///?',
cls.VENDOR_IDS, cls.PRODUCT_IDS,
cls.DEFAULT_VENDOR)
@classmethod
def show_devices(cls, url: Optional[str] = None,
out: Optional[TextIO] = None) -> None:
"""Print the URLs and descriptors of connected FTDI devices.
:param url: a pattern URL to restrict the search
:param out: output stream, default to stdout
"""
devdescs = UsbTools.list_devices(url or 'ftdi:///?',
cls.VENDOR_IDS, cls.PRODUCT_IDS,
cls.DEFAULT_VENDOR)
UsbTools.show_devices('ftdi', cls.VENDOR_IDS, cls.PRODUCT_IDS,
devdescs, out)
@classmethod
def get_identifiers(cls, url: str) -> Tuple[UsbDeviceDescriptor, int]:
"""Extract the identifiers of an FTDI device from URL, if any
:param url: input URL to parse
"""
return UsbTools.parse_url(url,
cls.SCHEME, cls.VENDOR_IDS, cls.PRODUCT_IDS,
cls.DEFAULT_VENDOR)
@classmethod
def get_device(cls, url: str) -> UsbDevice:
"""Get a USB device from its URL, without opening an instance.
:param url: input URL to parse
:return: the USB device that matches the specified URL
"""
devdesc, _ = cls.get_identifiers(url)
return UsbTools.get_device(devdesc)
@classmethod
def add_custom_vendor(cls, vid: int, vidname: str = '') -> None:
"""Add a custom USB vendor identifier.
It can be useful to use a pretty URL for opening FTDI devices.
:param vid: Vendor ID (USB 16-bit identifier)
:param vidname: Vendor name (arbitrary string)
:raise ValueError: if the vendor id is already referenced
"""
if vid in cls.VENDOR_IDS.values():
raise ValueError('Vendor ID 0x%04x already registered' % vid)
if not vidname:
vidname = '0x%04x' % vid
cls.VENDOR_IDS[vidname] = vid
@classmethod
def add_custom_product(cls, vid: int, pid: int, pidname: str = '') -> None:
"""Add a custom USB product identifier.
It is required for opening FTDI devices with non-standard VID/PID
USB identifiers.
:param vid: Vendor ID (USB 16-bit identifier)
:param pid: Product ID (USB 16-bit identifier)
:param pidname: Product name (arbitrary string)
:raise ValueError: if the product id is already referenced
"""
if vid not in cls.PRODUCT_IDS:
cls.PRODUCT_IDS[vid] = OrderedDict()
elif pid in cls.PRODUCT_IDS[vid].values():
raise ValueError('Product ID 0x%04x:0x%04x already registered' %
(vid, pid))
if not pidname:
pidname = '0x%04x' % pid
cls.PRODUCT_IDS[vid][pidname] = pid
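# Registration sketch (the 0x1234/0xabcd identifiers and names below are
# made-up placeholders for a vendor-customized board):
#
#   Ftdi.add_custom_vendor(0x1234, 'acme')
#   Ftdi.add_custom_product(0x1234, 0xabcd, 'myboard')
#   ftdi = Ftdi.create_from_url('ftdi://acme:myboard/1')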
@classmethod
def decode_modem_status(cls, value: bytes, error_only: bool = False) -> \
Tuple[str, ...]:
"""Decode the FTDI modem status bitfield into short strings.
:param value: 2-byte modem status
:param error_only: only decode error flags
:return: a tuple of status identifiers
"""
status = []
for pos, (byte_, ebits) in enumerate(zip(value, cls.ERROR_BITS)):
for bit, _ in enumerate(cls.MODEM_STATUS[pos]):
if error_only:
byte_ &= ebits
if byte_ & (1 << bit):
status.append(cls.MODEM_STATUS[pos][bit])
return tuple(status)
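# Decoding sketch (the 2-byte status below is a fabricated sample, not a
# capture from real hardware):
#
#   status = bytes((0x70, 0x60))
#   Ftdi.decode_modem_status(status)
#   # -> ('cts', 'dsr', 'ri', 'thre', 'txe')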
@staticmethod
def find_all(vps: Sequence[Tuple[int, int]], nocache: bool = False) -> \
List[Tuple[UsbDeviceDescriptor, int]]:
"""Find all devices that match the vendor/product pairs of the vps
list.
:param vps: a sequence of 2-tuple (vid, pid) pairs
:type vps: tuple(int, int)
:param bool nocache: bypass cache to re-enumerate USB devices on
the host
:return: a list of 2-tuples (UsbDeviceDescriptor, interface)
:rtype: list(tuple(UsbDeviceDescriptor, int))
"""
return UsbTools.find_all(vps, nocache)
@property
def is_connected(self) -> bool:
"""Tells whether this instance is connected to an actual USB slave.
:return: the slave connection status
"""
return bool(self._usb_dev)
def open_from_url(self, url: str) -> None:
"""Open a new interface to the specified FTDI device.
:param str url: a FTDI URL selector
"""
devdesc, interface = self.get_identifiers(url)
device = UsbTools.get_device(devdesc)
self.open_from_device(device, interface)
def open(self, vendor: int, product: int, bus: Optional[int] = None,
address: Optional[int] = None, index: int = 0,
serial: Optional[str] = None,
interface: int = 1) -> None:
"""Open a new interface to the specified FTDI device.
If several FTDI devices of the same kind (vid, pid) are connected
to the host, either index or serial argument should be used to
discriminate the FTDI device.
The index argument is not a reliable selector, as the host may
enumerate USB devices in random order. The serial argument is a
more reliable selector and should always be preferred.
Some FTDI devices support several interfaces/ports (such as FT2232H,
FT4232H and FT4232HA). The interface argument selects the FTDI port
to use, starting from 1 (not 0).
:param int vendor: USB vendor id
:param int product: USB product id
:param int bus: optional selector, USB bus
:param int address: optional selector, USB address on bus
:param int index: optional selector, selects the n-th matching
FTDI enumerated USB device on the host
:param str serial: optional selector, selects the FTDI device
by its serial number
:param int interface: FTDI interface/port
"""
devdesc = UsbDeviceDescriptor(vendor, product, bus, address, serial,
index, None)
device = UsbTools.get_device(devdesc)
self.open_from_device(device, interface)
def open_from_device(self, device: UsbDevice,
interface: int = 1) -> None:
"""Open a new interface from an existing USB device.
:param device: FTDI USB device (PyUSB instance)
:param interface: FTDI interface to use (integer starting from 1)
"""
if not isinstance(device, UsbDevice):
raise FtdiError("Device '%s' is not a PyUSB device" % device)
self._usb_dev = device
try:
self._usb_dev.set_configuration()
except USBError:
pass
# detect invalid interface as early as possible
config = self._usb_dev.get_active_configuration()
if interface > config.bNumInterfaces:
raise FtdiError('No such FTDI port: %d' % interface)
self._set_interface(config, interface)
self._max_packet_size = self._get_max_packet_size()
# Invalidate data in the readbuffer
self._readoffset = 0
self._readbuffer = bytearray()
# Drain input buffer
self.purge_buffers()
# Shallow reset
self._reset_device()
# Reset feature mode
self.set_bitmode(0, Ftdi.BitMode.RESET)
# Init latency
self._latency_threshold = None
self.set_latency_timer(self.LATENCY_MIN)
self._debug_log = self.log.getEffectiveLevel() == DEBUG
def close(self, freeze: bool = False) -> None:
"""Close the FTDI interface/port.
:param freeze: if set, FTDI port is not reset to its default
state on close. This means the port is left with
its current configuration and output signals.
This feature should not be used except for very
specific needs.
"""
if self._usb_dev:
dev = self._usb_dev
if self._is_pyusb_handle_active():
# Do not attempt to execute the following calls if the
# device has been closed: the ResourceManager may attempt
# to re-open the device that has been already closed, and
# this may lead to a (native) crash in libusb.
try:
if not freeze:
self.set_bitmode(0, Ftdi.BitMode.RESET)
self.set_latency_timer(self.LATENCY_MAX)
release_interface(dev, self._index - 1)
except FtdiError as exc:
self.log.warning('FTDI device may be gone: %s', exc)
try:
self._usb_dev.attach_kernel_driver(self._index - 1)
except (NotImplementedError, USBError):
pass
self._usb_dev = None
UsbTools.release_device(dev)
def reset(self, usb_reset: bool = False) -> None:
"""Reset FTDI device.
:param usb_reset: whether to perform a full USB reset of the device.
Beware that selecting usb_reset performs a full USB device reset,
which means all other interfaces of the same device are also
affected.
"""
if not self.is_connected:
raise FtdiError('Not connected')
self._reset_device()
if usb_reset:
self._reset_usb_device()
def open_mpsse_from_url(self, url: str, direction: int = 0x0,
initial: int = 0x0, frequency: float = 6.0E6,
latency: int = 16, debug: bool = False) -> float:
"""Open a new interface to the specified FTDI device in MPSSE mode.
MPSSE enables I2C, SPI, JTAG or other synchronous serial interface
modes (vs. UART mode).
:param url: a FTDI URL selector
:param direction: a bitfield specifying the FTDI GPIO direction,
where high level defines an output, and low level defines an
input
:param initial: a bitfield specifying the initial output value
:param float frequency: serial interface clock in Hz
:param latency: low-level latency in milliseconds. The shorter
the delay, the higher the host CPU load. Do not use shorter
values than the default, as it triggers data loss in FTDI.
:param debug: use a tracer to decode MPSSE protocol
:return: actual bus frequency in Hz
"""
devdesc, interface = self.get_identifiers(url)
device = UsbTools.get_device(devdesc)
return self.open_mpsse_from_device(device, interface,
direction=direction,
initial=initial,
frequency=frequency,
latency=latency,
debug=debug)
def open_mpsse(self, vendor: int, product: int, bus: Optional[int] = None,
address: Optional[int] = None, index: int = 0,
serial: Optional[str] = None, interface: int = 1,
direction: int = 0x0, initial: int = 0x0,
frequency: float = 6.0E6, latency: int = 16,
debug: bool = False) -> float:
"""Open a new interface to the specified FTDI device in MPSSE mode.
MPSSE enables I2C, SPI, JTAG or other synchronous serial interface
modes (vs. UART mode).
If several FTDI devices of the same kind (vid, pid) are connected
to the host, either index or serial argument should be used to
discriminate the FTDI device.
The index argument is not a reliable selector, as the host may
enumerate USB devices in random order. The serial argument is a
more reliable selector and should always be preferred.
Some FTDI devices support several interfaces/ports (such as FT2232H,
FT4232H and FT4232HA). The interface argument selects the FTDI port
to use, starting from 1 (not 0). Note that not all FTDI ports are
MPSSE capable.
:param vendor: USB vendor id
:param product: USB product id
:param bus: optional selector, USB bus
:param address: optional selector, USB address on bus
:param index: optional selector, selects the n-th matching
FTDI enumerated USB device on the host
:param serial: optional selector, selects the FTDI device
by its serial number
:param interface: FTDI interface/port
:param direction: a bitfield specifying the FTDI GPIO direction,
where high level defines an output, and low level defines an
input
:param initial: a bitfield specifying the initial output value
:param frequency: serial interface clock in Hz
:param latency: low-level latency in milliseconds. The shorter
the delay, the higher the host CPU load. Do not use shorter
values than the default, as it triggers data loss in FTDI.
:param bool debug: use a tracer to decode MPSSE protocol
:return: actual bus frequency in Hz
"""
devdesc = UsbDeviceDescriptor(vendor, product, bus, address, serial,
index, None)
device = UsbTools.get_device(devdesc)
return self.open_mpsse_from_device(device, interface,
direction=direction,
initial=initial,
frequency=frequency,
latency=latency,
debug=debug)
def open_mpsse_from_device(self, device: UsbDevice,
interface: int = 1, direction: int = 0x0,
initial: int = 0x0, frequency: float = 6.0E6,
latency: int = 16, tracer: bool = False,
debug: bool = False) -> float:
"""Open a new interface to the specified FTDI device in MPSSE mode.
MPSSE enables I2C, SPI, JTAG or other synchronous serial interface
modes (vs. UART mode).
If several FTDI devices of the same kind (vid, pid) are connected
to the host, either index or serial argument should be used to
discriminate the FTDI device.
The index argument is not a reliable selector, as the host may
enumerate USB devices in random order. The serial argument is a
more reliable selector and should always be preferred.
Some FTDI devices support several interfaces/ports (such as FT2232H,
FT4232H and FT4232HA). The interface argument selects the FTDI port
to use, starting from 1 (not 0). Note that not all FTDI ports are
MPSSE capable.
:param device: FTDI USB device
:param interface: FTDI interface/port
:param direction: a bitfield specifying the FTDI GPIO direction,
where high level defines an output, and low level defines an
input
:param initial: a bitfield specifying the initial output value
:param frequency: serial interface clock in Hz
:param latency: low-level latency in milliseconds. The shorter
the delay, the higher the host CPU load. Do not use shorter
values than the default, as it triggers data loss in FTDI.
:param bool tracer: use a tracer to decode MPSSE protocol
:param bool debug: add more debug traces
:return: actual bus frequency in Hz
"""
# pylint: disable-msg=unused-argument
self.open_from_device(device, interface)
if not self.is_mpsse_interface(interface):
self.close()
raise FtdiMpsseError('This interface does not support MPSSE')
if to_bool(tracer): # accept strings as boolean
#pylint: disable-msg=import-outside-toplevel
from .tracer import FtdiMpsseTracer
self._tracer = FtdiMpsseTracer(self.device_version)
self.log.debug('Using MPSSE tracer')
# Set latency timer
self.set_latency_timer(latency)
# Set chunk size
self.write_data_set_chunksize()
self.read_data_set_chunksize()
# Reset feature mode
self.set_bitmode(0, Ftdi.BitMode.RESET)
# Drain buffers
self.purge_buffers()
# Disable event and error characters
self.set_event_char(0, False)
self.set_error_char(0, False)
# Enable MPSSE mode
self.set_bitmode(direction, Ftdi.BitMode.MPSSE)
# Configure clock
frequency = self._set_frequency(frequency)
# Configure I/O
cmd = bytearray((Ftdi.SET_BITS_LOW, initial & 0xFF, direction & 0xFF))
if self.has_wide_port:
initial >>= 8
direction >>= 8
cmd.extend((Ftdi.SET_BITS_HIGH, initial & 0xFF, direction & 0xFF))
self.write_data(cmd)
# Disable loopback
self.write_data(bytearray((Ftdi.LOOPBACK_END,)))
self.validate_mpsse()
# Return the actual frequency
return frequency
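# MPSSE sketch (illustrative; the URL and the direction/initial masks are
# assumptions for a hypothetical FT2232H wiring):
#
#   ftdi = Ftdi()
#   freq = ftdi.open_mpsse_from_url('ftdi://ftdi:2232h/1',
#                                   direction=0x0b, initial=0x08,
#                                   frequency=1.0E6)
#   # freq is the actual, possibly adjusted, bus frequency in Hz
#   ftdi.write_data(bytearray((Ftdi.SET_BITS_LOW, 0x00, 0x0b)))
#   ftdi.close()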
def open_bitbang_from_url(self, url: str, direction: int = 0x0,
latency: int = 16, baudrate: int = 1000000,
sync: bool = False) -> float:
"""Open a new interface to the specified FTDI device in bitbang mode.
Bitbang enables direct read or write to FTDI GPIOs.
:param url: a FTDI URL selector
:param direction: a bitfield specifying the FTDI GPIO direction,
where high level defines an output, and low level defines an
input
:param latency: low-level latency to select the USB FTDI poll
delay. The shorter the delay, the higher the host CPU load.
:param baudrate: pace to sequence GPIO exchanges
:param sync: whether to use synchronous or asynchronous bitbang
:return: actual bitbang baudrate in bps
"""
devdesc, interface = self.get_identifiers(url)
device = UsbTools.get_device(devdesc)
return self.open_bitbang_from_device(device, interface,
direction=direction,
latency=latency,
baudrate=baudrate,
sync=sync)
def open_bitbang(self, vendor: int, product: int,
bus: Optional[int] = None, address: Optional[int] = None,
index: int = 0, serial: Optional[str] = None,
interface: int = 1, direction: int = 0x0,
latency: int = 16, baudrate: int = 1000000,
sync: bool = False) -> float:
"""Open a new interface to the specified FTDI device in bitbang mode.
Bitbang enables direct read or write to FTDI GPIOs.
:param vendor: USB vendor id
:param product: USB product id
:param index: optional selector, selects the n-th matching
FTDI enumerated USB device on the host
:param serial: optional selector, selects the FTDI device
by its serial number
:param interface: FTDI interface/port
:param direction: a bitfield specifying the FTDI GPIO direction,
where high level defines an output, and low level defines an
input
:param latency: low-level latency to select the USB FTDI poll
delay. The shorter the delay, the higher the host CPU load.
:param baudrate: pace to sequence GPIO exchanges
:param sync: whether to use synchronous or asynchronous bitbang
:return: actual bitbang baudrate in bps
"""
devdesc = UsbDeviceDescriptor(vendor, product, bus, address, serial,
index, None)
device = UsbTools.get_device(devdesc)
return self.open_bitbang_from_device(device, interface,
direction=direction,
latency=latency,
baudrate=baudrate,
sync=sync)
def open_bitbang_from_device(self, device: UsbDevice,
interface: int = 1, direction: int = 0x0,
latency: int = 16, baudrate: int = 1000000,
sync: bool = False) -> int:
"""Open a new interface to the specified FTDI device in bitbang mode.
Bitbang enables direct read or write to FTDI GPIOs.
:param device: FTDI USB device
:param interface: FTDI interface/port
:param direction: a bitfield specifying the FTDI GPIO direction,
where high level defines an output, and low level defines an
input
:param latency: low-level latency to select the USB FTDI poll
delay. The shorter the delay, the higher the host CPU load.
:param baudrate: pace to sequence GPIO exchanges
:param sync: whether to use synchronous or asynchronous bitbang
:return: actual bitbang baudrate in bps
"""
self.open_from_device(device, interface)
# Set latency timer
self.set_latency_timer(latency)
# Set chunk size
# Beware that the RX buffer, over 512 bytes, contains a 2-byte modem
# marker on every 512-byte chunk, so data and out-of-band markers get
# interleaved. This is not yet supported by read_data_bytes.
self.write_data_set_chunksize()
self.read_data_set_chunksize()
# disable flow control
self.set_flowctrl('')
# Enable BITBANG mode
self.set_bitmode(direction, Ftdi.BitMode.BITBANG if not sync else
Ftdi.BitMode.SYNCBB)
# Configure clock
if baudrate:
self._baudrate = self._set_baudrate(baudrate, False)
# Drain input buffer
self.purge_buffers()
return self._baudrate
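# Bitbang sketch (illustrative; the URL and the 0x0f direction mask, i.e.
# the four LSB pins as outputs, are assumptions):
#
#   ftdi = Ftdi()
#   ftdi.open_bitbang_from_url('ftdi://ftdi:232r/1', direction=0x0f)
#   ftdi.write_data(bytes((0x05,)))   # drive the output pins
#   pins = ftdi.read_pins()           # sample all pins, inputs included
#   ftdi.close()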
@property
def usb_path(self) -> Tuple[int, int, int]:
"""Provide the physical location on the USB topology.
:return: a tuple of bus, address, interface; if connected
"""
if not self.is_connected:
raise FtdiError('Not connected')
return (self._usb_dev.bus, self._usb_dev.address,
self._interface.bInterfaceNumber)
@property
def device_version(self) -> int:
"""Report the device version, i.e. the kind of device.
:see: :py:meth:`ic_name` for a product version of this information.
:return: the device version (16-bit integer)
"""
if not self.is_connected:
raise FtdiError('Device characteristics not yet known')
return self._usb_dev.bcdDevice
@property
def ic_name(self) -> str:
"""Return the current type of the FTDI device as a string
see also http://www.ftdichip.com/Support/
Documents/TechnicalNotes/TN_100_USB_VID-PID_Guidelines.pdf
:return: the identified FTDI device as a string
"""
if not self.is_connected:
return 'unknown'
return self.DEVICE_NAMES.get(self.device_version, 'undefined')
@property
def device_port_count(self) -> int:
"""Report the count of port/interface of the Ftdi device.
:return: the count of ports
"""
if not self.is_connected:
raise FtdiError('Device characteristics not yet known')
return self._usb_dev.get_active_configuration().bNumInterfaces
@property
def port_index(self) -> int:
"""Report the port/interface index, starting from 1
:return: the port position/index
"""
if not self.is_connected:
raise FtdiError('Device characteristics not yet known')
return self._index
@property
def port_width(self) -> int:
"""Report the width of a single port / interface
:return: the width of the port, in bits
:raise FtdiError: if no FTDI port is open
"""
if not self.is_connected:
raise FtdiError('Device characteristics not yet known')
if self.device_version in (0x0700, 0x0900):
return 16
if self.device_version in (0x0500, ):
return 12
return 8
@property
def has_mpsse(self) -> bool:
"""Tell whether the device supports MPSSE (I2C, SPI, JTAG, ...)
:return: True if the FTDI device supports MPSSE
:raise FtdiError: if no FTDI port is open
"""
if not self.is_connected:
raise FtdiError('Device characteristics not yet known')
return self.device_version in (0x0500, 0x0700, 0x0800, 0x0900, 0x3600)
@property
def has_wide_port(self) -> bool:
"""Tell whether the device supports 16-bit GPIO ports (vs. 8 bits)
:return: True if the FTDI device supports wide GPIO port
:raise FtdiError: if no FTDI port is open
"""
return self.port_width > 8
@property
def has_cbus(self) -> bool:
"""Tell whether the device supports CBUS bitbang.
CBUS bitbanging feature requires a special configuration in EEPROM.
This function only reports if the current device supports this mode,
not if this mode has been enabled in EEPROM.
EEPROM configuration must be queried to check which CBUS pins have
been configured for GPIO/bitbang mode.
:return: True if the FTDI device supports CBUS bitbang
:raise FtdiError: if no FTDI port is open
"""
if not self.is_connected:
raise FtdiError('Device characteristics not yet known')
return self.device_version in (0x0600, 0x0900, 0x1000)
@property
def has_drivezero(self) -> bool:
"""Tell whether the device supports drive-zero mode, i.e. if the
device supports the open-collector drive mode, useful for I2C
communication for example.
:return: True if the FTDI device features drive-zero mode
:raise FtdiError: if no FTDI port is open
"""
if not self.is_connected:
raise FtdiError('Device characteristics not yet known')
return self.device_version in (0x0900, )
@property
def is_legacy(self) -> bool:
"""Tell whether the device is a low-end FTDI
:return: True if the FTDI device can only be used as a slow USB-UART
bridge
:raise FtdiError: if no FTDI port is open
"""
if not self.is_connected:
raise FtdiError('Device characteristics not yet known')
return self.device_version <= 0x0200
@property
def is_H_series(self) -> bool:
"""Tell whether the device is a high-end FTDI
:return: True if the FTDI device is a high-end USB-UART bridge
:raise FtdiError: if no FTDI port is open
"""
if not self.is_connected:
raise FtdiError('Device characteristics not yet known')
return self.device_version in (0x0700, 0x0800, 0x0900, 0x3600)
@property
def is_mpsse(self) -> bool:
"""Tell whether the device is configured in MPSSE mode
:return: True if the FTDI interface is configured in MPSSE mode
"""
return self._bitmode == Ftdi.BitMode.MPSSE
def is_mpsse_interface(self, interface: int) -> bool:
"""Tell whether the interface supports MPSSE (I2C, SPI, JTAG, ...)
:return: True if the FTDI interface supports MPSSE
:raise FtdiError: if no FTDI port is open
"""
if not self.has_mpsse:
return False
if self.device_version == 0x0800 and interface > 2:
return False
if self.device_version == 0x3600 and interface > 2:
return False
return True
@property
def is_bitbang_enabled(self) -> bool:
"""Tell whether some bitbang mode is activated
:return: True if the FTDI interface is configured to support
bitbanging
"""
return self._bitmode not in (
Ftdi.BitMode.RESET,
Ftdi.BitMode.MPSSE,
Ftdi.BitMode.CBUS # CBUS mode does not change base frequency
)
# legacy API
bitbang_enabled = is_bitbang_enabled
@property
def is_eeprom_internal(self) -> bool:
"""Tell whether the device has an internal EEPROM.
:return: True if the device has an internal EEPROM.
"""
return self.device_version in self.INT_EEPROMS
@property
def max_eeprom_size(self) -> int:
"""Report the maximum size of the EEPROM.
The actual size may be lower, or even 0 if no EEPROM is connected
or supported.
:return: the maximum size in bytes.
"""
if self.device_version in self.INT_EEPROMS:
return self.INT_EEPROMS[self.device_version]
if self.device_version == 0x0600:
return 0x80
return 0x100
@property
def frequency_max(self) -> float:
"""Tells the maximum frequency for MPSSE clock.
:return: the maximum supported frequency in Hz
"""
return Ftdi.BUS_CLOCK_HIGH if self.is_H_series else Ftdi.BUS_CLOCK_BASE
@property
def fifo_sizes(self) -> Tuple[int, int]:
"""Return the (TX, RX) tupple of hardware FIFO sizes
:return: 2-tuple of TX, RX FIFO size in bytes
"""
try:
return Ftdi.FIFO_SIZES[self.device_version]
except KeyError as exc:
raise FtdiFeatureError('Unsupported device: 0x%04x' %
self.device_version) from exc
@property
def mpsse_bit_delay(self) -> float:
"""Delay between execution of two MPSSE SET_BITS commands.
:return: minimum delay (actual value might be larger) in seconds
"""
# measured on FTDI2232H, not documented in datasheet, hence may vary
# from one FTDI model to another...
# left as a variable so it could be tweaked based on the FTDI bcd type,
# the frequency, or ... whatever else
return 0.5E-6 # seems to vary between 5 and 6.5 us
@property
def baudrate(self) -> int:
"""Return current baudrate.
"""
return self._baudrate
@property
def usb_dev(self) -> UsbDevice:
"""Return the underlying USB Device.
"""
return self._usb_dev
def set_baudrate(self, baudrate: int, constrain: bool = True) -> int:
"""Change the current UART or BitBang baudrate.
The FTDI device is not able to use an arbitrary baudrate. Its
internal dividers are only able to achieve some baudrates.
PyFtdi attempts to find the closest configurable baudrate and if
the deviation from the requested baudrate is too high, it rejects
the configuration if constrain is set.
:py:attr:`baudrate` attribute can be used to retrieve the exact
selected baudrate.
:py:const:`BAUDRATE_TOLERANCE` defines the maximum deviation between
the requested baudrate and the closest FTDI achievable baudrate,
which matches standard UART clock drift (3%). If the achievable
baudrate is not within limits, baudrate setting is rejected.
:param baudrate: the new baudrate for the UART.
:param constrain: whether to validate baudrate is in RS232 tolerance
limits or allow larger drift
:raise ValueError: if deviation from selected baudrate is too large
:raise FtdiError: on IO Error
:return: the effective baudrate
"""
self._baudrate = self._set_baudrate(baudrate, constrain)
return self._baudrate
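# Baudrate sketch (illustrative): the effective rate may differ slightly
# from the request, within BAUDRATE_TOLERANCE when constrain is set.
#
#   actual = ftdi.set_baudrate(115200)
#   assert abs(actual - 115200) / 115200 <= Ftdi.BAUDRATE_TOLERANCE / 100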
def set_frequency(self, frequency: float) -> float:
"""Change the current MPSSE bus frequency
The FTDI device is not able to use an arbitrary frequency. Its
internal dividers are only able to achieve some frequencies.
PyFtdi finds and selects the closest configurable frequency.
:param frequency: the new frequency for the serial interface,
in Hz.
:return: the selected frequency, which may differ from the requested
one, in Hz
"""
return self._set_frequency(frequency)
def purge_rx_buffer(self) -> None:
"""Clear the USB receive buffer on the chip (host-to-ftdi) and the
internal read buffer."""
if self._ctrl_transfer_out(Ftdi.SIO_REQ_RESET,
Ftdi.SIO_RESET_PURGE_RX):
raise FtdiError('Unable to flush RX buffer')
# Invalidate data in the readbuffer
self._readoffset = 0
self._readbuffer = bytearray()
self.log.debug('rx buf purged')
def purge_tx_buffer(self) -> None:
"""Clear the USB transmit buffer on the chip (ftdi-to-host)."""
if self._ctrl_transfer_out(Ftdi.SIO_REQ_RESET,
Ftdi.SIO_RESET_PURGE_TX):
raise FtdiError('Unable to flush TX buffer')
def purge_buffers(self) -> None:
"""Clear the buffers on the chip and the internal read buffer."""
self.purge_rx_buffer()
self.purge_tx_buffer()
def write_data_set_chunksize(self, chunksize: int = 0) -> None:
"""Configure write buffer chunk size.
This is a low-level configuration option, which is not intended
for regular use.
:param chunksize: the optional size of the write buffer in bytes,
it is recommended to use 0 to force automatic
evaluation of the best value.
"""
if chunksize == 0:
chunksize = self.fifo_sizes[0]
self._writebuffer_chunksize = chunksize
self.log.debug('TX chunksize: %d', self._writebuffer_chunksize)
def write_data_get_chunksize(self) -> int:
"""Get write buffer chunk size.
:return: the size of the write buffer in bytes
"""
return self._writebuffer_chunksize
def read_data_set_chunksize(self, chunksize: int = 0) -> None:
"""Configure read buffer chunk size.
This is a low-level configuration option, which is not intended
for regular use.
:param chunksize: the optional size of the read buffer in bytes,
it is recommended to use 0 to force automatic
evaluation of the best value.
"""
# Invalidate all remaining data
self._readoffset = 0
self._readbuffer = bytearray()
if chunksize == 0:
# status byte prolog is emitted every maxpacketsize, but for "some"
# reason, FT232R emits it every RX FIFO size bytes... Other
# devices use a maxpacketsize which is smaller than or equal to their
# FIFO size, so this weird behavior is for now only experienced
# with FT232R. Anyway, the following computation should address all
# devices.
chunksize = min(self.fifo_sizes[0], self.fifo_sizes[1],
self._max_packet_size)
if platform == 'linux':
if chunksize > 16384:
chunksize = 16384
self._readbuffer_chunksize = chunksize
self.log.debug('RX chunksize: %d', self._readbuffer_chunksize)
def read_data_get_chunksize(self) -> int:
"""Get read buffer chunk size.
:return: the size of the read buffer in bytes
"""
return self._readbuffer_chunksize
def set_bitmode(self, bitmask: int, mode: 'Ftdi.BitMode') -> None:
"""Enable/disable bitbang modes.
Switch the FTDI interface to bitbang mode.
"""
self.log.debug('bitmode: %s', mode.name)
mask = sum(Ftdi.BitMode)
value = (bitmask & 0xff) | ((mode.value & mask) << 8)
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_BITMODE, value):
raise FtdiError('Unable to set bitmode')
self._bitmode = mode
def read_pins(self) -> int:
"""Directly read pin state, circumventing the read buffer.
Useful for bitbang mode.
:return: bitfield of FTDI interface input GPIO
"""
pins = self._ctrl_transfer_in(Ftdi.SIO_REQ_READ_PINS, 1)
if not pins:
raise FtdiError('Unable to read pins')
return pins[0]
def set_cbus_direction(self, mask: int, direction: int) -> None:
"""Configure the CBUS pins used as GPIOs
:param mask: which pins to configure as GPIOs
:param direction: which pins are output (vs. input)
"""
# sanity check: there cannot be more than 4 CBUS pins in bitbang mode
if not 0 <= mask <= 0x0F:
raise ValueError('Invalid CBUS gpio mask: 0x%02x' % mask)
if not 0 <= direction <= 0x0F:
raise ValueError('Invalid CBUS gpio direction: 0x%02x' % direction)
self._cbus_pins = (mask, direction)
def get_cbus_gpio(self) -> int:
"""Get the CBUS pins configured as GPIO inputs
:return: bitfield of CBUS read pins
"""
if self._bitmode not in (Ftdi.BitMode.RESET, Ftdi.BitMode.CBUS):
raise FtdiError('CBUS gpio not available from current mode')
if not self._cbus_pins[0] & ~self._cbus_pins[1]:
raise FtdiError('No CBUS IO configured as input')
outv = (self._cbus_pins[1] << 4) | self._cbus_out
oldmode = self._bitmode
try:
self.set_bitmode(outv, Ftdi.BitMode.CBUS)
inv = self.read_pins()
finally:
if oldmode != self._bitmode:
self.set_bitmode(0, oldmode)
return inv & ~self._cbus_pins[1] & self._cbus_pins[0]
def set_cbus_gpio(self, pins: int) -> None:
"""Set the CBUS pins configured as GPIO outputs
:param pins: bitfield to apply to CBUS output pins
"""
if self._bitmode not in (Ftdi.BitMode.RESET, Ftdi.BitMode.CBUS):
raise FtdiError('CBUS gpio not available from current mode')
# sanity check: there cannot be more than 4 CBUS pins in bitbang mode
if not 0 <= pins <= 0x0F:
raise ValueError('Invalid CBUS gpio pins: 0x%02x' % pins)
if not self._cbus_pins[0] & self._cbus_pins[1]:
raise FtdiError('No CBUS IO configured as output')
pins &= self._cbus_pins[0] & self._cbus_pins[1]
value = (self._cbus_pins[1] << 4) | pins
oldmode = self._bitmode
try:
self.set_bitmode(value, Ftdi.BitMode.CBUS)
self._cbus_out = pins
finally:
if oldmode != self._bitmode:
self.set_bitmode(0, oldmode)
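# CBUS sketch (illustrative masks; assumes the EEPROM already configures
# CBUS0/CBUS1 as GPIO, which this API cannot do by itself):
#
#   ftdi.set_cbus_direction(0b0011, 0b0001)   # CBUS0 output, CBUS1 input
#   ftdi.set_cbus_gpio(0b0001)                # drive CBUS0 high
#   level = ftdi.get_cbus_gpio()              # read back CBUS1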
def set_latency_timer(self, latency: int):
"""Set latency timer.
The FTDI chip keeps data in the internal buffer for a specific
amount of time if the buffer is not full yet to decrease
load on the USB bus.
The shorter the latency, the shorter the delay to obtain data and
the higher the host CPU load. Be careful with this option.
:param latency: latency (unspecified unit)
"""
if not Ftdi.LATENCY_MIN <= latency <= Ftdi.LATENCY_MAX:
raise ValueError("Latency out of range")
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_LATENCY_TIMER, latency):
raise FtdiError('Unable to set latency timer')
def get_latency_timer(self) -> int:
"""Get latency timer.
:return: the current latency (unspecified unit)
"""
latency = self._ctrl_transfer_in(Ftdi.SIO_REQ_GET_LATENCY_TIMER, 1)
if not latency:
raise FtdiError('Unable to get latency')
return latency[0]
def poll_modem_status(self) -> int:
"""Poll modem status information.
This function retrieves the two status bytes of the device, useful
in UART mode.
As the FTDI device does not have a so-called USB "interrupt"
endpoint, event polling on the UART interface is done through the
regular control endpoint.
see :py:func:`modem_status` to obtain decoded status strings
:return: modem status, as a proprietary bitfield
"""
value = self._ctrl_transfer_in(Ftdi.SIO_REQ_POLL_MODEM_STATUS, 2)
if not value or len(value) != 2:
raise FtdiError('Unable to get modem status')
status, = sunpack('<H', value)
return status
def modem_status(self) -> Tuple[str, ...]:
"""Provide the current modem status as a tuple of set signals
:return: decoded modem status as short strings
"""
value = self._ctrl_transfer_in(Ftdi.SIO_REQ_POLL_MODEM_STATUS, 2)
if not value or len(value) != 2:
raise FtdiError('Unable to get modem status')
return self.decode_modem_status(value)
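# Status sketch (illustrative) on an open UART-mode port:
#
#   if 'cts' in ftdi.modem_status():
#       pass  # remote end is ready, data may be transmitted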
def set_flowctrl(self, flowctrl: str) -> None:
"""Select flowcontrol in UART mode.
Either hardware flow control through RTS/CTS UART lines,
software or no flow control.
:param str flowctrl: either 'hw' for HW flow control or '' (empty
string) for no flow control.
:raise ValueError: if the flow control argument is invalid
.. note:: How does RTS/CTS flow control work (from FTDI FAQ):
FTxxx RTS# pin is an output. It should be connected to the CTS#
input pin of the device at the other end of the UART link.
* If RTS# is logic 0 it is indicating the FTxxx device can
accept more data on the RXD pin.
* If RTS# is logic 1 it is indicating the FTxxx device
cannot accept more data.
RTS# changes state when the chip buffer reaches its last 32
bytes of space to allow time for the external device to stop
sending data to the FTxxx device.
FTxxx CTS# pin is an input. It should be connected to the RTS#
output pin of the device at the other end of the UART link.
* If CTS# is logic 0 it is indicating the external device can
accept more data, and the FTxxx will transmit on the TXD
pin.
* If CTS# is logic 1 it is indicating the external device
cannot accept more data. the FTxxx will stop transmitting
within 0~3 characters, depending on what is in the buffer.
**This potential 3 character overrun does occasionally
present problems.** Customers should be made aware the FTxxx
is a USB device and not a "normal" RS232 device as seen on
a PC. As such the device operates on a packet basis as
opposed to a byte basis.
Word to the wise. Not only do RS232 level shifting devices
level shift, but they also invert the signal.
"""
ctrl = {'hw': Ftdi.SIO_RTS_CTS_HS,
'': Ftdi.SIO_DISABLE_FLOW_CTRL}
try:
value = ctrl[flowctrl] | self._index
except KeyError as exc:
raise ValueError('Unknown flow control: %s' % flowctrl) from exc
try:
if self._usb_dev.ctrl_transfer(
Ftdi.REQ_OUT, Ftdi.SIO_REQ_SET_FLOW_CTRL, 0, value,
bytearray(), self._usb_write_timeout):
raise FtdiError('Unable to set flow control')
except USBError as exc:
raise FtdiError('UsbError: %s' % str(exc)) from exc
def set_dtr(self, state: bool) -> None:
"""Set dtr line
:param state: new DTR logical level
"""
value = Ftdi.SIO_SET_DTR_HIGH if state else Ftdi.SIO_SET_DTR_LOW
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_MODEM_CTRL, value):
raise FtdiError('Unable to set DTR line')
def set_rts(self, state: bool) -> None:
"""Set rts line
:param state: new RTS logical level
"""
value = Ftdi.SIO_SET_RTS_HIGH if state else Ftdi.SIO_SET_RTS_LOW
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_MODEM_CTRL, value):
raise FtdiError('Unable to set RTS line')
def set_dtr_rts(self, dtr: bool, rts: bool) -> None:
"""Set dtr and rts lines at once
:param dtr: new DTR logical level
:param rts: new RTS logical level
"""
value = 0
value |= Ftdi.SIO_SET_DTR_HIGH if dtr else Ftdi.SIO_SET_DTR_LOW
value |= Ftdi.SIO_SET_RTS_HIGH if rts else Ftdi.SIO_SET_RTS_LOW
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_MODEM_CTRL, value):
raise FtdiError('Unable to set DTR/RTS lines')
def set_break(self, break_: bool) -> None:
"""Start or stop a break exception event on the serial line
:param break_: either start or stop break event
"""
if break_:
value = self._lineprop | (0x01 << 14)
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_DATA, value):
raise FtdiError('Unable to start break sequence')
else:
value = self._lineprop & ~(0x01 << 14)
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_DATA, value):
raise FtdiError('Unable to stop break sequence')
self._lineprop = value
def set_event_char(self, eventch: int, enable: bool) -> None:
"""Set the special event character"""
value = eventch
if enable:
value |= 1 << 8
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_EVENT_CHAR, value):
raise FtdiError('Unable to set event char')
def set_error_char(self, errorch: int, enable: bool) -> None:
"""Set error character"""
value = errorch
if enable:
value |= 1 << 8
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_ERROR_CHAR, value):
raise FtdiError('Unable to set error char')
def set_line_property(self, bits: int, stopbit: Union[int, float],
parity: str, break_: bool = False) -> None:
"""Configure the (RS232) UART characteristics.
Arguments match the subset of pyserial definitions that is valid
for FTDI hardware.
Bits accepts one of the following values:
* ``7`` for 7-bit characters
* ``8`` for 8-bit characters
Stopbit accepts one of the following values:
* ``1`` for a single bit
* ``1.5`` for a bit and a half
* ``2`` for two bits
Parity accepts one of the following strings:
* ``N`` for no parity bit
* ``O`` for odd parity bit
* ``E`` for even parity bit
* ``M`` for parity bit always set
* ``S`` for parity bit always reset
:param bits: data bit count
:param stopbit: stop bit count
:param parity: parity mode as a single uppercase character
:param break_: force break event
"""
bytelength = {7: Ftdi.BITS_7,
8: Ftdi.BITS_8}
parities = {'N': Ftdi.PARITY_NONE,
'O': Ftdi.PARITY_ODD,
'E': Ftdi.PARITY_EVEN,
'M': Ftdi.PARITY_MARK,
'S': Ftdi.PARITY_SPACE}
stopbits = {1: Ftdi.STOP_BIT_1,
1.5: Ftdi.STOP_BIT_15,
2: Ftdi.STOP_BIT_2}
if parity not in parities:
raise FtdiFeatureError("Unsupported parity")
if bits not in bytelength:
raise FtdiFeatureError("Unsupported byte length")
if stopbit not in stopbits:
raise FtdiFeatureError("Unsupported stop bits")
value = bits & 0x0F
try:
value |= {Ftdi.PARITY_NONE: 0x00 << 8,
Ftdi.PARITY_ODD: 0x01 << 8,
Ftdi.PARITY_EVEN: 0x02 << 8,
Ftdi.PARITY_MARK: 0x03 << 8,
Ftdi.PARITY_SPACE: 0x04 << 8}[parities[parity]]
value |= {Ftdi.STOP_BIT_1: 0x00 << 11,
Ftdi.STOP_BIT_15: 0x01 << 11,
Ftdi.STOP_BIT_2: 0x02 << 11}[stopbits[stopbit]]
if break_ == Ftdi.BREAK_ON:
value |= 0x01 << 14
except KeyError as exc:
raise ValueError('Invalid line property') from exc
if self._ctrl_transfer_out(Ftdi.SIO_REQ_SET_DATA, value):
raise FtdiError('Unable to set line property')
self._lineprop = value
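# Line-property sketch (illustrative): configure the classic 8N1 format.
#
#   ftdi.set_line_property(8, 1, 'N')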
def enable_adaptive_clock(self, enable: bool = True) -> None:
"""Enable adaptative clock mode, useful in MPSEE mode.
Adaptive clock is a unique feature designed for a feedback clock
for JTAG with ARM core.
:param enable: whether to enable or disable this mode.
:raise FtdiMpsseError: if MPSSE mode is not enabled
"""
if not self.is_mpsse:
raise FtdiMpsseError('Setting adaptive clock mode is only '
'available from MPSSE mode')
self.write_data(bytearray([enable and Ftdi.ENABLE_CLK_ADAPTIVE or
Ftdi.DISABLE_CLK_ADAPTIVE]))
def enable_3phase_clock(self, enable: bool = True) -> None:
"""Enable 3-phase clocking mode, useful in MPSSE mode.
3-phase clock is mostly useful with I2C mode. It can also be used
as a workaround to support SPI mode 3.
:param enable: whether to enable or disable this mode.
:raise FtdiMpsseError: if MPSSE mode is not enabled or device is
not capable of 3-phase clocking
"""
if not self.is_mpsse:
raise FtdiMpsseError('Setting 3-phase clock mode is only '
'available from MPSSE mode')
if not self.is_H_series:
raise FtdiFeatureError('This device does not support 3-phase '
'clock')
self.write_data(bytearray([enable and Ftdi.ENABLE_CLK_3PHASE or
Ftdi.DISABLE_CLK_3PHASE]))
def enable_drivezero_mode(self, lines: int) -> None:
"""Enable drive-zero mode, useful in MPSSE mode.
drive-zero mode is mostly useful with I2C mode, to support the open
collector driving mode.
:param lines: bitfield of GPIO to drive in collector driven mode
:raise FtdiMpsseError: if MPSSE mode is not enabled or device is
not capable of drive-zero mode
"""
if not self.is_mpsse:
raise FtdiMpsseError('Setting drive-zero mode is only '
'available from MPSSE mode')
if not self.has_drivezero:
raise FtdiFeatureError('This device does not support drive-zero '
'mode')
self.write_data(bytearray([Ftdi.DRIVE_ZERO, lines & 0xff,
(lines >> 8) & 0xff]))
def enable_loopback_mode(self, loopback: bool = False) -> None:
"""Enable loopback, i.e. connect DO to DI in FTDI MPSSE port for test
purposes only. It does not support UART (TX to RX) mode.
:param loopback: whether to enable or disable this mode
"""
self.write_data(bytearray((Ftdi.LOOPBACK_START if loopback else
Ftdi.LOOPBACK_END,)))
def calc_eeprom_checksum(self, data: Union[bytes, bytearray]) -> int:
"""Calculate EEPROM checksum over the data
:param data: data to compute checksum over. Must be an even number
of bytes
:return: checksum
"""
length = len(data)
if not length:
raise ValueError('No data to checksum')
if length & 0x1:
raise ValueError('Length not even')
# NOTE: checksum is computed using 16-bit values in little endian
# ordering
checksum = 0XAAAA
mtp = self.device_version == 0x1000 # FT230X
for idx in range(0, length, 2):
if mtp and 0x24 <= idx < 0x80:
# special MTP user section which is not considered for the CRC
continue
val = ((data[idx+1] << 8) + data[idx]) & 0xffff
checksum = val ^ checksum
checksum = ((checksum << 1) & 0xffff) | ((checksum >> 15) & 0xffff)
return checksum
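# Worked sketch (fabricated content): the checksum XORs each little-endian
# 16-bit word into an accumulator seeded with 0xAAAA, rotating the
# accumulator left by one bit after each word.
#
#   blank = bytes(254)   # e.g. a 256-byte EEPROM minus its checksum word
#   chksum = ftdi.calc_eeprom_checksum(blank)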
def read_eeprom(self, addr: int = 0, length: Optional[int] = None,
eeprom_size: Optional[int] = None) -> bytes:
"""Read the EEPROM starting at byte address, addr, and returning
length bytes. Here, addr and length are in bytes but we
access a 16-bit word at a time, so automatically update
addr and length to work with word accesses.
:param addr: byte address that desire to read.
:param length: byte length to read or None
:param eeprom_size: total size in bytes of the eeprom or None
:return: eeprom bytes, as an array of bytes
"""
eeprom_size = self._check_eeprom_size(eeprom_size)
if length is None:
length = eeprom_size
if addr < 0 or (addr+length) > eeprom_size:
raise ValueError('Invalid address/length')
word_addr = addr >> 1
word_count = length >> 1
if (addr & 0x1) | (length & 0x1):
word_count += 1
try:
data = bytearray()
while word_count:
buf = self._usb_dev.ctrl_transfer(
Ftdi.REQ_IN, Ftdi.SIO_REQ_READ_EEPROM, 0,
word_addr, 2, self._usb_read_timeout)
if not buf:
raise FtdiEepromError('EEPROM read error @ %d' %
(word_addr << 1))
data.extend(buf)
word_count -= 1
word_addr += 1
start = addr & 0x1
return bytes(data[start:start+length])
except USBError as exc:
raise FtdiError('UsbError: %s' % exc) from exc
def write_eeprom(self, addr: int, data: Union[bytes, bytearray],
eeprom_size: Optional[int] = None,
dry_run: bool = True) -> None:
"""Write multiple bytes to the EEPROM starting at byte address,
addr. This function also updates the checksum
automatically.
.. warning:: You can brick your device with invalid size or content.
Use this function at your own risk, and RTFM.
:param addr: starting byte address to start writing
:param data: data to be written
:param eeprom_size: total size in bytes of the eeprom or None
:param dry_run: log what should be written, do not actually
change the EEPROM content
"""
eeprom_size = self._check_eeprom_size(eeprom_size)
if not data:
return
length = len(data)
if addr < 0 or (addr+length) > eeprom_size:
# accept up to eeprom_size, even if the last two bytes are
# overwritten with a locally computed checksum
raise ValueError('Invalid address/length')
# First, read out the entire EEPROM, based on eeprom_size.
eeprom = bytearray(self.read_eeprom(0, eeprom_size))
# patch in the new data
eeprom[addr:addr+len(data)] = data
# compute new checksum
chksum = self.calc_eeprom_checksum(eeprom[:-2])
self.log.info('New EEPROM checksum: 0x%04x', chksum)
# insert updated checksum - it is last 16-bits in EEPROM
if self.device_version == 0x1000:
# FT230x EEPROM structure is different
eeprom[0x7e] = chksum & 0x0ff
eeprom[0x7f] = chksum >> 8
else:
eeprom[-2] = chksum & 0x0ff
eeprom[-1] = chksum >> 8
# Write back the new data and checksum back to
# EEPROM. Only write data that is changing instead of writing
# everything in EEPROM, even if the data does not change.
#
# Compute start and end sections of the EEPROM, bearing in mind that
# they must be even since it is a 16-bit EEPROM.
# If start addr is odd, back it up one.
start = addr
size = length
if start & 0x1:
start -= 1
size += 1
if size & 0x1:
size += 1
if size > eeprom_size-2:
size = eeprom_size-2
# finally, write new section of data and ...
self._write_eeprom_raw(start, eeprom[start:start+size],
dry_run=dry_run)
# ... updated checksum
self._write_eeprom_raw((eeprom_size-2), eeprom[-2:], dry_run=dry_run)
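# EEPROM sketch (the address and payload are made-up placeholders): keep
# dry_run=True until the logged output has been reviewed, as a bad write
# may brick the device.
#
#   ftdi.write_eeprom(0x10, b'\x01\x02', dry_run=True)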
def overwrite_eeprom(self, data: Union[bytes, bytearray],
dry_run: bool = True) -> None:
"""Write the whole EEPROM content, from first to last byte.
.. warning:: You can brick your device with invalid size or content.
Use this function at your own risk, and RTFM.
:param data: data to be written (should include the checksum)
:param dry_run: log what should be written, do not actually
change the EEPROM content
"""
if self.is_eeprom_internal:
eeprom_size = self.INT_EEPROMS[self.device_version]
if len(data) != eeprom_size:
raise ValueError('Invalid EEPROM size')
elif len(data) not in self.EXT_EEPROM_SIZES:
raise ValueError('Invalid EEPROM size')
self._write_eeprom_raw(0, data, dry_run=dry_run)
def write_data(self, data: Union[bytes, bytearray]) -> int:
"""Write data to the FTDI port.
In UART mode, data contains the serial stream to write to the UART
interface.
In MPSSE mode, data contains the sequence of MPSSE commands and
data.
Data buffer is split into chunk-sized blocks before being sent over
the USB bus.
:param data: the byte stream to send to the FTDI interface
:return: count of written bytes
"""
offset = 0
size = len(data)
try:
while offset < size:
write_size = self._writebuffer_chunksize
if offset + write_size > size:
write_size = size - offset
length = self._write(data[offset:offset+write_size])
if length <= 0:
raise FtdiError("Usb bulk write error")
offset += length
return offset
except USBError as exc:
raise FtdiError('UsbError: %s' % str(exc)) from exc
def read_data_bytes(self, size: int, attempt: int = 1,
request_gen: Optional[Callable[[int], bytes]] = None) \
-> bytes:
"""Read data from the FTDI interface
In UART mode, data contains the serial stream read from the UART
interface.
In MPSSE mode, data contains the sequence of data received and
processed with the MPSSE engine.
Data buffer is rebuilt from chunk-sized blocks received over the USB
bus.
FTDI device always sends internal status bytes, which are stripped
out as not part of the data payload.
Because of the multiple buses, buffers, FIFOs, and MPSSE command
processing, data might not be immediately available on the host
side. The attempt argument can be used to increase the attempt count
to retrieve the expected amount of data, before giving up and
returning all the received data, which may be shorter than the
requested amount.
:param size: the number of bytes to receive from the device
:param attempt: attempt cycle count
:param request_gen: a callable that takes the number of bytes read
and returns a byte buffer to send back to the
remote device. This is only useful to perform
optimized/continuous transfer from a slave
device.
:return: payload bytes, as bytes
"""
# Packet size sanity check
if not self._max_packet_size:
raise FtdiError("max_packet_size is bogus")
packet_size = self._max_packet_size
length = 1 # initial condition to enter the usb_read loop
data = bytearray()
# everything we want is still in the cache?
if size <= len(self._readbuffer)-self._readoffset:
data = self._readbuffer[self._readoffset:self._readoffset+size]
self._readoffset += size
return data
# something still in the cache, but not enough to satisfy 'size'?
if len(self._readbuffer)-self._readoffset != 0:
data = self._readbuffer[self._readoffset:]
# end of readbuffer reached
self._readoffset = len(self._readbuffer)
# read from USB, filling in the local cache as it is empty
retry = attempt
req_size = size
try:
while (len(data) < size) and (length > 0):
while True:
tempbuf = self._read()
retry -= 1
length = len(tempbuf)
# the received buffer contains at least one useful databyte
# (first 2 bytes in each packet represent the current modem
# status)
if length >= 2:
if tempbuf[1] & self.TX_EMPTY_BITS:
if request_gen:
req_size -= length-2
if req_size > 0:
cmd = request_gen(req_size)
if cmd:
self.write_data(cmd)
if length > 2:
retry = attempt
if self._latency_threshold:
self._adapt_latency(True)
# skip the status bytes
chunks = (length+packet_size-1) // packet_size
count = packet_size - 2
# decode the status bytes and log any flagged error condition
status = tempbuf[:2]
if status[1] & self.ERROR_BITS[1]:
self.log.error(
'FTDI error: %02x:%02x %s',
status[0], status[1], (' '.join(
self.decode_modem_status(status,
True)).title()))
self._readbuffer = bytearray()
self._readoffset = 0
srcoff = 2
for _ in range(chunks):
self._readbuffer += tempbuf[srcoff:srcoff+count]
srcoff += packet_size
length = len(self._readbuffer)
break
# received buffer only contains the modem status bytes
# no data received, may be late, try again
if retry > 0:
continue
# no actual data
self._readbuffer = bytearray()
self._readoffset = 0
if self._latency_threshold:
self._adapt_latency(False)
# no more data to read?
return data
if length > 0:
# data still fits in buf?
if (len(data) + length) <= size:
data += self._readbuffer[self._readoffset:
self._readoffset+length]
self._readoffset += length
# did we read exactly the right amount of bytes?
if len(data) == size:
return data
else:
# partial copy, not enough bytes in the local cache to
# fulfill the request
part_size = min(size-len(data),
len(self._readbuffer)-self._readoffset)
if part_size < 0:
raise FtdiError("Internal Error")
data += self._readbuffer[self._readoffset:
self._readoffset+part_size]
self._readoffset += part_size
return data
except USBError as exc:
raise FtdiError('UsbError: %s' % str(exc)) from exc
# never reached
raise FtdiError("Internal error")
def read_data(self, size: int) -> bytes:
"""Shortcut to received a bytes buffer instead of the array of bytes.
Note that output byte buffer may be shorted than the requested
size.
:param size: the number of bytes to received from the device
:return: payload bytes
"""
return bytes(self.read_data_bytes(size))
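    # Usage sketch (not part of the original source): fetch a fixed-size
    # payload with a few extra polling attempts. `dev` stands for an Ftdi
    # instance the caller has already opened; the URL is an example value.
    #
    #   dev = Ftdi()
    #   dev.open_from_url('ftdi://ftdi:232h/1')
    #   payload = dev.read_data_bytes(64, attempt=4)
    #   print('%d bytes received' % len(payload))  # may be fewer than 64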
def get_cts(self) -> bool:
"""Read terminal status line: Clear To Send
:return: CTS line logical level
"""
status = self.poll_modem_status()
return bool(status & self.MODEM_CTS)
def get_dsr(self) -> bool:
"""Read terminal status line: Data Set Ready
:return: DSR line logical level
"""
status = self.poll_modem_status()
return bool(status & self.MODEM_DSR)
def get_ri(self) -> bool:
"""Read terminal status line: Ring Indicator
:return: RI line logical level
"""
status = self.poll_modem_status()
return bool(status & self.MODEM_RI)
def get_cd(self) -> bool:
"""Read terminal status line: Carrier Detect
:return: CD line logical level
"""
status = self.poll_modem_status()
return bool(status & self.MODEM_RLSD)
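    # Usage sketch: the four getters above simply decode poll_modem_status();
    # e.g. gating a write on the remote end being ready (`dev` is a
    # hypothetical, already-opened Ftdi instance):
    #
    #   if dev.get_cts():
    #       dev.write_data(b'hello')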
def set_dynamic_latency(self, lmin: int, lmax: int,
threshold: int) -> None:
"""Set up or disable latency values.
        Dynamic latency management is a load balancer that adapts the
        responsiveness of FTDI read requests vs. the host CPU load.
        It is mostly useful in UART mode, so that read bandwidth can be
        increased to the maximum achievable throughput, while maintaining
        very low host CPU load when no data is received from the UART.
        There should be no need to tweak the default values. Use with care.
        Minimum latency is limited to 12 ms or above, as the FTDI device
        starts losing bytes when the latency is too short.
        Maximum latency value is 255 ms.
Polling latency is reset to `lmin` each time at least one payload
byte is received from the FTDI device.
It doubles, up to `lmax`, every `threshold` times no payload has
been received from the FTDI device.
:param lmin: minimum latency level (ms)
        :param lmax: maximum latency level (ms)
        :param threshold: number of consecutive payload-less reads after
                          which the latency is doubled, up to the maximum
"""
if not threshold:
self._latency_count = 0
self._latency_threshold = None
else:
for lat in (lmin, lmax):
if not self.LATENCY_MIN <= lat <= self.LATENCY_MAX:
raise ValueError("Latency out of range: %d" % lat)
self._latency_min = lmin
self._latency_max = lmax
self._latency_threshold = threshold
self._latency = lmin
self.set_latency_timer(self._latency)
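    # Usage sketch (illustrative values): let the poll latency float between
    # 12 ms and 200 ms, doubling after 10 consecutive payload-less reads:
    #
    #   dev.set_dynamic_latency(12, 200, 10)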
def validate_mpsse(self) -> None:
"""Check that the previous MPSSE request has been accepted by the FTDI
device.
:raise FtdiError: if the FTDI device rejected the command.
"""
# only useful in MPSSE mode
bytes_ = self.read_data(2)
        # the MPSSE engine answers 0xfa followed by the offending command
        # byte when it receives an invalid command; comparing against the
        # string '\xfa' would always be False on a bytes buffer
        if (len(bytes_) >= 2) and (bytes_[0] == 0xfa):
raise FtdiError("Invalid command @ %d" % bytes_[1])
@classmethod
def get_error_string(cls) -> str:
"""Wrapper for legacy compatibility.
:return: a constant, meaningless string
"""
return "Unknown error"
# --- Private implementation -------------------------------------------
def _set_interface(self, config: UsbConfiguration, ifnum: int):
"""Select the interface to use on the FTDI device"""
if ifnum == 0:
ifnum = 1
if ifnum-1 not in range(config.bNumInterfaces):
raise ValueError("No such interface for this device")
self._interface = config[(ifnum-1, 0)]
self._index = self._interface.bInterfaceNumber+1
endpoints = sorted([ep.bEndpointAddress for ep in self._interface])
self._in_ep, self._out_ep = endpoints[:2]
# detach kernel driver from the interface
try:
if self._usb_dev.is_kernel_driver_active(self._index - 1):
self._usb_dev.detach_kernel_driver(self._index - 1)
except (NotImplementedError, USBError):
pass
#pylint: disable-msg=protected-access
# need to access private member _ctx of PyUSB device (resource manager)
# until PyUSB #302 is addressed
def _reset_usb_device(self) -> None:
"""Reset USB device (USB command, not FTDI specific)."""
self._usb_dev._ctx.backend.reset_device(self._usb_dev._ctx.handle)
def _is_pyusb_handle_active(self) -> bool:
# Unfortunately, we need to access pyusb ResourceManager
# and there is no public API for this.
return bool(self._usb_dev._ctx.handle)
#pylint: enable-msg=protected-access
def _reset_device(self):
"""Reset the FTDI device (FTDI vendor command)"""
if self._ctrl_transfer_out(Ftdi.SIO_REQ_RESET,
Ftdi.SIO_RESET_SIO):
raise FtdiError('Unable to reset FTDI device')
def _ctrl_transfer_out(self, reqtype: int, value: int, data: bytes = b''):
"""Send a control message to the device"""
try:
return self._usb_dev.ctrl_transfer(
Ftdi.REQ_OUT, reqtype, value, self._index,
bytearray(data), self._usb_write_timeout)
except USBError as ex:
raise FtdiError('UsbError: %s' % str(ex)) from None
def _ctrl_transfer_in(self, reqtype: int, length: int):
"""Request for a control message from the device"""
try:
return self._usb_dev.ctrl_transfer(
Ftdi.REQ_IN, reqtype, 0, self._index, length,
self._usb_read_timeout)
except USBError as ex:
raise FtdiError('UsbError: %s' % str(ex)) from None
def _write(self, data: Union[bytes, bytearray]) -> int:
if self._debug_log:
try:
self.log.debug('> %s', hexlify(data).decode())
except TypeError as exc:
self.log.warning('> (invalid output byte sequence: %s)', exc)
if self._tracer:
self._tracer.send(self._index, data)
try:
return self._usb_dev.write(self._in_ep, data,
self._usb_write_timeout)
except USBError as ex:
raise FtdiError('UsbError: %s' % str(ex)) from None
def _read(self) -> bytes:
try:
data = self._usb_dev.read(self._out_ep, self._readbuffer_chunksize,
self._usb_read_timeout)
except USBError as ex:
raise FtdiError('UsbError: %s' % str(ex)) from None
if data:
if self._debug_log:
self.log.debug('< %s', hexlify(data).decode())
if self._tracer and len(data) > 2:
self._tracer.receive(self._index, data[2:])
return data
def _adapt_latency(self, payload_detected: bool) -> None:
"""Dynamic latency adaptation depending on the presence of a
payload in a RX buffer.
:param payload_detected: whether a payload has been received
within last RX buffer
"""
if payload_detected:
self._latency_count = 0
if self._latency != self._latency_min:
self.set_latency_timer(self._latency_min)
self._latency = self._latency_min
return
# no payload received
self._latency_count += 1
if self._latency != self._latency_max:
if self._latency_count > \
self._latency_threshold:
self._latency *= 2
if self._latency > self._latency_max:
self._latency = self._latency_max
else:
self._latency_count = 0
self.set_latency_timer(self._latency)
def _check_eeprom_size(self, eeprom_size: Optional[int]) -> int:
if self.device_version in self.INT_EEPROMS:
if (eeprom_size and
eeprom_size != self.INT_EEPROMS[self.device_version]):
raise ValueError('Invalid EEPROM size: %d' % eeprom_size)
eeprom_size = self.INT_EEPROMS[self.device_version]
else:
if eeprom_size is None:
eeprom_size = self.max_eeprom_size
if eeprom_size not in self.EXT_EEPROM_SIZES:
raise ValueError('Invalid EEPROM size: %d' % eeprom_size)
return eeprom_size
def _write_eeprom_raw(self, addr: int, data: Union[bytes, bytearray],
dry_run: bool = True) -> None:
"""Write multiple bytes to the EEPROM starting at byte address,
addr. Length of data must be a multiple of 2 since the
EEPROM is 16-bits. So automatically extend data by 1 byte
if this is not the case.
:param int addr: starting byte address to start writing
:param bytes data: data to be written
:param dry_run: log what should be written, do not actually
change the EEPROM content
"""
if self.device_version == 0x0600:
# FT232R internal EEPROM is unstable and latency timer seems
# to have a direct impact on EEPROM programming...
latency = self.get_latency_timer()
else:
latency = 0
try:
if latency:
self.set_latency_timer(self.LATENCY_EEPROM_FT232R)
length = len(data)
if addr & 0x1 or length & 0x1:
raise ValueError('Address/length not even')
for word in sunpack('<%dH' % (length//2), data):
if not dry_run:
out = self._usb_dev.ctrl_transfer(
Ftdi.REQ_OUT, Ftdi.SIO_REQ_WRITE_EEPROM,
word, addr >> 1, b'', self._usb_write_timeout)
if out:
raise FtdiEepromError('EEPROM Write Error @ %d' % addr)
self.log.debug('Write EEPROM [0x%02x]: 0x%04x', addr, word)
else:
self.log.info('Fake write EEPROM [0x%02x]: 0x%04x',
addr, word)
addr += 2
finally:
if latency:
self.set_latency_timer(latency)
def _get_max_packet_size(self) -> int:
"""Retrieve the maximum length of a data packet"""
if not self.is_connected:
raise IOError("Device is not yet known", ENODEV)
if not self._interface:
raise IOError("Interface is not yet known", ENODEV)
endpoint = self._interface[0]
packet_size = endpoint.wMaxPacketSize
return packet_size
def _convert_baudrate_legacy(self, baudrate: int) -> Tuple[int, int, int]:
if baudrate > self.BAUDRATE_REF_BASE:
raise ValueError('Invalid baudrate (too high)')
div8 = int(round((8 * self.BAUDRATE_REF_BASE) / baudrate))
if (div8 & 0x7) == 7:
div8 += 1
div = div8 >> 3
div8 &= 0x7
if div8 == 1:
div |= 0xc000
elif div8 >= 4:
div |= 0x4000
elif div8 != 0:
div |= 0x8000
elif div == 1:
div = 0
value = div & 0xFFFF
index = (div >> 16) & 0xFFFF
estimate = int(((8 * self.BAUDRATE_REF_BASE) + (div8//2))//div8)
return estimate, value, index
def _convert_baudrate(self, baudrate: int) -> Tuple[int, int, int]:
"""Convert a requested baudrate into the closest possible baudrate
that can be assigned to the FTDI device
:param baudrate: the baudrate in bps
:return: a 3-uple of the apprimated baudrate, the value and index
to use as the USB configuration parameter
"""
if self.device_version == 0x200:
return self._convert_baudrate_legacy(baudrate)
if self.is_H_series and baudrate >= 1200:
hispeed = True
clock = self.BAUDRATE_REF_HIGH
bb_ratio = self.BITBANG_BAUDRATE_RATIO_HIGH
else:
hispeed = False
clock = self.BAUDRATE_REF_BASE
bb_ratio = self.BITBANG_BAUDRATE_RATIO_BASE
if baudrate > clock:
raise ValueError('Invalid baudrate (too high)')
if baudrate < ((clock >> 14) + 1):
raise ValueError('Invalid baudrate (too low)')
if self.is_bitbang_enabled:
baudrate //= bb_ratio
div8 = int(round((8 * clock) / baudrate))
div = div8 >> 3
div |= self.FRAC_DIV_CODE[div8 & 0x7] << 14
if div == 1:
div = 0
elif div == 0x4001:
div = 1
if hispeed:
div |= 0x00020000
value = div & 0xFFFF
index = (div >> 16) & 0xFFFF
if self.device_version >= 0x0700 or self.device_version == 0x0500:
index <<= 8
index |= self._index
estimate = int(((8 * clock) + (div8//2))//div8)
if self.is_bitbang_enabled:
estimate *= bb_ratio
return estimate, value, index
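    # Worked example for the divisor math above (assuming the usual 3 MHz
    # reference clock on a full-speed device, bitbang disabled): requesting
    # 115200 bps yields div8 = round(8 * 3e6 / 115200) = 208, hence
    # div = 208 >> 3 = 26 with no fractional bits, and the achievable rate is
    # (8 * 3e6 + 104) // 208 = 115385 bps, about 0.16% above the target.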
def _set_baudrate(self, baudrate: int, constrain: bool) -> int:
if self.is_mpsse:
raise FtdiFeatureError('Cannot change frequency w/ current mode')
actual, value, index = self._convert_baudrate(baudrate)
delta = 100*abs(float(actual-baudrate))/baudrate
self.log.debug('Actual baudrate: %d %.1f%% div [%04x:%04x]',
actual, delta, index, value)
# return actual
if constrain and delta > Ftdi.BAUDRATE_TOLERANCE:
raise ValueError('Baudrate tolerance exceeded: %.02f%% '
'(wanted %d, achievable %d)' %
(delta, baudrate, actual))
try:
if self._usb_dev.ctrl_transfer(
Ftdi.REQ_OUT, Ftdi.SIO_REQ_SET_BAUDRATE, value, index,
bytearray(), self._usb_write_timeout):
raise FtdiError('Unable to set baudrate')
return actual
except USBError as exc:
raise FtdiError('UsbError: %s' % str(exc)) from exc
def _set_frequency(self, frequency: float) -> float:
"""Convert a frequency value into a TCK divisor setting"""
if not self.is_mpsse:
raise FtdiFeatureError('Cannot change frequency w/ current mode')
if frequency > self.frequency_max:
raise FtdiFeatureError('Unsupported frequency: %f' % frequency)
# Calculate base speed clock divider
divcode = Ftdi.ENABLE_CLK_DIV5
divisor = int((Ftdi.BUS_CLOCK_BASE+frequency/2)/frequency)-1
divisor = max(0, min(0xFFFF, divisor))
actual_freq = Ftdi.BUS_CLOCK_BASE/(divisor+1)
error = (actual_freq/frequency)-1
# Should we use high speed clock available in H series?
if self.is_H_series:
# Calculate high speed clock divider
divisor_hs = int((Ftdi.BUS_CLOCK_HIGH+frequency/2)/frequency)-1
divisor_hs = max(0, min(0xFFFF, divisor_hs))
actual_freq_hs = Ftdi.BUS_CLOCK_HIGH/(divisor_hs+1)
error_hs = (actual_freq_hs/frequency)-1
            # Enable if relatively closer to the desired frequency
if abs(error_hs) < abs(error):
divcode = Ftdi.DISABLE_CLK_DIV5
divisor = divisor_hs
actual_freq = actual_freq_hs
error = error_hs
# FTDI expects little endian
if self.is_H_series:
cmd = bytearray((divcode,))
else:
cmd = bytearray()
cmd.extend((Ftdi.SET_TCK_DIVISOR, divisor & 0xff,
(divisor >> 8) & 0xff))
self.write_data(cmd)
self.validate_mpsse()
# Drain input buffer
self.purge_rx_buffer()
        # Note that the bus frequency may differ from the clock frequency:
        # when the 3-phase clock is enabled, bus frequency = 2/3 clock
        # frequency
if actual_freq > 1E6:
self.log.debug('Clock frequency: %.6f MHz (error: %+.1f %%)',
(actual_freq/1E6), error*100)
else:
self.log.debug('Clock frequency: %.3f KHz (error: %+.1f %%)',
(actual_freq/1E3), error*100)
return actual_freq
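    # Worked example (assuming the usual 6 MHz base / 30 MHz high-speed
    # clocks): requesting 1 MHz gives a base divisor of 5, i.e. exactly
    # 6 MHz / 6 = 1.000 MHz, while the high-speed divisor of 30 would land
    # at 30 MHz / 31 ≈ 0.968 MHz; the exact base-clock setting therefore
    # wins and CLK_DIV5 stays enabled.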
def __get_timeouts(self) -> Tuple[int, int]:
return self._usb_read_timeout, self._usb_write_timeout
def __set_timeouts(self, timeouts: Tuple[int, int]):
(read_timeout, write_timeout) = timeouts
self._usb_read_timeout = read_timeout
self._usb_write_timeout = write_timeout
timeouts = property(__get_timeouts, __set_timeouts)
|
UTF-8
|
Python
| false
| false
| 97,088
|
py
| 57
|
ftdi.py
| 42
| 0.566074
| 0.548399
| 0.00001
| 2,341
| 40.472875
| 79
|
lemonad/molnet-polls
| 16,097,537,459,108
|
7100c13bb736549a48f4f493232bf747e711de4a
|
1a29ca23f96102c917cfd5091d133fecfd7a406a
|
/forms.py
|
4d39b30da8b96d28bff801f90c45f3387e3839ee
|
[
"MIT"
] |
permissive
|
https://github.com/lemonad/molnet-polls
|
72d4174c307bd6821f68186f0e0582f313200268
|
1d2aa3c71356f044958d03d23faf56d29006f3a7
|
refs/heads/master
| 2016-09-05T17:22:35.230476
| 2010-09-26T22:02:44
| 2010-09-26T22:02:44
| null | 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django.forms import (BooleanField, CharField, ChoiceField, Form,
ModelForm, MultiValueField, MultiWidget, RadioSelect,
Textarea, TextInput, ValidationError)
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _
from models import Choice, Poll, Vote
class ModelFormRequestUser(ModelForm):
def __init__(self, request, *args, **varargs):
self.user = request.user
super(ModelFormRequestUser, self).__init__(*args, **varargs)
def save(self, commit=True):
obj = super(ModelFormRequestUser, self).save(commit=False)
obj.user = self.user
if commit:
obj.save()
self.save_m2m() # Be careful with ModelForms + commit=False
return obj
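# Usage sketch (hypothetical view code): the request is passed as the first
# argument so the saved object gets stamped with request.user.
#
#   form = PollForm(request, request.POST)
#   if form.is_valid():
#       poll = form.save()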
class ChoiceWithOtherRenderer(RadioSelect.renderer):
""" RadioFieldRenderer that renders its last choice with a
placeholder.
See http://djangosnippets.org/snippets/863/ (and perhaps also
http://djangosnippets.org/snippets/1377/)
"""
def __init__(self, *args, **kwargs):
super(ChoiceWithOtherRenderer, self).__init__(*args, **kwargs)
self.choices, self.other = self.choices[:-1], self.choices[-1]
def __iter__(self):
for input in super(ChoiceWithOtherRenderer, self).__iter__():
yield input
id = '%s_%s' % (self.attrs['id'], self.other[0]) if 'id' in self.attrs else ''
label_for = ' for="%s"' % id if id else ''
checked = '' if not force_unicode(self.other[0]) == self.value else 'checked="true" '
yield '<label%s><input type="radio" id="%s" value="%s" name="%s" %s/> %s</label> %%s' % (
label_for, id, self.other[0], self.name, checked, self.other[1])
class ChoiceWithOtherWidget(MultiWidget):
""" MultiWidget for use with ChoiceWithOtherField.
See http://djangosnippets.org/snippets/863/ (and perhaps also
http://djangosnippets.org/snippets/1377/)
"""
def __init__(self, choices, other_widget):
widgets = [RadioSelect(choices=choices,
renderer=ChoiceWithOtherRenderer),
other_widget]
super(ChoiceWithOtherWidget, self).__init__(widgets)
def decompress(self, value):
if not value:
return [None, None]
return value
def format_output(self, rendered_widgets):
""" Format the output by substituting the "other" choice into
the first widget.
"""
return rendered_widgets[0] % rendered_widgets[1]
class ChoiceWithOtherField(MultiValueField):
"""
ChoiceField with an option for a user-submitted "other" value.
The last item in the choices array passed to __init__ is expected to be a
choice for "other". This field's cleaned data is a tuple consisting of the
choice the user made, and the "other" field typed in if the choice made was
the last one.
>>> class AgeForm(forms.Form):
... age = ChoiceWithOtherField(choices=[
... (0, '15-29'),
... (1, '30-44'),
... (2, '45-60'),
... (3, 'Other, please specify:')
... ])
...
>>> # rendered as a RadioSelect choice field whose last choice has a text input
... print AgeForm()['age']
<ul>
<li><label for="id_age_0_0"><input type="radio" id="id_age_0_0" value="0" name="age_0" /> 15-29</label></li>
<li><label for="id_age_0_1"><input type="radio" id="id_age_0_1" value="1" name="age_0" /> 30-44</label></li>
<li><label for="id_age_0_2"><input type="radio" id="id_age_0_2" value="2" name="age_0" /> 45-60</label></li>
<li><label for="id_age_0_3"><input type="radio" id="id_age_0_3" value="3" name="age_0" /> Other, please \
specify:</label> <input type="text" name="age_1" id="id_age_1" /></li>
</ul>
>>> form = AgeForm({'age_0': 2})
>>> form.is_valid()
True
>>> form.cleaned_data
{'age': (u'2', u'')}
>>> form = AgeForm({'age_0': 3, 'age_1': 'I am 10 years old'})
>>> form.is_valid()
True
>>> form.cleaned_data
{'age': (u'3', u'I am 10 years old')}
>>> form = AgeForm({'age_0': 1, 'age_1': 'This is bogus text which is ignored since I didn\\'t pick "other"'})
>>> form.is_valid()
True
>>> form.cleaned_data
{'age': (u'1', u'')}
See http://djangosnippets.org/snippets/863/ (and perhaps also
http://djangosnippets.org/snippets/1377/)
"""
def __init__(self, *args, **kwargs):
other_field = kwargs.pop('other_field', None)
if other_field is None:
other_field = CharField(required=False)
fields = [ChoiceField(widget=RadioSelect(renderer=
ChoiceWithOtherRenderer),
*args,
**kwargs),
other_field]
widget = ChoiceWithOtherWidget(choices=kwargs['choices'],
other_widget=other_field.widget)
kwargs.pop('choices')
self._was_required = kwargs.pop('required', True)
kwargs['required'] = False
super(ChoiceWithOtherField, self).__init__(widget=widget,
fields=fields,
*args,
**kwargs)
def clean(self, value):
        # MultiValueField turns off the "required" flag for all sub-fields,
        # which would otherwise let an empty "other" entry slip through when
        # the "other" choice is selected. The check below enforces it.
if self._was_required:
#if value and value[1] and value[0] != self.fields[0].choices[-1][0]:
# value[0] = self.fields[0].choices[-1][0]
if value and value[0] == self.fields[0].choices[-1][0]:
manual_choice = value[1]
if not manual_choice:
raise ValidationError(self.error_messages['required'])
return super(ChoiceWithOtherField, self).clean(value)
def compress(self, value):
        if self._was_required and (not value or value[0] in (None, '')):
raise ValidationError(self.error_messages['required'])
if not value:
return [None, u'']
return (value[0], value[1] if force_unicode(value[0]) == \
force_unicode(self.fields[0].choices[-1][0]) else u'')
class PollVotingForm(Form):
""" Form for voting on polls. """
def __init__(self, *args, **kwargs):
choices = kwargs.pop('choices')
allow_new_choices = kwargs.pop('allow_new_choices')
super(PollVotingForm, self).__init__(*args, **kwargs)
if allow_new_choices:
choices.append(('OTHER', 'Other'))
self.fields['choices'] = \
ChoiceWithOtherField(choices=choices,
required=True)
else:
self.fields['choices'] = \
ChoiceField(choices=choices,
widget=RadioSelect,
required=True)
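# Usage sketch (hypothetical choices): when allow_new_choices is True, an
# extra 'Other' entry is appended and rendered with a free-text input.
#
#   form = PollVotingForm(choices=[(1, 'Python'), (2, 'Django')],
#                         allow_new_choices=True)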
class PollForm(ModelFormRequestUser):
""" Form for adding and editing polls. """
class Meta:
model = Poll
fields = ['title', 'description', 'allow_new_choices']
# Django 1.2 only
# widgets = {'title': TextInput(attrs={'class': 'span-12 last input'}),
# 'description': Textarea(attrs={'class': 'span-12 last input'}),}
def __init__(self, *args, **kwargs):
super(PollForm, self).__init__(*args, **kwargs)
self.fields['title'].widget.attrs['class'] = 'span-12 last input'
self.fields['description'].widget.attrs['class'] = 'span-12 last input'
self.fields['description'].widget.attrs['id'] = 'wmd-input'
class ChoiceForm(ModelFormRequestUser):
""" Form for adding and editing poll choices. """
def __init__(self, request, poll, *args, **varargs):
self.poll = poll
super(ChoiceForm, self).__init__(request, *args, **varargs)
self.fields['choice'].widget.attrs['class'] = 'span-12 last input'
def save(self, commit=True):
obj = super(ChoiceForm, self).save(commit=False)
obj.poll = self.poll
if commit:
obj.save()
self.save_m2m() # Be careful with ModelForms + commit=False
return obj
class Meta:
model = Choice
fields = ['choice']
# Django 1.2 only
# widgets = {'choice': TextInput(attrs={'class': 'span-12 input',
# 'size': '255'}),}
|
UTF-8
|
Python
| false
| false
| 8,700
|
py
| 24
|
forms.py
| 8
| 0.556897
| 0.54069
| 0
| 215
| 39.465116
| 114
|
BushiJhon/iAnimeServer
| 18,485,539,243,298
|
2f9440fda1b956088c8878867dee9ee1448f1c21
|
d4191a69ff2b9effeb4d4a53ab93d92ab9571145
|
/pojo/User.py
|
2e8bfbafdec69f0aeaba0e80007f91142636f13e
|
[] |
no_license
|
https://github.com/BushiJhon/iAnimeServer
|
e9df1fd4952bca70ec88c7368b21aef5260b7510
|
c57ecc1aa2ada163e75dcabc6ec92097f29d9e95
|
refs/heads/master
| 2020-09-07T11:27:24.458713
| 2019-11-20T10:17:20
| 2019-11-20T10:17:20
| 220,764,608
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
class User:
__phone = None
__password = None
__user_id = None
# __nick_name = None
# __avatar = None
# __background_photo = None
# __signature = None
# __follower = None
# __following = None
# __rank = None
# __my_like = None
# def set_my_like(self, my_like):
# self.__my_like = my_like
# return
#
# def set_nick_name(self, nick_name):
# self.__nick_name = nick_name
# return
#
# def set_avatar(self, avatar):
# self.__avatar = avatar
# return
#
# def set_background_photo(self, background_photo):
# self.__background_photo = background_photo
# return
#
# def set_signature(self, signature):
# self.__signature = signature
# return
#
# def set_follower(self, follower):
# self.__follower = follower
# return
#
# def set_following(self, following):
# self.__following = following
# return
#
# def set_rank(self, rank):
# self.__rank = rank
# return
#
def set_user_id(self, user_id):
self.__user_id = user_id
return
def set_phone(self, phone):
self.__phone = phone
return
def set_password(self, password):
self.__password = password
return
def get_phone(self):
return self.__phone
def get_password(self):
return self.__password
def get_user_id(self):
return self.__user_id
# def get_nick_name(self):
# return str(self.__nick_name)
#
# def get_avatar(self):
# return str(self.__avatar)
#
# def get_background_photo(self):
# return str(self.__background_photo)
#
# def get_signature(self):
# return str(self.__signature)
#
# def get_follower(self):
# return len(self.__follower)
#
# def get_following(self):
# return len(self.__following)
#
# def get_rank(self):
    #     return Rank.get_rank(self.__rank)
#
# def get_my_like(self):
# return self.__my_like
|
UTF-8
|
Python
| false
| false
| 2,109
|
py
| 18
|
User.py
| 18
| 0.525842
| 0.525842
| 0
| 89
| 22.685393
| 55
|
FrozenFist27/Image-Classifier-CIFAR-10-Dataset
| 5,102,421,187,244
|
492d6e00823c98d90955b9d78b876aaf6df4e6b6
|
cda564026f9030fd5d3a142edbc4897c0eef4c7e
|
/image_classifier_cifar_10.py
|
2db4371e729d42a8dcff3588d3860a3a2c1a2dae
|
[] |
no_license
|
https://github.com/FrozenFist27/Image-Classifier-CIFAR-10-Dataset
|
803331e77fbd85ac0ac34ed92344a941249635f7
|
4e8d7db862decbf29e2b4cc6ce00cc7178dc7ff5
|
refs/heads/main
| 2023-06-23T02:50:05.911316
| 2021-07-13T11:21:07
| 2021-07-13T11:21:07
| 385,577,773
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""Image Classifier CIFAR 10.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fWk1LWbLZTn8YIBm5z_ik8fCPD5zimdL
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O
# import backend
import tensorflow as tf
from keras import backend as K
# Model architecture
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from keras.models import Sequential
from keras.layers import Input,Dense, Dropout, Flatten, Conv2D
from keras.layers import MaxPool2D, Activation, MaxPooling2D
from keras.layers.normalization import BatchNormalization
# Annealer
from keras.callbacks import LearningRateScheduler
# Data processing
from keras.preprocessing.image import ImageDataGenerator, img_to_array
from keras.utils import to_categorical
from keras.preprocessing import image
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
# Progressor
from tqdm import tqdm
import h5py
from keras.optimizers import Adam
from keras.datasets import cifar10
(x_train_all, y_train_all), (x_test, y_test) = cifar10.load_data()
print("Number of training sample: ",x_train_all.shape[0])
print("Number of test samples: ", x_test.shape[0])
LABEL_NAMES = ['Plane', 'Car', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
VALIDATION_SIZE = 10000
LABEL_NAMES[y_train_all[10025][0]]
x_train_all = x_train_all /255.0
x_test = x_test /255.0
# split training and validation set.
x_train, x_val, y_train, y_val = train_test_split(x_train_all, y_train_all, random_state=0, test_size=0.2)
x_val.shape
x_train.shape
# Convert to One Hot Encoding
from keras.utils import np_utils
y_train_ohe = np_utils.to_categorical(y_train, num_classes=10)
y_test_ohe = np_utils.to_categorical(y_test, num_classes=10)
y_val_ohe = np_utils.to_categorical(y_val, num_classes=10)
print(y_val_ohe)
y_val_ohe.shape
from keras.applications.vgg16 import VGG16
from keras.models import Model
def create_cnn_model():
image_input = Input(shape=(32, 32, 3))
vgg_model = VGG16(weights='imagenet',include_top=False, input_tensor=image_input)
flatt = Flatten()(vgg_model.output)
couche1 = Dense(128, activation='relu')(flatt)
couche1_normalization = BatchNormalization()(couche1)
couche1_dropout = Dropout(0.2)(couche1_normalization)
couche2 = Dense(64, activation='relu')(couche1_dropout)
couche2_normalization = BatchNormalization()(couche2)
output = Dense(10, activation='softmax', name='output')(couche2_normalization)
model = Model( image_input, output )
return model
model = create_cnn_model()
model.summary()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
from keras.callbacks import EarlyStopping, ModelCheckpoint
# Use Data Augmentation
datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip= True)
es = EarlyStopping(patience=10, monitor='val_accuracy', mode='max')
mc = ModelCheckpoint('./weights.h5', monitor='val_accuracy', mode='max', save_best_only=True)
model.fit_generator(datagen.flow(x_train, y_train_ohe, batch_size=32), steps_per_epoch=1250, epochs=500, validation_data=(x_val, y_val_ohe), callbacks=[es, mc])
# Load The Best weights in the ModelCheckpoint
model.load_weights('./weights.h5')
# Predict The Test
preds = model.predict(x_val)
score_test = accuracy_score(y_val.ravel(), np.argmax(preds, axis=1))
print (' The test score : ', score_test)
print('')
_, evaluate = model.evaluate(x_test, y_test_ohe, verbose=1)
print('>%.3f' % (evaluate * 100.0))
# A functional-API Model has no predict_classes(); take the argmax of the
# predicted probabilities over the test set instead
prediction = np.argmax(model.predict(x_test), axis=1)
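# Inspection sketch (assumes LABEL_NAMES follows the standard CIFAR-10
# class ordering): map a few predicted indices back to readable names.
for i in range(5):
    print('predicted:', LABEL_NAMES[prediction[i]], '| actual:', LABEL_NAMES[y_test[i][0]])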
|
UTF-8
|
Python
| false
| false
| 3,710
|
py
| 1
|
image_classifier_cifar_10.py
| 1
| 0.731536
| 0.704313
| 0
| 119
| 30.168067
| 164
|
gabriellaec/desoft-analise-exercicios
| 15,247,133,910,661
|
d50b3e3bb524e587f515fd7167c4487d727fa57d
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_176/ch84_2019_06_04_05_16_57_763624.py
|
217edf8460217c8ae37c987ddee80f25f8ad4f14
|
[] |
no_license
|
https://github.com/gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
inv_map = {v: k for k, v in my_map.items()}
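# For instance (hypothetical input): my_map = {'a': 1, 'b': 2} gives
# inv_map == {1: 'a', 2: 'b'}; duplicate values would collapse into one key.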
|
UTF-8
|
Python
| false
| false
| 43
|
py
| 35,359
|
ch84_2019_06_04_05_16_57_763624.py
| 35,352
| 0.581395
| 0.581395
| 0
| 1
| 43
| 43
|
wulalakuaipao/api_auto
| 4,183,298,167,444
|
a642e5a85e53bb59af74007e5eaa5afb200e05bd
|
bf01461b7608588124b6cb17d158b3074ee75e74
|
/week_8/class_0304/learn_suite.py
|
bb497669d6f3075de8dc9bb44c77c6033aca966d
|
[] |
no_license
|
https://github.com/wulalakuaipao/api_auto
|
b66ac4c2f106d0af04f3ff441fbf76643ae91308
|
218a19da10df4095f3e80cdbee39b7d723dafd18
|
refs/heads/master
| 2023-03-24T13:24:10.931032
| 2020-07-12T07:04:28
| 2020-07-12T07:04:28
| 279,010,891
| 1
| 0
| null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2019/3/1 21:08
# @Author : lemon_huahua
# @Email : 204893985@qq.com
# @File : learn_suite.py
import unittest
import HTMLTestRunnerNew  # module that renders results as a polished HTML report
from week_8.class_0304 import class_unittest_learn
# from week_8.class_0304.class_unittest_learn import *  # import down to the class names
#
# # Collect the test cases
suite = unittest.TestSuite()  # test suite: collects/stores the test cases
# Method 1: add TestCase instances one at a time
# suite.addTest(class_unittest_learn.TestAdd(1, 1, 2, 'test_001'))  # a test case instance
# suite.addTest(TestAdd('test_002'))  # a test case instance
# suite.addTest(TestAdd('test_003'))  # a test case instance
# suite.addTest(TestAdd('test_004'))  # a test case instance
# # suite.addTest(TestSub('test_two_negative'))  # a test case instance
# # suite.addTest(TestSub('test_two_zero'))  # a test case instance
# A TestLoader is dedicated to loading cases (two ways below);
# ddt-decorated cases can only be added through a loader
loader = unittest.TestLoader()
# Method 2: add the cases of a whole test class
suite.addTest(loader.loadTestsFromTestCase(class_unittest_learn.TestAdd))
# suite.addTest(loader.loadTestsFromTestCase(TestSub))
# Method 3: add the cases of a whole test module
# suite.addTest(loader.loadTestsFromModule(class_unittest_learn))
# Run the cases; TextTestRunner itself has a few details worth knowing
with open('test_0304.html', 'wb') as file:
    # runner = unittest.TextTestRunner(stream=file, verbosity=2)  # the legacy text runner
    runner = HTMLTestRunnerNew.HTMLTestRunner(stream=file,
                                              verbosity=2,
                                              title='20200304 test report_py14',
                                              description='First test report of 2020',
                                              tester='乌拉拉')
    runner.run(suite)  # run the cases collected in the suite
# .  one test passed
# E  one test raised an error (error)
# F  one test failed (fail): expected != actual
# file = open('test.log', 'w')
# try:
#     print(a)
# except Exception as e:
#     file.write(str(e))  # text modes: w w+ r+ a a+; binary: wb wb+ ab+ ab rb+
#     print('Error: {}'.format(e))
#     raise e
|
UTF-8
|
Python
| false
| false
| 2,136
|
py
| 105
|
learn_suite.py
| 99
| 0.648727
| 0.611111
| 0
| 55
| 30.436364
| 73
|