| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
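Each row that follows pairs one repository file with its metadata under the columns above. A minimal iteration sketch, assuming the dump has already been parsed into a list of dicts named `rows` (both the variable name and the loading step are assumptions, not shown here):

    for row in rows:
        # the path, star count and the two quality scores are the columns of interest
        print(row["max_stars_repo_path"], row["max_stars_count"], row["score"], row["int_score"])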
Source/YBio.py
|
YuriShporhun/YBio
| 0
|
12782751
|
<reponame>YuriShporhun/YBio
from YSeq import YSeq
from YLoader import YLoader
from YDNA import YDNA
from YRNA import YRNA
class YSeqFunc:
def __init__(self):
pass
@staticmethod
def hamming_distance(seq_one, seq_two):
"""
En: A static method that calculates the Hamming distance
between two sequences of equal length
"""
distance = 0
for i in range(0, len(seq_one)):
if seq_one[i] != seq_two[i]:
distance += 1
return distance
@staticmethod
def transition_transversion_ratio(seq_one, seq_two):
"""
En: A static method that calculates the transition/transversion
ratio between two DNA sequences
"""
transitions = 0
transversions = 0
for i in range(0, len(seq_two)):
if seq_one[i] != seq_two[i]:
if (seq_one[i] == 'A' and seq_two[i] == 'G') or \
(seq_one[i] == 'C' and seq_two[i] == 'T') or \
(seq_one[i] == 'G' and seq_two[i] == 'A') or \
(seq_one[i] == 'T' and seq_two[i] == 'C'):
transitions += 1
else:
transversions += 1
return transitions / transversions
class _YServiceMatrix:
_matrix = []
_cols = 0
_rows = 0
def __init__(self, dna_sequences):
self._matrix = dna_sequences[:]
self._cols = self.__normalize()
self._rows = len(self._matrix)
def __normalize(self):
max_size = 0
for seq in range(len(self._matrix)):
if len(self._matrix[seq]) > max_size:
max_size = len(self._matrix[seq])
for seq in range(len(self._matrix)):
if len(self._matrix[seq]) < max_size:
self._matrix[seq] += ('_' * (max_size - len(self._matrix[seq])))
return max_size
def _transpose(self):
self._matrix = [[self._matrix[j][i] for j in range(len(self._matrix))] for i in range(len(self._matrix[0]))]
self._cols, self._rows = self._rows, self._cols
def append(self, sequence):
self._matrix.append(sequence)
self._cols = self.__normalize()
self._rows += 1
def get_col_count(self):
return self._cols
def get_row_count(self):
return self._rows
def get_item(self, row, col):
return self._matrix[row][col]
class YMatrix(_YServiceMatrix):
def __init__(self, dna_sequences):
super().__init__(dna_sequences)
def __repr__(self):
result = ''
for item in self._matrix:
result += str(item) + '\n'
return result
def profile(self):
profile = YMatrix([])
for col in range(self._cols):
temp_dna = YDNA([])
for row in range(self._rows):
temp_dna.Append(self._matrix[row][col])
profile.append(temp_dna.Count())
profile._transpose()
return profile
def save_profile(self, filename, designations = False):
indexes = {
0: 'A',
1: 'C',
2: 'G',
3: 'T'
}
profile = self.profile()
profile._transpose()
sign_flag = True
with open(filename, 'w') as file:
for i in range(profile.get_col_count()):
if designations and sign_flag:
file.write(indexes[i] + ': ')
for j in range(profile.get_row_count()):
file.write(str(profile.get_item(j, i)) + ' ')
file.write('\n')
def save_consensus(self, filename):
consensus = YMatrix.consensus(self.profile())
consensus.Save(filename)
@staticmethod
def consensus(profile):
consensus = YDNA([])
indexes = {
0: 'A',
1: 'C',
2: 'G',
3: 'T'
}
for col in range(profile.get_col_count()):
max_index = 0
max_count = 0
for row in range(profile.get_row_count()):
if int(profile.get_item(row, col)) > max_count:
max_count = int(profile.get_item(row, col))
max_index = row
consensus.Append(indexes[max_index])
return consensus
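# A minimal sanity check for the sequence helpers above (hypothetical sequences,
# assuming the YSeq/YDNA imports at the top of this module resolve):
#
#     s1 = 'GAGCCTACTAACGGGAT'
#     s2 = 'CATCGTAATGACGGCCT'
#     YSeqFunc.hamming_distance(s1, s2)                # -> 7 mismatching positions
#     YSeqFunc.transition_transversion_ratio(s1, s2)   # -> transitions / transversions (1/6 here)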
| 2.953125
| 3
|
src/python/pyllars/cppparser/generation/clang/tranlation_unit.py
|
nak/pyllars
| 2
|
12782752
|
from pyllars.cppparser.parser.clang_translator import NodeType
from .generator import Generator
class TranslationUnitDeclGenerator(Generator):
def generate(self):
pass
| 1.359375
| 1
|
evaluation/interpolate_pc_codes.py
|
eduarddohr/pc2pix
| 12
|
12782753
|
<gh_stars>10-100
'''Render point clouds from test dataset using pc2pix
python3 interpolate_pc_codes.py --ptcloud_ae_weights=../model_weights/ptcloud/chair-pt-cloud-stacked-ae-chamfer-5-ae-weights-32.h5 -p=32 -k=5 --generator=../model_weights/pc2pix/chair-gen-color.h5 --discriminator=../model_weights/pc2pix/chair-dis-color.h5
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
import tensorflow as tf
import numpy as np
import argparse
import sys
sys.path.append("..")
sys.path.append("../lib")
sys.path.append("../external")
from pc2pix import PC2Pix
from ptcloud_stacked_ae import PtCloudStackedAE
from general_utils import plot_3d_point_cloud
from shapenet import get_split
from in_out import load_ply
from loader import read_view_angle
from general_utils import plot_3d_point_cloud, plot_image, plot_images
import os
import datetime
from PIL import Image
import scipy.misc
#sys.path.append("evaluation")
from utils import get_ply, plot_images
def render_by_pc2pix(pc_code, pc2pix, elev=10., azim=240.):
elev += 40.
azim += 180.
elev_code = np.array([elev / 80.])
azim_code = np.array([azim / 360.])
noise = np.random.uniform(-1.0, 1.0, size=[1, 128])
fake_image = pc2pix.generator.predict([noise, pc_code, elev_code, azim_code])
fake_image *= 0.5
fake_image += 0.5
fake_image = fake_image[0]
return fake_image
def norm_angle(angle):
angle *= 0.5
angle += 0.5
return angle
def norm_pc(pc):
pc = pc / 0.5
return pc
PLY_PATH = "../data/shape_net_core_uniform_samples_2048"
PC_CODES_PATH = "pc_codes"
PLOTS_PATH = "plots3d"
if __name__ == '__main__':
parser = argparse.ArgumentParser()
help_ = "Load generator model trained weights"
parser.add_argument("-g", "--generator", default=None, help=help_)
help_ = "Load discriminator model trained weights"
parser.add_argument("-d", "--discriminator", default=None, help=help_)
help_ = "Load h5 ptcloud_ae model trained ae weights"
parser.add_argument("-w", "--ptcloud_ae_weights", help=help_)
help_ = "ShapeNet category or class (chair, airplane, etc)"
parser.add_argument("-a", "--category", default='chair', help=help_)
help_ = "Split file"
parser.add_argument("-s", "--split_file", default='data/chair_exp.json', help=help_)
help_ = "PLY files folder"
parser.add_argument("--ply", default=PLY_PATH, help=help_)
help_ = "pc codes folder"
parser.add_argument("--pc_codes", default=PC_CODES_PATH, help=help_)
help_ = "Point cloud code dim"
parser.add_argument("-p", "--pc_code_dim", default=32, type=int, help=help_)
help_ = "Kernel size"
parser.add_argument("-k", "--kernel_size", default=1, type=int, help=help_)
args = parser.parse_args()
batch_size = 32
pc_code_dim = args.pc_code_dim
category = args.category
ptcloud_ae = PtCloudStackedAE(latent_dim=args.pc_code_dim,
kernel_size=args.kernel_size,
category=category,
evaluate=True)
# ptcloud_ae.stop_sources()   # leftover debug call; exiting here would skip the rest of the script
# exit(0)
if args.ptcloud_ae_weights:
print("Loading point cloud ae weights: ", args.ptcloud_ae_weights)
ptcloud_ae.use_emd = False
ptcloud_ae.ae.load_weights(args.ptcloud_ae_weights)
else:
print("Trained point cloud ae weights are required for pc2pix")
exit(0)
pc2pix = PC2Pix(ptcloud_ae=ptcloud_ae, gw=args.generator, dw=args.discriminator, pc_code_dim=args.pc_code_dim, batch_size=batch_size, category=category)
js = get_ply(args.split_file)
datasets = ('test')
start_time = datetime.datetime.now()
os.makedirs(PLOTS_PATH, exist_ok=True)
t = 0
interpolate = False
for key in js.keys():
# key eg 03001627
data = js[key]
tags = data['test']
ply_path_main = os.path.join(args.ply, key)
tagslen = len(tags)
n_interpolate = 10
if not interpolate:
n_interpolate = 2
for i in range(tagslen - 1):
n = 0
tag = tags[i]
images = []
pc_codes = []
ply_file = os.path.join(ply_path_main, tag + ".ply")
pc = load_ply(ply_file)
target_path = os.path.join(PLOTS_PATH, tag + "_" + str(n) + ".png")
n += 1
fig = plot_3d_point_cloud(pc[:, 0],
pc[:, 1],
pc[:, 2],
show=False,
azim=320,
colorize='rainbow',
filename=target_path)
image = np.array(Image.open(target_path)) / 255.0
images.append(image)
pc = norm_pc(pc)
shape = pc.shape
pc = np.reshape(pc, [-1, shape[0], shape[1]])
pc_code1 = ptcloud_ae.encoder.predict(pc)
pc_codes.append(pc_code1)
tag = tags[i+1]
ply_file = os.path.join(ply_path_main, tag + ".ply")
pc = load_ply(ply_file)
target_path = os.path.join(PLOTS_PATH, tag + "_" + str(n_interpolate + 1) + ".png")
fig = plot_3d_point_cloud(pc[:, 0],
pc[:, 1],
pc[:, 2],
azim=320,
show=False,
colorize='rainbow',
filename=target_path)
image_end = np.array(Image.open(target_path)) / 255.0
pc = norm_pc(pc)
shape = pc.shape
pc = np.reshape(pc, [-1, shape[0], shape[1]])
pc_code2 = ptcloud_ae.encoder.predict(pc)
shape = pc_code1.shape
if interpolate:
for i in range(n_interpolate):
#pc_code = []
delta = (pc_code2 - pc_code1)/(n_interpolate + 1)
delta *= (i + 1)
pc_code = pc_code1 + delta
pc_codes.append(pc_code)
pc = ptcloud_ae.decoder.predict(pc_code)
pc *= 0.5
target_path = os.path.join(PLOTS_PATH, tag + "_" + str(n) + ".png")
n += 1
fig = plot_3d_point_cloud(pc[0][:, 0],
pc[0][:, 1],
pc[0][:, 2],
show=False,
azim=320,
colorize='rainbow',
filename=target_path)
image = np.array(Image.open(target_path)) / 255.0
images.append(image)
#print("pc_code shape:", pc_code.shape)
#print(pc_code)
images.append(image_end)
pc_codes.append(pc_code2)
else:
tag = tags[i+2]
ply_file = os.path.join(ply_path_main, tag + ".ply")
pc = load_ply(ply_file)
target_path = os.path.join(PLOTS_PATH, tag + "_" + str(1) + ".png")
fig = plot_3d_point_cloud(pc[:, 0],
pc[:, 1],
pc[:, 2],
show=False,
azim=320,
colorize='rainbow',
filename=target_path)
image = np.array(Image.open(target_path)) / 255.0
images.append(image)
pc = norm_pc(pc)
shape = pc.shape
pc = np.reshape(pc, [-1, shape[0], shape[1]])
pc_code = ptcloud_ae.encoder.predict(pc)
pc_codes.append(pc_code)
images.append(image_end)
pc_codes.append(pc_code2)
pc_code = pc_code1 - pc_code + pc_code2
pc_codes.append(pc_code)
pc = ptcloud_ae.decoder.predict(pc_code)
pc *= 0.5
target_path = os.path.join(PLOTS_PATH, tag + "_" + str(3) + ".png")
n += 1
fig = plot_3d_point_cloud(pc[0][:, 0],
pc[0][:, 1],
pc[0][:, 2],
show=False,
azim=320,
colorize='rainbow',
filename=target_path)
image = np.array(Image.open(target_path)) / 255.0
images.append(image)
for pc_code in pc_codes:
# default of plot_3d_point_cloud is azim=240 which is -120
# or 60 = 180 - 120
image = render_by_pc2pix(pc_code, pc2pix, azim=(320-360))
images.append(image)
print(len(images))
plot_images(2, n_interpolate + 2, images, tag + ".png", dir_name="point_clouds")
t += 1
if t >= len(tags):
del pc2pix
del ptcloud_ae
exit(0)
#exit(0)
| 2.140625
| 2
|
webservice/funcs/ask_question.py
|
jordsti/hacker-jeopardy
| 6
|
12782754
|
<gh_stars>1-10
from ..service_func import service_func, func_error, meta_arg
import random
class ask_question(service_func):
def __init__(self):
service_func.__init__(self, '/question/ask')
self.name = "Ask Question"
self.description = "Ask a question to a team, with a category id and a rank"
self.question = None
self.points = 0
self.args.append(meta_arg("key", "Protection Key", "none"))
self.args.append(meta_arg("category", "Category Id", "none"))
self.args.append(meta_arg("rank", "Rank Id", "none"))
self.args.append(meta_arg("team", "Team Id", "none"))
def init(self):
self.question = None
self.points = 0
def execute(self, args, server):
key = args["key"]
category = int(args["category"])
rank = int(args["rank"])
team = int(args["team"])
if server.key == key:
cat = server.game_data.get_category(category)
if cat is not None:
if cat.ranks_available[rank]:
if server.game_data.current_question is None:
pool = []
for q in cat.questions:
if q.rank == rank and not q.asked:
pool.append(q)
if len(pool) > 0:
q_i = random.randint(0, len(pool)-1)
self.question = pool[q_i]
self.question.asked = True
self.points = server.game_data.points_table.points[self.question.rank]
server.game_data.ask_question(self.question, team)
cat.ranks_available[rank] = False
else:
raise func_error("No more questions in this category with this rank")
else:
raise func_error("A question has already been asked and is waiting for an answer")
else:
raise func_error("You can't ask any more questions with this rank and category")
else:
raise func_error("Invalid category")
else:
raise func_error("Invalid key")
def answer(self):
data = {'question': {
'id': self.question.id,
'question': self.question.question,
'answer': self.question.answer,
'rank': self.question.rank,
'points': self.points
}}
return data
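# A hypothetical call sketch; the real `server` object (holding the key and game_data)
# is provided by the surrounding web service framework and is not shown in this file:
#
#     func = ask_question()
#     func.init()
#     func.execute({"key": "secret", "category": "2", "rank": "1", "team": "0"}, server)
#     response = func.answer()   # {'question': {'id': ..., 'question': ..., 'answer': ..., ...}}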
| 2.78125
| 3
|
util/level_set/ls_util/interactive_ls.py
|
margaritiko/UGIR
| 46
|
12782755
|
<gh_stars>10-100
import time
import os
import GeodisTK
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage.filters as filters
from PIL import Image
from scipy import ndimage
from skimage import measure
from mpl_toolkits.mplot3d import Axes3D
from level_set.ls_util.drlse_reion import *
def show_leve_set(fig, phi):
ax1 = fig.add_subplot(111, projection='3d')
y, x = phi.shape
x = np.arange(0, x, 1)
y = np.arange(0, y, 1)
X, Y = np.meshgrid(x, y)
ax1.plot_surface(X, Y, phi, rstride=2, cstride=2, color='r', linewidth=0, alpha=0.6, antialiased=True)
ax1.contour(X, Y, phi, 0, colors='g', linewidths=2)
def show_image_and_segmentation(fig, img, contours, seeds = None):
ax2 = fig.add_subplot(111)
ax2.imshow(img, interpolation='nearest', cmap=plt.cm.gray)
for n, contour in enumerate(contours):
ax2.plot(contour[:, 1], contour[:, 0], linewidth=2, color='green')
if(seeds is not None):
h_idx, w_idx = np.where(seeds[0] > 0)
ax2.plot(w_idx, h_idx, linewidth=2, color='red')
h_idx, w_idx = np.where(seeds[1] > 0)
ax2.plot(w_idx, h_idx, linewidth=2, color='blue')
ax2.axis('off')
def get_distance_based_likelihood(img, seed, D):
if(seed.sum() > 0):
geoD = GeodisTK.geodesic2d_raster_scan(img, seed, 0.1, 2)
geoD[geoD > D] = D
else:
geoD = np.ones_like(img)*D
geoD = np.exp(-geoD)
return geoD
def interactive_level_set(img, seg, seed_f, seed_b, param, display = True, intensity = False):
"""
Refine an initial segmentation with an interaction-based level set
Params:
img: a 2D image array
seg: a 2D image array representing the initial binary segmentation
seed_f: a binary array representing the existence of foreground scribbles
seed_b: a binary array representing the existence of background scribbles
param: a dict of level set parameters with keys 'mu', 'lambda', 'alpha' and 'beta'
display: a bool value, whether to display the segmentation result
intensity: a bool value, whether to define the region term based on intensity
"""
img = np.asarray(img, np.float32)
img = (img - img.mean())/img.std()
seg = np.asarray(seg, np.float32)
Df = get_distance_based_likelihood(img, seed_f, 4)
Db = get_distance_based_likelihood(img, seed_b, 4)
Pfexp = np.exp(Df); Pbexp = np.exp(Db)
Pf = Pfexp / (Pfexp + Pbexp)
# if(display):
# plt.subplot(1,3,1)
# plt.imshow(Df)
# plt.subplot(1,3,2)
# plt.imshow(Db)
# plt.subplot(1,3,3)
# plt.imshow(Pf)
# plt.show()
[H, D] = img.shape
zoom = [64.0/H, 64.0/D]
img_d = ndimage.interpolation.zoom(img, zoom)
seg_d = ndimage.interpolation.zoom(seg, zoom)
Pf_d = ndimage.interpolation.zoom(Pf, zoom)
if(intensity is True):
print("use intensity")
ls_img = img_d
else:
print("use segmentation")
ls_img = seg_d
# parameters
timestep = 1 # time step
iter_inner = 50
iter_outer_max = 10
mu = param['mu']/timestep # coefficient of the distance regularization term R(phi)
lmda = param['lambda'] # coefficient of the weighted length term L(phi)
alfa = param['alpha'] # coefficient of the weighted area term A(phi)
beta = param['beta'] # coefficient for user interaction term
epsilon = 1.5 # parameter that specifies the width of the DiracDelta function
# initialize LSF as binary step function
# the level set has positive value inside the contour and negative value outside
# this is opposite to DRLSE
c0 = 20
initialLSF = -c0 * np.ones(seg_d.shape)
initialLSF[seg_d > 0.5] = c0
phi = initialLSF.copy()
t0 = time.time()
# start level set evolution
seg_size0 = np.asarray(phi > 0).sum()
for n in range(iter_outer_max):
phi = drlse_region_interaction(phi, ls_img, Pf_d, lmda, mu, alfa, beta, epsilon, timestep, iter_inner, 'double-well')
seg_size = np.asarray(phi > 0).sum()
ratio = (seg_size - seg_size0)/float(seg_size0)
if(abs(ratio) < 1e-3):
print('iteration', n*iter_inner, ratio)
break
else:
seg_size0 = seg_size
runtime = time.time() - t0
print('iteration', (n + 1)*iter_inner)
print('running time', runtime)
finalLSF = phi.copy()
finalLSF = ndimage.interpolation.zoom(finalLSF, [1.0/item for item in zoom])
if(display):
plt.ion()
fig1 = plt.figure(1)
fig2 = plt.figure(2)
fig3 = plt.figure(3)
fig1.clf()
init_contours = measure.find_contours(seg, 0.5)
show_image_and_segmentation(fig1, img, init_contours, [seed_f, seed_b])
fig1.suptitle("(a) Initial Segmentation")
# fig1.savefig("init_seg.png")
fig2.clf()
final_contours = measure.find_contours(finalLSF, 0)
show_image_and_segmentation(fig2, img, final_contours)
fig2.suptitle("(b) Refined Result")
# fig2.savefig("refine_seg.png")
fig3.clf()
show_leve_set(fig3, finalLSF)
fig3.suptitle("(c) Final Level Set Function")
# fig3.savefig("levelset_func.png")
plt.pause(10)
plt.show()
return finalLSF > 0, runtime
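# A hypothetical invocation sketch with synthetic arrays (assumes GeodisTK and the
# drlse_reion module imported above are available; the parameter values are made up):
#
#     img    = np.random.rand(128, 128).astype(np.float32)
#     seg    = (img > 0.5).astype(np.float32)
#     seed_f = np.zeros(img.shape, np.uint8); seed_f[60:70, 60:70] = 1   # foreground scribble
#     seed_b = np.zeros(img.shape, np.uint8); seed_b[5:15, 5:15] = 1     # background scribble
#     param  = {'mu': 0.2, 'lambda': 5.0, 'alpha': 1.5, 'beta': 2.0}
#     mask, runtime = interactive_level_set(img, seg, seed_f, seed_b, param, display=False)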
| 1.992188
| 2
|
tests/asp/gringo/simplify.002.test.py
|
bernardocuteri/wasp
| 19
|
12782756
|
input = """
a :- b, not a.
"""
output = """
{}
"""
| 1.65625
| 2
|
msg/__init__.py
|
trym-inc/django-msg
| 7
|
12782757
|
default_app_config = 'msg.apps.MsgConfig'
| 1.210938
| 1
|
skyfield/projections.py
|
dieli/python-skyfield
| 0
|
12782758
|
<reponame>dieli/python-skyfield
from numpy import sqrt
from .functions import length_of
def _derive_stereographic():
"""Compute the formulae to cut-and-paste into the routine below."""
from sympy import symbols, atan2, acos, rot_axis1, rot_axis3, Matrix
x_c, y_c, z_c, x, y, z = symbols('x_c y_c z_c x y z')
# The angles we'll need to rotate through.
around_z = atan2(x_c, y_c)
around_x = acos(-z_c)
# Apply rotations to produce an "o" = output vector.
v = Matrix([x, y, z])
xo, yo, zo = rot_axis1(around_x) * rot_axis3(-around_z) * v
# Which we then use the stereographic projection to produce the
# final "p" = plotting coordinates.
xp = xo / (1 - zo)
yp = yo / (1 - zo)
return xp, yp
def build_stereographic_projection(center):
"""Compute *x* and *y* coordinates at which to plot the positions."""
# TODO: Computing the center should really be done using
# optimization, as in:
# https://math.stackexchange.com/questions/409217/
p = center.position.au
u = p / length_of(p)
c = u.mean(axis=1)
c = c / length_of(c)
x_c, y_c, z_c = c
def project(position):
p = position.position.au
u = p / length_of(p)
x, y, z = u
x_out = (x*y_c/sqrt(x_c**2 + y_c**2) - x_c*y/sqrt(x_c**2 + y_c**2))/(x*x_c*sqrt(-z_c**2 + 1)/sqrt(x_c**2 + y_c**2) + y*y_c*sqrt(-z_c**2 + 1)/sqrt(x_c**2 + y_c**2) + z*z_c + 1)
y_out = (-x*x_c*z_c/sqrt(x_c**2 + y_c**2) - y*y_c*z_c/sqrt(x_c**2 + y_c**2) + z*sqrt(-z_c**2 + 1))/(x*x_c*sqrt(-z_c**2 + 1)/sqrt(x_c**2 + y_c**2) + y*y_c*sqrt(-z_c**2 + 1)/sqrt(x_c**2 + y_c**2) + z*z_c + 1)
return x_out, y_out
return project
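# A minimal usage sketch (hypothetical stand-ins for Skyfield position objects,
# which only need a `.position.au` array attribute for this projection):
#
#     import numpy as np
#     from types import SimpleNamespace
#     stars = SimpleNamespace(position=SimpleNamespace(au=np.random.randn(3, 50)))
#     project = build_stereographic_projection(stars)   # centre the chart on the mean direction
#     x, y = project(stars)                             # chart coordinates, one per input vector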
| 3.46875
| 3
|
raynet/common/camera.py
|
paschalidoud/raynet
| 76
|
12782759
|
import numpy as np
class Camera(object):
"""Camera is a simple finite pinhole camera defined by the matrices K, R
and t.
see "Multiple View Geometry in Computer Vision" by <NAME> and <NAME> for notation.
Parameters
----------
K: The 3x3 intrinsic camera parameters
R: The 3x3 rotation matrix from world to camera coordinates
t: The 3x1 translation vector for the camera center in camera coordinates
(so that the camera center is the origin in the camera coordinates)
"""
def __init__(self, K, R, t):
# Make sure the input data have the right shape
assert K.shape == (3, 3)
assert R.shape == (3, 3)
assert t.shape == (3, 1)
self._K = K
self._R = R
self._t = t
self._P = None
self._P_pinv = None
self._center = None
@property
def K(self):
return self._K
@property
def R(self):
return self._R
@property
def t(self):
return self._t
@property
def center(self):
# Compute the center of the camera in homogeneous coordinates and return
# it as a 4x1 vector
if self._center is None:
self._center = np.vstack(
[(-np.linalg.inv(self.R)).dot(self.t), [1]]
).astype(np.float32)
assert self._center.shape == (4, 1)
return self._center
@property
def P(self):
# Compute and return a 3x4 projection matrix
if self._P is None:
self._P = self._K.dot(np.hstack([self._R, self._t]))
return self._P
@property
def P_pinv(self):
if self._P_pinv is None:
self._P_pinv = np.linalg.pinv(self.P)
return self._P_pinv
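# A small usage sketch with made-up intrinsics/extrinsics (not taken from the
# RayNet configuration files):
#
#     K = np.array([[800.0, 0.0, 320.0], [0.0, 800.0, 240.0], [0.0, 0.0, 1.0]])
#     R = np.eye(3)
#     t = np.array([[0.0], [0.0], [2.0]])
#     cam = Camera(K, R, t)
#     X = np.array([[0.1], [0.2], [3.0], [1.0]])   # homogeneous world point
#     x = cam.P.dot(X); x = x[:2] / x[2]           # pixel coordinates
#     print(cam.center.ravel(), x.ravel())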
| 3.6875
| 4
|
Examples/Suppliments/hello_unicode.py
|
Sharmila8/intropython2016
| 0
|
12782760
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
hello = 'Hello '
world = u'世界'
print hello + world
print u"It was nice weather today: it reached 80\u00B0"
print u"Maybe it will reach 90\N{degree sign}"
print u"It is extremely rare for it ever to reach 100° in Seattle"
| 2.421875
| 2
|
PyEngine3D/Utilities/Config.py
|
ubuntunux/PyEngine3D
| 121
|
12782761
|
<reponame>ubuntunux/PyEngine3D<gh_stars>100-1000
import os
import configparser
import traceback
from . import Logger
# util class
class Empty:
pass
def evaluation(value):
# find value type
try:
evalValue = eval(value)
if type(evalValue) in [int, float, list, tuple, dict]:
return evalValue
except:
return value
def getValue(config, section, option, default_value=None):
return evaluation(config[section][option]) if config.has_option(section, option) else default_value
def setValue(config, section, option, value):
if not config.has_section(section):
config.add_section(section)
config.set(section, option, value)
# ------------------------------ #
# CLASS : Config
# Usage :
# config = Config(configFilename)
# # get value example, section:Screen, option:width
# print(config.Screen.width)
# ------------------------------ #
class Config:
def __init__(self, configFilename, log_level=Logger.WARN, prevent_lowercase=True):
self.log_level = log_level
self.isChanged = False
self.filename = configFilename
self.config = configparser.ConfigParser()
# prevent the option keys from being converted to lowercase (must be set before read)
if prevent_lowercase:
self.config.optionxform = lambda option_name: option_name
self.config.read(configFilename)
if self.log_level <= Logger.INFO:
print("Load Config : %s" % self.filename)
# set sections
for section in self.config.sections():
if self.log_level == Logger.DEBUG:
print("[%s]" % section)
if not hasattr(self, section):
setattr(self, section, Empty())
# set value to member variables
current_section = getattr(self, section)
for option in self.config[section]:
value = self.config.get(section, option)
if self.log_level == Logger.DEBUG:
print("%s = %s" % (option, value))
setattr(current_section, option, evaluation(value))
def hasValue(self, section, option):
return self.config.has_option(section, option)
def getValue(self, section, option, default_value=None):
return evaluation(self.config[section][option]) if self.config.has_option(section, option) else default_value
def setValue(self, section, option, value):
# set value
if not self.config.has_section(section):
self.config.add_section(section)
self.config[section][option] = str(value)
# set value to member variables
if not hasattr(self, section):
setattr(self, section, Empty())
self.isChanged = True
elif not self.isChanged:
self.isChanged = value != getattr(getattr(self, section), option, None)
current_section = getattr(self, section)
setattr(current_section, option, value)
def setDefaultValue(self, section, option, value):
if not self.hasValue(section, option):
self.setValue(section, option, value)
def save(self):
if self.isChanged or not os.path.exists(self.filename):
with open(self.filename, 'w') as configfile:
self.config.write(configfile)
if self.log_level <= Logger.INFO:
print("Saved Config : " + self.filename)
self.isChanged = False
def getFilename(self):
return self.filename
if __name__ == '__main__':
import unittest
class test(unittest.TestCase):
def testConfig(self):
# load test
testConfig = Config("TestConfig.ini")
# set value
testConfig.setValue("TestSection", "test_int", 45)
testConfig.setValue("TestSection", "test_float", 0.1)
testConfig.setValue("TestSection", "test_string", "Hello, World")
testConfig.setValue("TestSection", "test_list", [1, 2, 3])
testConfig.setValue("TestSection", "test_tuple", (4, 5, 6))
testConfig.setValue("TestSection", "test_dict", {"x":7.0, "y":8.0})
# call test
self.assertEqual(testConfig.TestSection.test_int, 45)
self.assertEqual(testConfig.TestSection.test_float, 0.1)
self.assertEqual(testConfig.TestSection.test_string, "Hello, World")
self.assertEqual(testConfig.TestSection.test_list, [1, 2, 3])
self.assertEqual(testConfig.TestSection.test_tuple, (4, 5, 6))
self.assertEqual(testConfig.TestSection.test_dict['x'], 7.0)
self.assertEqual(testConfig.TestSection.test_dict['y'], 8.0)
# set value test
testConfig.setValue("TestSection", "test_int", 99)
self.assertEqual(testConfig.TestSection.test_int, 99)
testConfig.save()
unittest.main()
| 2.4375
| 2
|
examples/load.py
|
KokaKiwi/ryaml
| 14
|
12782762
|
SRC = """
---
- - college
- -380608299.3165369
- closely: 595052867
born: false
stomach: true
expression: true
chosen: 34749965
somebody: false
- positive
- true
- false
- price
- 2018186817
- average
- young
- -1447308110
"""
import ryaml
for _ in range(1000):
ryaml.loads(SRC)
| 1.601563
| 2
|
mi/dataset/parser/parad_j_cspp.py
|
rmanoni/mi-dataset
| 1
|
12782763
|
<filename>mi/dataset/parser/parad_j_cspp.py
"""
@package mi.dataset.parser.parad_j_cspp
@file marine-integrations/mi/dataset/parser/parad_j_cspp.py
@author <NAME>
@brief Parser for the parad_j_cspp dataset driver
Release notes:
initial release
"""
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
import re
import numpy
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle
from mi.core.exceptions import RecoverableSampleException
from mi.dataset.parser.common_regexes import \
END_OF_LINE_REGEX, \
FLOAT_REGEX, \
INT_REGEX, \
MULTIPLE_TAB_REGEX
from mi.dataset.parser.cspp_base import \
CsppParser, \
Y_OR_N_REGEX, \
CsppMetadataDataParticle, \
MetadataRawDataKey, \
encode_y_or_n
# Date is in format MM/DD/YY, example 04/17/14
DATE_REGEX = r'\d{2}/\d{2}/\d{2}'
# Time is in format HH:MM:SS, example 15:22:31
TIME_REGEX = r'\d{2}:\d{2}:\d{2}'
# regex for the data record
DATA_REGEX = r'(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Profiler Timestamp
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Depth
DATA_REGEX += '(' + Y_OR_N_REGEX + ')' + MULTIPLE_TAB_REGEX # Suspect Timestamp
DATA_REGEX += '(' + DATE_REGEX + ')' + MULTIPLE_TAB_REGEX # Date
DATA_REGEX += '(' + TIME_REGEX + ')' + MULTIPLE_TAB_REGEX # Time
DATA_REGEX += '(' + INT_REGEX + ')' + END_OF_LINE_REGEX # par
# IDD states the configuration rows after the header as well as occasional malformed data rows
# can be ignored.
#
# Ignore any rows that begin with the timestamp and depth but
# do not match the data record or the header rows formats
IGNORE_REGEX = FLOAT_REGEX + MULTIPLE_TAB_REGEX # Profiler Timestamp
IGNORE_REGEX += FLOAT_REGEX + MULTIPLE_TAB_REGEX # Depth
IGNORE_REGEX += Y_OR_N_REGEX + MULTIPLE_TAB_REGEX # Suspect Timestamp
IGNORE_REGEX += r'[^\t]*' + END_OF_LINE_REGEX # any text (excluding tabs) after the Suspect Timestamp
IGNORE_MATCHER = re.compile(IGNORE_REGEX)
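# For illustration, a line matching DATA_REGEX above would look like the following
# tab-separated record (values invented, not taken from a real instrument file):
#   1397741391.50   12.34   n   04/17/14   15:22:31   2186443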
class DataMatchesGroupNumber(BaseEnum):
"""
An enum for group match indices for a data record chunk.
Used to access the match groups in the particle raw data
"""
PROFILER_TIMESTAMP = 1
DEPTH = 2
SUSPECT_TIMESTAMP = 3
DATE = 4
TIME = 5
PAR = 6
class DataParticleType(BaseEnum):
"""
The data particle types that a parad_j_cspp parser could generate
"""
METADATA_RECOVERED = 'parad_j_cspp_metadata_recovered'
INSTRUMENT_RECOVERED = 'parad_j_cspp_instrument_recovered'
METADATA_TELEMETERED = 'parad_j_cspp_metadata'
INSTRUMENT_TELEMETERED = 'parad_j_cspp_instrument'
class ParadJCsppParserDataParticleKey(BaseEnum):
"""
The data particle keys associated with parad_j_cspp data instrument particle parameters
"""
PROFILER_TIMESTAMP = 'profiler_timestamp'
PRESSURE_DEPTH = 'pressure_depth'
SUSPECT_TIMESTAMP = 'suspect_timestamp'
DATE_STRING = 'date_string'
TIME_STRING = 'time_string'
PAR = 'par'
class ParadJCsppMetadataDataParticle(CsppMetadataDataParticle):
"""
Base Class for building a parad_j_cspp metadata particle
"""
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
@throws SampleException If there is a problem with sample creation
"""
results = []
try:
# Append the base metadata parsed values to the results to return
results += self._build_metadata_parsed_values()
data_match = self.raw_data[MetadataRawDataKey.DATA_MATCH]
# Set the internal timestamp
internal_timestamp_unix = numpy.float(data_match.group(
DataMatchesGroupNumber.PROFILER_TIMESTAMP))
self.set_internal_timestamp(unix_time=internal_timestamp_unix)
except (ValueError, TypeError, IndexError) as ex:
log.warn("Exception when building parsed values")
raise RecoverableSampleException(
"Error (%s) while decoding parameters in data: [%s]"
% (ex, self.raw_data))
return results
class ParadJCsppMetadataRecoveredDataParticle(ParadJCsppMetadataDataParticle):
"""
Class for building a parad_j_cspp recovered metadata particle
"""
_data_particle_type = DataParticleType.METADATA_RECOVERED
class ParadJCsppMetadataTelemeteredDataParticle(ParadJCsppMetadataDataParticle):
"""
Class for building a parad_j_cspp telemetered metadata particle
"""
_data_particle_type = DataParticleType.METADATA_TELEMETERED
class ParadJCsppInstrumentDataParticle(DataParticle):
"""
Base Class for building a parad_j_cspp instrument data particle
"""
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
@throws SampleException If there is a problem with sample creation
"""
results = []
try:
results.append(self._encode_value(ParadJCsppParserDataParticleKey.PROFILER_TIMESTAMP,
self.raw_data.group(DataMatchesGroupNumber.PROFILER_TIMESTAMP),
numpy.float))
results.append(self._encode_value(ParadJCsppParserDataParticleKey.PRESSURE_DEPTH,
self.raw_data.group(DataMatchesGroupNumber.DEPTH),
float))
results.append(self._encode_value(ParadJCsppParserDataParticleKey.SUSPECT_TIMESTAMP,
self.raw_data.group(DataMatchesGroupNumber.SUSPECT_TIMESTAMP),
encode_y_or_n))
results.append(self._encode_value(ParadJCsppParserDataParticleKey.DATE_STRING,
self.raw_data.group(DataMatchesGroupNumber.DATE),
str))
results.append(self._encode_value(ParadJCsppParserDataParticleKey.TIME_STRING,
self.raw_data.group(DataMatchesGroupNumber.TIME),
str))
results.append(self._encode_value(ParadJCsppParserDataParticleKey.PAR,
self.raw_data.group(DataMatchesGroupNumber.PAR),
int))
# Set the internal timestamp
internal_timestamp_unix = numpy.float(self.raw_data.group(
DataMatchesGroupNumber.PROFILER_TIMESTAMP))
self.set_internal_timestamp(unix_time=internal_timestamp_unix)
except (ValueError, TypeError, IndexError) as ex:
log.warn("Exception when building parsed values")
raise RecoverableSampleException(
"Error (%s) while decoding parameters in data: [%s]"
% (ex, self.raw_data))
return results
class ParadJCsppInstrumentRecoveredDataParticle(ParadJCsppInstrumentDataParticle):
"""
Class for building a parad_j_cspp recovered instrument data particle
"""
_data_particle_type = DataParticleType.INSTRUMENT_RECOVERED
class ParadJCsppInstrumentTelemeteredDataParticle(ParadJCsppInstrumentDataParticle):
"""
Class for building a parad_j_cspp telemetered instrument data particle
"""
_data_particle_type = DataParticleType.INSTRUMENT_TELEMETERED
class ParadJCsppParser(CsppParser):
def __init__(self,
config,
stream_handle,
exception_callback):
"""
This method is a constructor that will instantiate an ParadJCsppParser object.
@param config The configuration for this ParadJCsppParser parser
@param stream_handle The handle to the data stream containing the parad_j_cspp data
@param exception_callback The function to call to report exceptions
"""
# Call the superclass constructor
super(ParadJCsppParser, self).__init__(config,
stream_handle,
exception_callback,
DATA_REGEX,
ignore_matcher=IGNORE_MATCHER)
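# A hypothetical construction sketch (the parser config dict and exception callback
# follow the usual mi-dataset conventions and are not defined in this file):
#
#     with open('parad_j_cspp_example.txt', 'rb') as stream_handle:
#         parser = ParadJCsppParser(parser_config, stream_handle, exception_callback)
#         particles = parser.get_records(10)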
| 1.796875
| 2
|
odoo-13.0/addons/website_slides_survey/models/slide_slide.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
| 0
|
12782764
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class SlidePartnerRelation(models.Model):
_inherit = 'slide.slide.partner'
user_input_ids = fields.One2many('survey.user_input', 'slide_partner_id', 'Certification attempts')
survey_quizz_passed = fields.Boolean('Certification Quizz Passed', compute='_compute_survey_quizz_passed', store=True)
@api.depends('partner_id', 'user_input_ids.quizz_passed')
def _compute_survey_quizz_passed(self):
passed_user_inputs = self.env['survey.user_input'].sudo().search([
('slide_partner_id', 'in', self.ids),
('quizz_passed', '=', True)
])
passed_slide_partners = passed_user_inputs.mapped('slide_partner_id')
for record in self:
record.survey_quizz_passed = record in passed_slide_partners
@api.model_create_multi
def create(self, vals_list):
res = super(SlidePartnerRelation, self).create(vals_list)
completed = res.filtered('survey_quizz_passed')
if completed:
completed.write({'completed': True})
return res
def _write(self, vals):
res = super(SlidePartnerRelation, self)._write(vals)
if vals.get('survey_quizz_passed'):
self.sudo().write({'completed': True})
return res
class Slide(models.Model):
_inherit = 'slide.slide'
slide_type = fields.Selection(selection_add=[('certification', 'Certification')])
survey_id = fields.Many2one('survey.survey', 'Certification')
nbr_certification = fields.Integer("Number of Certifications", compute='_compute_slides_statistics', store=True)
_sql_constraints = [
('check_survey_id', "CHECK(slide_type != 'certification' OR survey_id IS NOT NULL)", "A slide of type 'certification' requires a certification."),
('check_certification_preview', "CHECK(slide_type != 'certification' OR is_preview = False)", "A slide of type certification cannot be previewed."),
]
@api.onchange('survey_id')
def _on_change_survey_id(self):
if self.survey_id:
self.slide_type = 'certification'
@api.model
def create(self, values):
rec = super(Slide, self).create(values)
if rec.survey_id:
rec.slide_type = 'certification'
return rec
def _generate_certification_url(self):
""" get a map of certification url for certification slide from `self`. The url will come from the survey user input:
1/ existing and not done user_input for member of the course
2/ create a new user_input for member
3/ for no member, a test user_input is created and the url is returned
Note: the slide.slide.partner record should already exist
We have to generate a new invite_token to differentiate pools of attempts since the
course can be enrolled multiple times.
"""
certification_urls = {}
for slide in self.filtered(lambda slide: slide.slide_type == 'certification' and slide.survey_id):
if slide.channel_id.is_member:
user_membership_id_sudo = slide.user_membership_id.sudo()
if user_membership_id_sudo.user_input_ids:
last_user_input = next(user_input for user_input in user_membership_id_sudo.user_input_ids.sorted(
lambda user_input: user_input.create_date, reverse=True
))
certification_urls[slide.id] = last_user_input._get_survey_url()
else:
user_input = slide.survey_id.sudo()._create_answer(
partner=self.env.user.partner_id,
check_attempts=False,
**{
'slide_id': slide.id,
'slide_partner_id': user_membership_id_sudo.id
},
invite_token=self.env['survey.user_input']._generate_invite_token()
)
certification_urls[slide.id] = user_input._get_survey_url()
else:
user_input = slide.survey_id.sudo()._create_answer(
partner=self.env.user.partner_id,
check_attempts=False,
test_entry=True, **{
'slide_id': slide.id
}
)
certification_urls[slide.id] = user_input._get_survey_url()
return certification_urls
| 2.125
| 2
|
pacote-download/Exercicios/Desafio100.py
|
lucasdmazon/CursoVideo_Python
| 0
|
12782765
|
from random import randint
from time import sleep
def sorteia(lst):
print('Drawing 5 values for the list: ', end='')
for i in range(0, 5):
num = randint(1, 10)
lst.append(num)
print(num, end=' ')
sleep(0.5)
print('DONE!')
def somaPar(lst):
soma = 0
for i in lst:
if i % 2 == 0:
soma += i
print(f'Adding up the even values of {lst}, we get {soma}')
numeros = list()
sorteia(numeros)
somaPar(numeros)
| 3.6875
| 4
|
plugins/viewcam.py
|
komoto48g/wxpj
| 0
|
12782766
|
#! python
# -*- coding: utf-8 -*-
## import time
import wx
import cv2
import numpy as np
from mwx.controls import Param, LParam
from mwx.controls import ToggleButton, Choice
from mwx.graphman import Layer, Thread
import editor as edi
class Plugin(Layer):
"""Plugins of camera viewer
"""
menu = "Cameras"
menustr = "Camera &viewer"
camerasys = property(lambda self: self.camera_selector.value)
cameraman = property(lambda self: self.parent.require(self.camerasys))
def Init(self):
self.viewer = Thread(self)
self.button = ToggleButton(self, "View camera", icon='cam',
handler=lambda v: self.viewer.Start(self.run)
if v.IsChecked() else self.viewer.Stop())
self.rate_param = LParam('rate', (100,500,100), 500, tip="refresh speed [ms] (>= 100ms)")
self.size_param = Param('size', (128,256,512,1024), 512, tip="resizing view window (<= 1k)")
self.camera_selector = Choice(self,
choices=['JeolCamera', 'RigakuCamera'], readonly=1)
self.layout((
self.button,
),
)
self.layout((
self.rate_param,
self.size_param,
self.camera_selector,
),
title="Setting",
row=1, show=0, type='vspin', lw=40, tw=40, cw=-1
)
def init_session(self, session):
self.rate_param.value = session.get('rate')
self.size_param.value = session.get('size')
self.camera_selector.value = session.get('camera')
def save_session(self, session):
session.update({
'rate': self.rate_param.value,
'size': self.size_param.value,
'camera': self.camera_selector.value,
})
def Destroy(self):
if self.viewer.is_active:
self.viewer.Stop()
return Layer.Destroy(self)
def run(self):
try:
title = self.__module__
if not self.cameraman:
print(self.message("- Camera manager is not selected."))
return
while self.viewer.is_active:
src = edi.imconv(self.cameraman.capture())
h, w = src.shape
H = self.size_param.value
W = H * w // h
dst = cv2.resize(src, (W, H), interpolation=cv2.INTER_AREA)
## dst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
## overlay the aiming crosshair and circles onto the image with XOR
if 1:
## lines and circles with color:cyan #00c0c0
## c = (192,192,0)
c = 255
cx, cy = W//2, H//2
buf = np.zeros((H, W), dtype=dst.dtype)
## buf = np.resize(0, (H, W)).astype(dst.dtype)
cv2.line(buf, (0, cy), (W, cy), c, 1)
cv2.line(buf, (cx, 0), (cx, H), c, 1)
cv2.circle(buf, (cx, cy), cx//2, c, 1)
cv2.circle(buf, (cx, cy), cx//4, c, 1)
dst = cv2.bitwise_xor(buf, dst)
cv2.imshow(title, dst)
cv2.waitKey(self.rate_param.value)
if cv2.getWindowProperty(title, 0) < 0:
self.button.Value = False
self.viewer.Stop()
break
finally:
cv2.destroyAllWindows()
if __name__ == '__main__':
from plugins import JeolCamera, RigakuCamera
from mwx.graphman import Frame
app = wx.App()
frm = Frame(None)
frm.load_plug(__file__, show=1)
frm.load_plug(JeolCamera, show=0)
frm.load_plug(RigakuCamera, show=0)
frm.Show()
app.MainLoop()
| 2.1875
| 2
|
stib_administraciones/personales/models.py
|
nfheredia/stib-administraciones
| 0
|
12782767
|
<reponame>nfheredia/stib-administraciones<filename>stib_administraciones/personales/models.py<gh_stars>0
from django.db import models
from ..core.models import TimeStampedModel
class Personales(TimeStampedModel):
"""
Model for storing the different
types of staff who work in a building.
E.g.: Doorman, Cleaning, Security
"""
nombre = models.CharField(blank=False,
max_length=150,
null=False,
verbose_name='Tipo de Personal',
help_text='Ej: Portero, Limpieza, Seguridad',
unique=True)
comentario = models.TextField(blank=True, verbose_name='Comentario')
def __unicode__(self):
""" Show the name """
return self.nombre
class Meta:
verbose_name = 'Personal de edificios'
verbose_name_plural = 'Personal de edificios'
| 2.0625
| 2
|
migrations/versions/57beb47d38d3_init.py
|
Bloodielie/trip_counter
| 0
|
12782768
|
<filename>migrations/versions/57beb47d38d3_init.py
"""init
Revision ID: 57beb47d38d3
Revises:
Create Date: 2021-12-02 18:11:42.551720
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '57<PASSWORD>4<PASSWORD>3'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('fuels',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('price', sa.Numeric(precision=15, scale=6), nullable=True),
sa.Column('identifier', sa.String(length=150), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_fuels_identifier'), 'fuels', ['identifier'], unique=True)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('codename', sa.String(length=64), nullable=False),
sa.Column('description', sa.String(length=256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_roles_codename'), 'roles', ['codename'], unique=True)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('telegram_id', sa.Integer(), nullable=True),
sa.Column('balance', sa.Numeric(precision=15, scale=6), nullable=True),
sa.Column('identifier', sa.String(length=250), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_identifier'), 'users', ['identifier'], unique=True)
op.create_index(op.f('ix_users_telegram_id'), 'users', ['telegram_id'], unique=True)
op.create_table('autos',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('multiplier', sa.Float(), nullable=True),
sa.Column('consumption', sa.Float(), nullable=True),
sa.Column('identifier', sa.String(length=150), nullable=True),
sa.Column('owner', sa.Integer(), nullable=True),
sa.Column('fuel', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['fuel'], ['fuels.id'], ),
sa.ForeignKeyConstraint(['owner'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_autos_fuel'), 'autos', ['fuel'], unique=False)
op.create_index(op.f('ix_autos_identifier'), 'autos', ['identifier'], unique=False)
op.create_table('invites',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('creator', sa.Integer(), nullable=True),
sa.Column('invited', sa.Integer(), nullable=True),
sa.Column('hash', sa.String(length=64), nullable=False),
sa.Column('user_identifier', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['creator'], ['users.id'], ),
sa.ForeignKeyConstraint(['invited'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_identifier')
)
op.create_index(op.f('ix_invites_hash'), 'invites', ['hash'], unique=False)
op.create_table('transactions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
sa.Column('amount', sa.Numeric(precision=15, scale=6), nullable=True),
sa.Column('sender', sa.Integer(), nullable=True),
sa.Column('receiver', sa.Integer(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['receiver'], ['users.id'], ),
sa.ForeignKeyConstraint(['sender'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_transactions_is_active'), 'transactions', ['is_active'], unique=False)
op.create_table('users_roles',
sa.Column('user', sa.Integer(), nullable=False),
sa.Column('role', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['role'], ['roles.id'], ),
sa.ForeignKeyConstraint(['user'], ['users.id'], ),
sa.PrimaryKeyConstraint('user', 'role')
)
op.create_table('trips',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('driver', sa.Integer(), nullable=True),
sa.Column('distance', sa.Float(), nullable=True),
sa.Column('cost', sa.Numeric(precision=15, scale=6), nullable=True),
sa.Column('auto', sa.Integer(), nullable=True),
sa.Column('date', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
sa.Column('is_deleted', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['auto'], ['autos.id'], ),
sa.ForeignKeyConstraint(['driver'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_trips_auto'), 'trips', ['auto'], unique=False)
op.create_index(op.f('ix_trips_driver'), 'trips', ['driver'], unique=False)
op.create_index(op.f('ix_trips_is_deleted'), 'trips', ['is_deleted'], unique=False)
op.create_table('trip_passengers',
sa.Column('passenger', sa.Integer(), nullable=False),
sa.Column('trip', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['passenger'], ['users.id'], ),
sa.ForeignKeyConstraint(['trip'], ['trips.id'], ),
sa.PrimaryKeyConstraint('passenger', 'trip')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('trip_passengers')
op.drop_index(op.f('ix_trips_is_deleted'), table_name='trips')
op.drop_index(op.f('ix_trips_driver'), table_name='trips')
op.drop_index(op.f('ix_trips_auto'), table_name='trips')
op.drop_table('trips')
op.drop_table('users_roles')
op.drop_index(op.f('ix_transactions_is_active'), table_name='transactions')
op.drop_table('transactions')
op.drop_index(op.f('ix_invites_hash'), table_name='invites')
op.drop_table('invites')
op.drop_index(op.f('ix_autos_identifier'), table_name='autos')
op.drop_index(op.f('ix_autos_fuel'), table_name='autos')
op.drop_table('autos')
op.drop_index(op.f('ix_users_telegram_id'), table_name='users')
op.drop_index(op.f('ix_users_identifier'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_roles_codename'), table_name='roles')
op.drop_table('roles')
op.drop_index(op.f('ix_fuels_identifier'), table_name='fuels')
op.drop_table('fuels')
# ### end Alembic commands ###
| 1.789063
| 2
|
app/main/hs_api_log_forms.py
|
RRRoger/MyWebserver-flask
| 20
|
12782769
|
<filename>app/main/hs_api_log_forms.py
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField, TextAreaField, BooleanField
from wtforms.validators import DataRequired, EqualTo, Length
class HsApiLogSearch(FlaskForm):
methods = [
('url', 'URL'),
('create_uid', 'Create User'),
('is_success', 'Is Success'),
('remote_addr', 'Remote Address'),
]
method = SelectField(choices=methods, validators=[DataRequired(message=u'Name cannot be empty')], coerce=str)
content = StringField()
submit = SubmitField('Search')
class HsApiLogForm(FlaskForm):
record_id = IntegerField(validators=[])
url = StringField()
remote_addr = StringField()
is_success = BooleanField()
form_body = TextAreaField()
data_body = TextAreaField()
file_body = TextAreaField()
response_body = TextAreaField()
create_date = StringField()
create_user_name = StringField()
| 2.578125
| 3
|
django_project/weatherapp/settings_dev.py
|
bbsoft0/weather
| 1
|
12782770
|
<filename>django_project/weatherapp/settings_dev.py
"""local runserver settings"""
import os
from .settings import BASE_DIR
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "<KEY>"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Static Files
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = "/static/"
| 2
| 2
|
psana/psana/detector/test_xx_ipython.py
|
ZhenghengLi/lcls2
| 16
|
12782771
|
def test_ipython():
print('DATA FILE IS AVAILABLE ON drp-ued-cmp001 ONLY')
#from psana.pyalgos.generic.NDArrUtils import info_ndarr
from psana import DataSource
ds = DataSource(files='/u2/pcds/pds/ued/ueddaq02/xtc/ueddaq02-r0028-s000-c000.xtc2')
run = next(ds.runs())
det = run.Detector('epixquad')
step = next(run.steps())
evt = next(step.events())
v = det.step.value(evt)
d = det.step.docstring(evt)
detsd = run.Detector('step_docstring') #Out[6]: <psana.detector.envstore.scan_raw_2_0_0 at 0x7f1a24735c10>
detsv = run.Detector('step_value') #Out[8]: <psana.detector.envstore.scan_raw_2_0_0 at 0x7f1a0b205c10>
from psana import DataSource
ds = DataSource(exp='tmoc00118', run=123, max_events=100)
run = next(ds.runs())
det = run.Detector('tmoopal')
print('run.dsparms.det_classes dict content:\n %s' % str(run.dsparms.det_classes))
run = None
evt = None
from psana import DataSource
ds = DataSource(exp='ascdaq18', run=24, max_events=100)
print('ds.xtc_files:\n ', '\n '.join(ds.xtc_files))
for irun,run in enumerate(ds.runs()):
print('\n==== %02d run: %d exp: %s detnames: %s' % (irun, run.runnum, run.expt, ','.join(run.detnames)))
det = run.Detector('epixhr')
print('det.raw._fullname :', det.raw._fullname())
for istep,step in enumerate(run.steps()):
print('\nStep %02d' % istep, type(step), end='')
for ievt,evt in enumerate(step.events()):
if ievt>10: continue #exit('exit by number of events limit %d' % args.evtmax)
print('\n Event %02d' % (ievt))
st = evt.run().step(evt)
print('XXX dir(st):', dir(st))
| 2.15625
| 2
|
validate_infrastruture.py
|
dell-ai-engineering/BigDL4CDSW
| 2
|
12782772
|
import os
import os.path
from IPython.display import display, HTML
def html_log(message,tag="H1", color="black",center=False):
if center:
display(HTML('<{tag}> <center> <font color="{color}"> {message}</font></center></{tag}>'.format(tag=tag, message=message,color=color)))
else:
display(HTML('<{tag}> <font color="{color}"> {message}</font></{tag}>'.format(tag=tag, message=message,color=color)))
def html_table(data):
display(HTML(
'<table style="border: 1px solid black" ><tr>{}</tr></table>'.format(
'</tr><tr style="border: 1px solid black">'.join(
'<td style="border: 1px solid black">{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)
)
))
html_log("Checking BigDL Environment",center=True)
def check_env(env_var):
if env_var not in os.environ:
display(HTML('<H2> <font color="red">{0} environment variable not set</font></H2>'.format(env_var)))
return False
env_paths = os.environ.get(env_var).split(':')
if not all([ os.path.isfile(p) for p in env_paths ] ):
display(HTML("<H3> <font color=\"red\"> {0} environment variable set, but one of the paths is not present</font></H3>".format(env_var)))
return False
else:
html_log("Successfully checked for {0}".format(env_var), 'p', 'green')
#display(HTML("<H3> <font color='green'> </font></H3>".format(env_var)))
print "{}=={}".format(env_var, os.environ.get(env_var))
return True
for bigdl_var in ['BigDL_JAR_PATH', 'PYTHONPATH']:
check_env(bigdl_var)
try:
from bigdl.util.common import *
from bigdl.nn.layer import *
import bigdl.version
except:
html_log('Unable to import BigDL Library', 'p', 'red')
else:
html_log("BigDL Python Library Imported", 'p', 'green')
#display(HTML('<H3> <font color="green"> BigDL Python Library Imported </font></H3>'))
try:
from pyspark import SparkContext
sc = SparkContext.getOrCreate(conf=create_spark_conf().setMaster("local[*]"))
except:
html_log('Unable to get a Spark context', 'p', 'red')
#display(HTML('<H3> <font color="red">Unable to open Spark context </font></H3>'))
else:
html_log('Got a spark context handle', 'p', 'green')
#display(HTML('<H3> <font color="green">Spark Context created </font></H3>'))
html_table(sc._conf.getAll())
try:
init_engine() # prepare the bigdl environment
except:
html_log('Unable to Initialize BigDL Engine', 'p', 'red')
else:
html_log('BigDL Engine initialized, good to go...', 'p', 'green')
print "BigDL Version : {} ".format(bigdl.version.__version__)
| 2.609375
| 3
|
Python/NeonOcean.S4.Main/NeonOcean/S4/Main/Director.py
|
NeonOcean/Main
| 1
|
12782773
|
from __future__ import annotations
import typing
import clock
import zone
from NeonOcean.S4.Main import Mods, This
from NeonOcean.S4.Main.Tools import Exceptions
from protocolbuffers import FileSerialization_pb2
from server import client as clientModule
from sims4 import service_manager
from sims4.tuning import instance_manager
_announcers = list() # type: typing.List[typing.Type[Announcer]]
class Announcer:
Host = This.Mod # type: Mods.Mod
Enabled = True # type: bool
Reliable = False # type: bool # Whether the announcer will be called if the host is disabled.
Preemptive = False # type: bool # Whether the announcement methods are called before or after the function they are announcing.
_priority = 0 # type: float # Higher priority announcers will run before lower priority ones.
def __init_subclass__ (cls, **kwargs):
SetupAnnouncer(cls)
@classmethod
def GetPriority (cls) -> float:
return cls._priority
@classmethod
def SetPriority (cls, value) -> None:
cls._priority = value
_SortAnnouncer()
@classmethod
def InstanceManagerOnStart (cls, instanceManager: instance_manager.InstanceManager) -> None:
pass
@classmethod
def InstanceManagerLoadDataIntoClassInstances (cls, instanceManager: instance_manager.InstanceManager) -> None:
pass
@classmethod
def InstanceManagerOnStop (cls, instanceManager: instance_manager.InstanceManager) -> None:
pass
@classmethod
def OnLoadingScreenAnimationFinished (cls, zoneReference: zone.Zone) -> None:
pass
@classmethod
def OnClientConnect (cls, clientReference: clientModule.Client) -> None:
pass
@classmethod
def OnClientDisconnect (cls, clientReference: clientModule.Client) -> None:
pass
@classmethod
def OnEnterMainMenu (cls) -> None:
pass
@classmethod
def ZoneLoad (cls, zoneReference: zone.Zone) -> None:
pass
@classmethod
def ZoneSave (cls, zoneReference: zone.Zone, saveSlotData: typing.Optional[FileSerialization_pb2.SaveSlotData] = None) -> None:
pass
@classmethod
def ZoneStartServices (cls, zoneReference: zone.Zone, gameplayZoneData: FileSerialization_pb2.GameplayData, saveSlotData: FileSerialization_pb2.SaveSlotData) -> None:
pass
@classmethod
def ZoneOnToreDown (cls, zoneReference: zone.Zone, clientReference: clientModule.Client) -> None:
pass
@classmethod
def ZoneUpdate (cls, zoneReference: zone.Zone, absoluteTicks: int) -> None:
pass
@classmethod
def ServiceManagerOnZoneLoad (cls, zoneManager: service_manager.ServiceManager) -> None:
pass
@classmethod
def ServiceManagerOnZoneUnload (cls, zoneManager: service_manager.ServiceManager) -> None:
pass
@classmethod
def GameClockTickGameClock (cls, gameClock: clock.GameClock, absoluteTicks: int) -> None:
pass
def GetAllAnnouncers () -> typing.List[typing.Type[Announcer]]:
return list(_announcers)
def SetupAnnouncer (announcer: typing.Type[Announcer]) -> None:
if not isinstance(announcer, type):
raise Exceptions.IncorrectTypeException(announcer, "announcer", (type,))
if not issubclass(announcer, Announcer):
raise Exceptions.DoesNotInheritException("announcer", (Announcer,))
if announcer in _announcers:
return
_Register(announcer)
_SortAnnouncer()
def _Register (announcer: typing.Type[Announcer]) -> None:
if not announcer in _announcers:
_announcers.append(announcer)
def _SortAnnouncer () -> None:
global _announcers
announcersCopy = _announcers.copy() # type: typing.List[typing.Type[Announcer]]
sortedAnnouncers = list()
for loopCount in range(len(announcersCopy)): # type: int
targetIndex = None # type: typing.Optional[int]
for currentIndex in range(len(announcersCopy)):
if targetIndex is None:
targetIndex = currentIndex
continue
if -announcersCopy[currentIndex].GetPriority() != -announcersCopy[targetIndex].GetPriority():
if -announcersCopy[currentIndex].GetPriority() < -announcersCopy[targetIndex].GetPriority():
targetIndex = currentIndex
continue
else:
if announcersCopy[currentIndex].__module__ < announcersCopy[targetIndex].__module__:
targetIndex = currentIndex
continue
sortedAnnouncers.append(announcersCopy[targetIndex])
announcersCopy.pop(targetIndex)
_announcers = sortedAnnouncers
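# A hypothetical announcer subclass sketch (defining the subclass is enough to register
# it, since __init_subclass__ above calls SetupAnnouncer):
#
#     class ExampleAnnouncer(Announcer):
#         _priority = 10  # higher priority announcers run before lower priority ones
#
#         @classmethod
#         def OnEnterMainMenu (cls) -> None:
#             pass  # respond to the player reaching the main menu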
| 2.09375
| 2
|
preprocess_scripts/group.py
|
ictnlp/STEMM
| 11
|
12782774
|
<gh_stars>10-100
import os
import shutil
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--lang", help="target language")
args = parser.parse_args()
splits = ['dev', 'tst-COMMON', 'tst-HE', 'train']
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
seg_path = os.path.join(root, 'data', 'mustc', f'en-{args.lang}', 'segment')
for split in splits:
split_path = os.path.join(seg_path, split)
for f in os.listdir(split_path):
if f.startswith('ted'):
speaker = f.split('_')[1]
speaker_dir = os.path.join(split_path, speaker)
os.makedirs(speaker_dir, exist_ok=True)
shutil.move(os.path.join(split_path, f), speaker_dir)
| 2.421875
| 2
|
1556_thousand_separator.py
|
claytonjwong/leetcode-py
| 1
|
12782775
|
#
# 1556. Thousand Separator
#
# Q: https://leetcode.com/problems/thousand-separator/
# A: https://leetcode.com/problems/thousand-separator/discuss/805674/Javascript-Python3-C%2B%2B-1-Liners
#
class Solution:
def thousandSeparator(self, n: int) -> str:
return str(n) if n < 1000 else self.thousandSeparator(n // 1000) + '.' + str(n % 1000).zfill(3)
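# For example (hypothetical call): Solution().thousandSeparator(1234567) -> "1.234.567"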
| 3.203125
| 3
|
rest_models/backend/utils.py
|
matheusmatos/django-rest-models
| 61
|
12782776
|
<reponame>matheusmatos/django-rest-models<gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import logging
logger = logging.getLogger(__name__)
def message_from_response(response):
return "[%d]%s" % (
response.status_code,
response.text if '<!DOCTYPE html>' not in response.text[:30] else response.reason
)
try:
from django.contrib.postgres.fields import JSONField as JSONFieldLegacy
except ImportError:
def JSONField(*args, **kwargs):
return None
else:
class JSONField(JSONFieldLegacy):
def get_prep_value(self, value):
return value
| 2.078125
| 2
|
main.py
|
keygen-sh/example-python-activation-proof-verification
| 2
|
12782777
|
<reponame>keygen-sh/example-python-activation-proof-verification
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.backends import default_backend
from cryptography.exceptions import InvalidSignature
import base64
import sys
import os
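# Usage sketch (values are illustrative):
#   KEYGEN_PUBLIC_KEY="<PEM public key>" python main.py "proof/<base64url data>.<base64url signature>"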
# Cryptographically verify the activation proof using our public key
def verify_activation_proof(activation_proof):
assert activation_proof, 'activation proof is missing'
    # Split activation proof to obtain the signing data and signature, then decode
# base64url encoded values
signing_data, enc_sig = activation_proof.split('.')
signing_prefix, enc_proof = signing_data.split('/')
assert signing_prefix == 'proof', 'activation proof prefix %s is invalid' % signing_prefix
proof = base64.urlsafe_b64decode(enc_proof)
sig = base64.urlsafe_b64decode(enc_sig)
# Load the PEM formatted public key from the environment
pub_key = serialization.load_pem_public_key(
os.environ['KEYGEN_PUBLIC_KEY'].encode(),
backend=default_backend()
)
# Verify the proof
try:
pub_key.verify(
sig,
("proof/%s" % enc_proof).encode(),
padding.PKCS1v15(),
hashes.SHA256()
)
print('[INFO] Activation proof contents: %s' % proof)
return True
except (InvalidSignature, TypeError):
return False
try:
ok = verify_activation_proof(
sys.argv[1]
)
except AssertionError as e:
print('[ERROR] %s' % e)
sys.exit(1)
except Exception as e:
print('[ERROR] cryptography: %s' % e)
sys.exit(1)
if ok:
print('[OK] Activation proof is authentic!')
sys.exit(0)
else:
print('[ERROR] Activation proof is not authentic!')
sys.exit(1)
| 3.078125
| 3
|
tests/models.py
|
ckirby/django-model-ident
| 2
|
12782778
|
from django.db import models
class BaseManagerModel(models.Model):
@classmethod
def create(cls):
return cls.objects.create()
class TestManager(models.Manager):
def get_queryset(self):
return super(TestManager, self).get_queryset().none()
class RenameManagerModel(models.Model):
instances = models.Manager()
@classmethod
def create(cls):
return cls.instances.create()
class ReplaceManagerModel(models.Model):
objects = TestManager()
@classmethod
def create(cls):
return cls.objects.create()
class MultipleManagerModel(models.Model):
objects = models.Manager()
instances = TestManager()
@classmethod
def create(cls):
return cls.objects.create()
| 2.21875
| 2
|
Poisson_S1_hypersphere.py
|
zhang-liu-official/project3-pinn-test
| 0
|
12782779
|
<filename>Poisson_S1_hypersphere.py
"""Backend supported: tensorflow.compat.v1"""
import deepxde as dde
import xde as xde
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import random
from deepxde.backend import tf
## useful reference: https://en.wikipedia.org/wiki/Laplace_operator#Coordinate_expressions
## Laplacian-beltrami operator in spherical coordinates for 2-sphere:
## https://en.wikipedia.org/wiki/Laplace%E2%80%93Beltrami_operator#Examples
# note that u (the solution) is the y here!
def pde(x, y):
## Poisson vs Laplacian: only diff is the rhs is f(x) vs 0
# (X1, X2) = (x,y) = (cos(theta), sin(theta))
X1, X2= x[:, 0], x[:,1]
X1 = tf.reshape(X1, (X1.shape[0],1))
X2 = tf.reshape(X2, (X2.shape[0],1))
dy_xx = dde.grad.hessian(y, x, i=0, j=0)
dy_yy = dde.grad.hessian(y, x, i=1, j=1)
lhs = dy_xx + dy_yy
# sin(theta)
rhs = X2
return lhs - rhs
def boundary(x, on_boundary):
## (Note that because of rounding-off errors, it is often wise to use np.isclose to test whether two floating point values are equivalent.)
return on_boundary
def solution(x):
# (X1, X2) = (x,y) = (cos(theta), sin(theta))
X1, X2= x[:, 0], x[:,1]
X1 = tf.reshape(X1, (X1.shape[0],1))
X2 = tf.reshape(X2, (X2.shape[0],1))
## if laplacian, the solution is:
# return r * np.cos(theta)
##-np.sin(theta)
return -X2
# Use [r*sin(theta), r*cos(theta)] as features,
# so that the network is automatically periodic along the theta coordinate.
# Backend tensorflow.compat.v1 or tensorflow
# def feature_transform(x):
# return tf.concat(
# [tf.sin(x[:]), tf.cos(x[:])], axis=1 ## since r = 1
# )
def main():
# geom = dde.geometry.Rectangle(xmin=[0, 0], xmax=[1, 2 * np.pi])
# unit sphere centered at (0,0,0) (radius = 1)
# geom = dde.geometry.geometry_nd.Hypersphere([0,0], radius = 1)
geom = xde.geometry.geometry_nd.Hypersphere([0,0], radius = 1)
## BC: u(0) = u(2 * pi)
bc = xde.ZeroLossBC(
geom,
lambda x: x,
boundary,
)
# bc = xde.ZeroLossBC(geom, func, boundary)
data = dde.data.PDE(
geom, pde, [bc], num_domain=400, num_boundary=0, num_test = 80, solution = solution)
## original NN parameters
net = dde.maps.FNN([2] + [500] + [1], "tanh", "Glorot uniform")
## over-parameterized
# net = dde.maps.FNN([2] + [1200]*2 + [1], "tanh", "Glorot uniform")
# net.apply_feature_transform(feature_transform)
model = dde.Model(data, net)
model.compile("adam", lr=0.001, metrics=["l2 relative error"])
losshistory, train_state = model.train(epochs=15000)
dde.saveplot(losshistory, train_state, issave=True, isplot=True)
## uniform_points not implemented for hypersphere. test data used random_points instead, following distribution defined here: https://mathworld.wolfram.com/DiskPointPicking.html
X = geom.uniform_points(1000)
# X = feature_transform(X)
y_true = solution(X)
# y_pred is PDE residual
y_pred = model.predict(X, operator = pde)
print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))
y_true = y_true.reshape((y_true.shape[0],1))
y_pred = y_pred.reshape((y_pred.shape[0],1))
np.savetxt("test.dat", np.hstack((X,y_true, y_pred)))
if __name__ == "__main__":
main()
| 2.734375
| 3
|
invest/tests/test_template_tags.py
|
uktrade/invest
| 1
|
12782780
|
<filename>invest/tests/test_template_tags.py<gh_stars>1-10
from unittest.mock import call, Mock, patch
import pytest
from invest.templatetags.language_tags import change_lang_with_querystring
@pytest.mark.parametrize(
'change_lang_response,expected_response',
[
('', ''),
('foo?bar=hello', 'foo?bar=hello&lang=es'),
('foo', 'foo?lang=es')
]
)
def test_change_lang_with_querystring(change_lang_response, expected_response):
with patch(
'invest.templatetags.language_tags.change_lang'
) as mocked_change_lang:
context = Mock()
mocked_change_lang.return_value = change_lang_response
response = change_lang_with_querystring(context, 'es')
assert response == expected_response
assert mocked_change_lang.call_args == call(context, 'es')
| 2.578125
| 3
|
tensorflow/stream_executor/cl/test/test_random.py
|
salvatoretrimarchi/tf-coriander
| 0
|
12782781
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
import pytest
import sys
from tensorflow.python.ops import array_ops
shapes = [
(3, 4),
(50, 70, 12)
]
seed = 123
def _test_random_func(func_name, shape):
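    # Build the same seeded random op twice, once on /cpu:0 and once on /gpu:0, run each in a fresh
    # session, and check that the two results agree element-wise (within 1e-4).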
print('func_name', func_name)
func = eval(func_name)
with tf.Graph().as_default():
with tf.device('/cpu:0'):
W_t = tf.Variable(func(shape, seed=seed))
with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
sess.run(tf.initialize_all_variables())
W_cpu = sess.run(W_t)
with tf.device('/gpu:0'):
W_t = tf.Variable(func(shape, seed=seed))
with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
sess.run(tf.initialize_all_variables())
W_gpu = sess.run(W_t)
if np.prod(np.array(shape)) < 20:
print('W_cpu', W_cpu)
print('W_gpu', W_gpu)
else:
print('W_cpu.reshape(-1)[:20]', W_cpu.reshape(-1)[:20])
print('W_gpu.reshape(-1)[:20]', W_gpu.reshape(-1)[:20])
assert np.all(np.abs(W_cpu - W_gpu) < 1e-4)
@pytest.mark.parametrize(
'shape',
shapes)
def test_random_normal(shape):
_test_random_func('tf.random_normal', shape)
@pytest.mark.parametrize(
'shape',
shapes)
def test_random_uniform(shape):
_test_random_func('tf.random_uniform', shape)
@pytest.mark.parametrize(
'shape',
shapes)
@pytest.mark.skip(reason='Causes abort currently')
def test_truncated_normal(shape):
_test_random_func('tf.truncated_normal', shape)
if __name__ == '__main__':
if len(sys.argv) == 1:
print('Please run using py.test')
else:
eval('%s((3, 4))' % sys.argv[1])
| 2.234375
| 2
|
gpycharts.py
|
diotrahenriyan/goopycharts
| 0
|
12782782
|
<reponame>diotrahenriyan/goopycharts
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Creating Graphs with Python and GooPyCharts
# Source: [datascience+](https://datascienceplus.com/creating-graphs-with-python-and-goopycharts/)
# %% [markdown]
# ## Install gpcharts library
#
# ```python
# pip install gpcharts
# ```
# %% [markdown]
# ## Our First Graph
#
# %%
from gpcharts import figure
my_plot = figure(title='Demo')
my_plot.plot([1, 2, 10, 15, 12, 23])
# %% [markdown]
# ## Creating a Bar Graph
# %%
fig3 = figure()
xVals = ['Temps','2016-03-20','2016-03-21','2016-03-25','2016-04-01']
yVals = [['Shakuras','Korhal','Aiur'],[10,30,40],[12,28,41],[15,34,38],[8,33,47]]
fig3.title = 'Weather over Days'
fig3.ylabel = 'Dates'
fig3.bar(xVals, yVals)
# %% [markdown]
# ## Creating Other Types of Graphs
# %%
my_fig = figure()
xVals = ['Dates','2016-03-20','2016-03-21','2016-03-25','2016-04-01']
yVals = [['Shakuras','Korhal','Aiur'],[10,30,40],[12,28,41],[15,34,38],[8,33,47]]
my_fig.title = 'Scatter Plot'
my_fig.ylabel = 'Temps'
my_fig.scatter(xVals, yVals)
| 3.15625
| 3
|
django_sendgrid_tracking/signals.py
|
MattFanto/django-sendgrid-tracking
| 5
|
12782783
|
from sendgrid_backend.signals import sendgrid_email_sent
from django_sendgrid_tracking.mail import create_send_email
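# Register the tracking handler so it runs every time the SendGrid backend emits sendgrid_email_sent.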
sendgrid_email_sent.connect(create_send_email)
| 1.21875
| 1
|
nugridpy/regression_tests/ImageCompare/compare_image_entropy.py
|
NuGrid/NuGridPy
| 16
|
12782784
|
<gh_stars>10-100
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
import matplotlib.image as mpimg
import matplotlib.pylab as plb
import numpy
import sys
from scipy import stats
import glob
import os.path
import warnings
import time
def compare_entropy(name_img1,name_img2,method="rmq"):
'''Compare two images by the Kullback-Leibler divergence
Parameters
----------
name_img1 : string
filename of image 1 (png format)
name_img2 : string
filename of image 2 (png format)
Returns
-------
S : float
Kullback-Leibler divergence S = sum(pk * log(pk / qk), axis=0)
Note
----
See http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html
'''
img1 = mpimg.imread(name_img1)
img2 = mpimg.imread(name_img2)
fimg1 = img1.flatten()
fimg2 = img2.flatten()
if method == "KL-div":
eps = 0.0001
S = stats.entropy(fimg2+eps,fimg1+eps)
S = numpy.log10(S)
elif method == "rmq":
fdiff=fimg1-fimg2
fdiff_sqr = fdiff**4
S = (fdiff_sqr.sum())**(old_div(1.,4))
return S,fimg1, fimg2
def compare_images(path = '.'):
S_limit = 10.
file_list = glob.glob(os.path.join(path, 'Abu*'))
file_list_master = glob.glob(os.path.join(path, 'MasterAbu*'))
file_list.sort()
file_list_master.sort()
S=[]
print("Identifying images with rmq > "+'%3.1f'%S_limit)
ierr_count = 0
for i in range(len(file_list)):
this_S,fimg1,fimg2 = compare_entropy(file_list[i],file_list_master[i])
if this_S > S_limit:
warnings.warn(file_list[i]+" and "+file_list_master[i]+" differ by "+'%6.3f'%this_S)
ierr_count += 1
S.append(this_S)
if ierr_count > 0:
print("Error: at least one image differs by more than S_limit")
sys.exit(1)
#print ("S: ",S)
#plb.plot(S,'o')
#plb.xlabel("image number")
#plb.ylabel("modified log KL-divergence to previous image")
#plb.show()
if __name__ == "__main__":
compare_images()
| 2.3125
| 2
|
30_days_leetcode_challenge/MinStack.py
|
Imipenem/Competitive_Prog_with_Python
| 0
|
12782785
|
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.min = []
self.stack = []
def push(self, x: int) -> None:
self.stack.insert(0, x)
if not self.min or x <= self.min[-1]: # only those <= to actual min must be taken into account, other will be popped before
# and wont ever be the minimum
self.min.append(x)
def pop(self) -> None:
if self.min[-1] == self.top():
self.min.pop(-1)
self.stack.pop(0)
def top(self) -> int:
return self.stack[0]
def getMin(self) -> int:
return self.min[-1]
if __name__ == '__main__':
minStack = MinStack()
minStack.push(-2)
minStack.push(0)
minStack.push(-3)
print(minStack.getMin()) # --> Returns -3.
minStack.pop()
print(minStack.top()) # --> Returns 0.
print(minStack.getMin()) # --> Returns -2.
| 4.03125
| 4
|
contests/kickstart-2021H/p1.py
|
forewing/lc
| 0
|
12782786
|
def solve(s, f):
dist = [100] * 26
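    # dist[i] = minimal circular distance (around the 26-letter wheel) from letter i to any allowed key in f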
    available = set(map(lambda c: ord(c) - ord('a'), f))
    for i in range(26):
        if i in available:
            dist[i] = 0
        else:
            for a in available:
                dist[i] = min(dist[i], abs(a-i), 26-abs(a-i))
ans = 0
for c in s:
ans += dist[ord(c) - ord('a')]
return ans
if __name__ == "__main__":
T = int(input())
for i in range(T):
s = input()
f = input()
print(f"Case #{i+1}: {solve(s, f)}")
| 2.921875
| 3
|
exercises/en/exc_08_04a.py
|
Lavendulaa/programming-in-python-for-data-science
| 1
|
12782787
|
<reponame>Lavendulaa/programming-in-python-for-data-science
import numpy as np
# Create 2 lists containing the same number of elements
# Save each as objects named a_list and b_list
____
____
# Using boolean operators, what is outputted when you test to see if they are equal?
____
| 3.90625
| 4
|
getDailyBhav.py
|
krthkj/pythonDumps
| 0
|
12782788
|
#!/usr/bin/python
import zipfile # read, test zipfile
import os, errno # delete file and handle error
import urllib2 # download file
import datetime # date and time object
# deletes the file
def silentRemove(filename):
try:
os.remove(filename)
print filename + " removed"
except OSError as e:
if e.errno != errno.ENOENT:
raise e
return
# checks the zip file
def extractZip(bhavZipFile):
if os.path.exists(bhavZipFile):
try:
fileHandler = zipfile.ZipFile(bhavZipFile,'r')
if fileHandler.testzip():
raise zipfile.BadZipfile
for files in fileHandler.namelist():
fileHandler.extract(files)
fileHandler.close()
except(zipfile.LargeZipFile, zipfile.BadZipfile) as e:
print(e)
finally:
silentRemove(bhavZipFile)
return
# Download the zip file
def getBhav(bhavDate):
downloadUrl = "http://www.bseindia.com/download/BhavCopy/Equity/eq"+bhavDate+"_csv.zip"
bhavZipFile="eq"+bhavDate+"_csv.zip"
output = open(bhavZipFile,'wb')
try:
output.write(urllib2.urlopen(downloadUrl).read())
print bhavZipFile+" download success"
except (urllib2.URLError, urllib2.HTTPError,ValueError) as e:
print(e)
print bhavZipFile+" download failed"
finally:
output.close()
return bhavZipFile
# Generate today's date ddmmyy
def bhavDate ():
return datetime.datetime.today().strftime("%d%m%y")
# Get History till date
# helps in setup of database
def bhavHistory(noOfDays):
today = datetime.datetime.today()
itr=1
while itr != noOfDays:
val = (today - datetime.timedelta(days=itr)).strftime("%d%m%y")
extractZip(getBhav (val))
itr += 1
return
try:
bhavHistory(5)
extractZip(getBhav(bhavDate()))
except KeyboardInterrupt ,e :
print e
| 3.265625
| 3
|
aquascope/data_processing/export.py
|
MicroscopeIT/aquascope_backend
| 0
|
12782789
|
import os
import tempfile
from aquascope.webserver.data_access.conversions import list_of_item_dicts_to_tsv
from aquascope.webserver.data_access.storage.export import upload_export_file
def export_items(items, storage_client):
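    # Write the items to a temporary TSV file, upload it through the storage client, and return the upload result.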
with tempfile.TemporaryDirectory() as tmpdirname:
local_filepath = os.path.join(tmpdirname, 'features.tsv')
list_of_item_dicts_to_tsv(items, local_filepath)
return upload_export_file(storage_client, local_filepath)
| 2.28125
| 2
|
retiolum/scripts/adv_graphgen/tinc_graphs/BackwardsReader.py
|
makefu/painload
| 9
|
12782790
|
import sys
import os
import string
class BackwardsReader:
""" Stripped and stolen from : http://code.activestate.com/recipes/120686-read-a-text-file-backwards/ """
def readline(self):
while len(self.data) == 1 and ((self.blkcount * self.blksize) < self.size):
self.blkcount = self.blkcount + 1
line = self.data[0]
try:
self.f.seek(-self.blksize * self.blkcount, 2)
self.data = string.split(self.f.read(self.blksize) + line, '\n')
except IOError:
self.f.seek(0)
self.data = string.split(self.f.read(self.size - (self.blksize * (self.blkcount-1))) + line, '\n')
if len(self.data) == 0:
return ""
line = self.data[-1]
self.data = self.data[:-1]
return line + '\n'
def __init__(self, file, blksize=4096):
"""initialize the internal structures"""
self.size = os.stat(file)[6]
self.blksize = blksize
self.blkcount = 1
self.f = open(file, 'rb')
if self.size > self.blksize:
self.f.seek(-self.blksize * self.blkcount, 2)
self.data = string.split(self.f.read(self.blksize), '\n')
if not self.data[-1]:
self.data = self.data[:-1]
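# Minimal usage sketch (Python 2; 'example.log' is a hypothetical file name):
#
#   reader = BackwardsReader('example.log')
#   last_line = reader.readline()   # lines come back starting from the end of the file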
| 2.921875
| 3
|
Desafios/desafio045.py
|
LucasHenrique-dev/Exercicios-Python
| 1
|
12782791
|
from random import choice
from time import sleep
print('Vamos jogar \033[32mJokenpô\033[m')
escolhas = ['pedra', 'papel', 'tesoura']
computador = choice(escolhas)
jogador = str(input('Já escolhi a minha opção, qual a sua jogador \033[34mdesafiante\033[m: ')).strip().lower()
while not (jogador in escolhas):
jogador = str(input('opção invalida, por favor digite outra: ')).strip().lower()
print('Jogada contabilizada, hora de saber o vencedor')
sleep(1.5)
print('\033[34mJo\033[m...')
sleep(1)
print('\033[34mKen\033[m...')
sleep(1)
print('\033[34mPô\033[m!!!')
sleep(2)
print('\033[1;31mComputador\033[m: \033[1;35m{}\033[m'.format(computador))
print('\033[1;32mJogador\033[m: \033[1;36m{}\033[m'.format(jogador))
if computador == jogador:
print('\033[1;33mEMPATE\033[m')
elif computador == 'pedra':
if jogador == 'tesoura':
print('Vitória do \033[1;31mCOMPUTADOR\033[m')
else:
print('Vitória do \033[1;34mJOGADOR DESAFIANTE\033[m')
elif computador == 'papel':
if jogador == 'tesoura':
print('Vitória do \033[1;34mJOGADOR DESAFIANTE\033[m')
else:
print('Vitória do \033[1;31mCOMPUTADOR\033[m')
elif computador == 'tesoura':
if jogador == 'pedra':
print('Vitória do \033[1;34mJOGADOR DESAFIANTE\033[m')
else:
print('Vitória do \033[31mCOMPUTADOR\033[m')
| 3.71875
| 4
|
qftimports.py
|
Bra-A-Ket/QFTools
| 1
|
12782792
|
#!/usr/bin/env python3
# external packages
import os
import getopt
import sys
from time import time
import itertools as it
import csv
from collections import Counter
import numpy as np
# internal imports
from qftoolslib.wick import *
| 1.109375
| 1
|
python/gdal_cookbook/cookbook_geometry/calculate_in_geometry.py
|
zeroam/TIL
| 0
|
12782793
|
<filename>python/gdal_cookbook/cookbook_geometry/calculate_in_geometry.py
from osgeo import ogr
"""
Calculate Envelope of a Geometry
"""
wkt = "LINESTRING (1181866.263593049 615654.4222507705, 1205917.1207499576 623979.7189589312, 1227192.8790041457 643405.4112779726, 1224880.2965852122 665143.6860159477)"
geom = ogr.CreateGeometryFromWkt(wkt)
# Get Envelope return a tuple (minX, maxX, minY, maxY)
env = geom.GetEnvelope()
print(f'minX:{env[0]}, minY:{env[2]}, maxX:{env[1]}, maxY:{env[3]}')
"""
Calculate the Area of a Geometry
"""
wkt = "POLYGON ((1162440.5712740074 672081.4332727483, 1162440.5712740074 647105.5431482664, 1195279.2416228633 647105.5431482664, 1195279.2416228633 672081.4332727483, 1162440.5712740074 672081.4332727483))"
poly = ogr.CreateGeometryFromWkt(wkt)
print(f'Area = {poly.GetArea()}')
"""
Calculate the Length of a Geometry
"""
wkt = "LINESTRING (1181866.263593049 615654.4222507705, 1205917.1207499576 623979.7189589312, 1227192.8790041457 643405.4112779726, 1224880.2965852122 665143.6860159477)"
geom = ogr.CreateGeometryFromWkt(wkt)
print(f'Length = {geom.Length()}')
"""
Get the geometry type (as a string) from a Geometry
"""
wkts = [
"POINT (1198054.34 648493.09)",
"LINESTRING (1181866.263593049 615654.4222507705, 1205917.1207499576 623979.7189589312, 1227192.8790041457 643405.4112779726, 1224880.2965852122 665143.6860159477)",
"POLYGON ((1162440.5712740074 672081.4332727483, 1162440.5712740074 647105.5431482664, 1195279.2416228633 647105.5431482664, 1195279.2416228633 672081.4332727483, 1162440.5712740074 672081.4332727483))"
]
for wkt in wkts:
geom = ogr.CreateGeometryFromWkt(wkt)
print(geom.GetGeometryName())
"""
Calculate intersection between two Geometries
"""
wkt1 = "POLYGON ((1208064.271243039 624154.6783778917, 1208064.271243039 601260.9785661874, 1231345.9998651114 601260.9785661874, 1231345.9998651114 624154.6783778917, 1208064.271243039 624154.6783778917))"
wkt2 = "POLYGON ((1199915.6662253144 633079.3410163528, 1199915.6662253144 614453.958118695, 1219317.1067437078 614453.958118695, 1219317.1067437078 633079.3410163528, 1199915.6662253144 633079.3410163528)))"
poly1 = ogr.CreateGeometryFromWkt(wkt1)
poly2 = ogr.CreateGeometryFromWkt(wkt2)
intersection = poly1.Intersection(poly2)
print(intersection.ExportToWkt())
"""
Calculate union between two Geometries
"""
wkt1 = "POLYGON ((1208064.271243039 624154.6783778917, 1208064.271243039 601260.9785661874, 1231345.9998651114 601260.9785661874, 1231345.9998651114 624154.6783778917, 1208064.271243039 624154.6783778917))"
wkt2 = "POLYGON ((1199915.6662253144 633079.3410163528, 1199915.6662253144 614453.958118695, 1219317.1067437078 614453.958118695, 1219317.1067437078 633079.3410163528, 1199915.6662253144 633079.3410163528)))"
poly1 = ogr.CreateGeometryFromWkt(wkt1)
poly2 = ogr.CreateGeometryFromWkt(wkt2)
union = poly1.Union(poly2)
print(f'poly1: {poly1}')
print(f'poly2: {poly2}')
print(f'union: {union.ExportToWkt()}')
| 2.875
| 3
|
src/opts.py
|
xdr940/som-TSP
| 0
|
12782794
|
<gh_stars>0
import argparse
class OPT:
def __init__(self):
self.parser = argparse.ArgumentParser(description='M2CT2020')
# -------------------------------
self.parser.add_argument('--wk_root', type=str, default='/home/roit/aws/aprojects/M2CT2020/proj')
self.parser.add_argument('--data_dir',default='./data')
self.parser.add_argument('--iteration',default=5000)
self.parser.add_argument('--evaluate_freq',default=50)
self.parser.add_argument('--out_dir',default='./out_dir')
self.parser.add_argument('--data_out',default='./data_out.csv')
self.parser.add_argument('--route_plt',
default=[19,18,25,26,29,21,23,24,28,22,4,3,5,10,13,16,27,12,8,15,14,11,6,7
,9,2,1,0,17,20])
#args
self.parser.add_argument('--decay',default=0.9997)
self.parser.add_argument('--learning_rate',default=0.9997)
self.parser.add_argument('--routes',
default=[
[0,21,23,24,28,22,4,3],
[0,5,13,27,16,10],
[0,17,20,19,18,25,26,29],
[0, 1, 9, 7, 6, 11, 14, 15,12,8,2]
])
def args(self):
self.options = self.parser.parse_args()
return self.options
| 2.4375
| 2
|
qualifiedname/qname_inspect.py
|
maxfischer2781/qualifiedname
| 0
|
12782795
|
from __future__ import print_function
import inspect
import ast
import sys
import collections
import weakref
def qualname(obj):
"""
Lookup or compute the ``__qualname__`` of ``obj``
:param obj: class or function to lookup
:return: ``__qualname__`` of ``obj``
:rtype: str
:raises: AttributeError if no ``__qualname__`` can be found
"""
# only compute qualname if not present already
try:
return obj.__qualname__
except AttributeError as err:
no_qualname_exception = err
obj = getattr(obj, '__func__', obj)
# inspect source to retrace definition
source, line_no = inspect.findsource(obj)
try:
__qualname__ = QNameTracer(''.join(source)).at_line_no(line_no)
except KeyError as err:
no_qualname_exception.__context__ = err
raise no_qualname_exception
return __qualname__
def get_qualname(module, line_no):
"""
Return the qualname corresponding to a definition
Parses the abstract syntax tree to reconstruct the name of scopes.
A qualname is defined at the beginning of a scope - a ``class`` or
``def`` statement.
:param module: name of the module in which the definition is performed
:param line_no: line number at which the definition is performed
:return: qualname at ``line_no`` of ``module``
:raises: KeyError if ``module`` or ``line_no`` do not point to valid definitions
"""
module = sys.modules[module]
source, _ = inspect.findsource(module)
return QNameTracer(''.join(source)).at_line_no(line_no)
class QNameTracer(ast.NodeVisitor):
_cache = weakref.WeakValueDictionary()
_cache_fifo = collections.deque(maxlen=10) # limit cache to 10 elements
_init = False
def __new__(cls, source):
try:
return cls._cache[source]
except KeyError:
self = ast.NodeVisitor.__new__(cls)
cls._cache[source] = self
cls._cache_fifo.append(self)
return self
def __init__(self, source):
if self._init:
return
ast.NodeVisitor.__init__(self)
self._name_stack = []
self._lno_qualname = {}
self.visit(ast.parse(source=source))
self._init = True
def at_line_no(self, line_no):
return self._lno_qualname[line_no]
def _set_qualname(self, ast_line_no, push_qualname=None):
# ast_line_no starts at 1, inspect line_no starts at 0
line_no = ast_line_no
name_stack = self._name_stack + ([push_qualname] if push_qualname is not None else [])
self._lno_qualname[line_no] = '.'.join(name_stack)
def visit_FunctionDef(self, node):
# enter scope
self._name_stack.append(node.name)
self._set_qualname(node.lineno)
# proceed in function local namespace
self._name_stack.append('<locals>')
self.generic_visit(node)
# unwind at exit
self._name_stack.pop()
self._name_stack.pop()
def visit_ClassDef(self, node):
# enter scope
self._name_stack.append(node.name)
self._set_qualname(node.lineno)
# proceed at same scope
self.generic_visit(node)
# unwind at exit
self._name_stack.pop()
def visit_Exec(self, node):
try:
qnames = self.__class__(node.body.s)
except SyntaxError:
return
for ast_line_no, exec_qualname in qnames._lno_qualname.items():
self._set_qualname(node.lineno + ast_line_no, push_qualname=exec_qualname)
| 2.640625
| 3
|
qatm.py
|
gyhdtc/QATM_pytorch
| 0
|
12782796
|
<reponame>gyhdtc/QATM_pytorch<filename>qatm.py
from pathlib import Path
import torch
import torchvision
from torchvision import models, transforms, utils
import argparse
import multiprocessing
import pyrealsense2 as rs
from ctypes import c_wchar_p
import numpy as np
import cv2
# +
# import functions and classes from qatm_pytorch.py
print("import qatm_pytorch.py...")
import ast
import types
import sys
from utils import *
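# Parse qatm_pytorch.py with ast, keep only its function/class definitions and imports (dropping any
# top-level script code), compile the result and exec it into a synthetic module named "mod".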
with open("qatm_pytorch.py") as f:
p = ast.parse(f.read())
for node in p.body[:]:
if not isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.Import, ast.ImportFrom)):
p.body.remove(node)
module = types.ModuleType("mod")
code = compile(p, "mod.py", 'exec')
sys.modules["mod"] = module
exec(code, module.__dict__)
from mod import *
# -
# global
global image_index
imageflag = 0
qatmflag = 0
# global
# lock
import threading
# lock
def getimage(image_index, image_name, global_lock_1, global_lock_2) -> None:
print ("begin get image......" + str(image_index))
saveflag = 1
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
try:
while image_index.value < 10:
global_lock_1.acquire()
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
continue
image_index.value += 1
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
# save image
color_image = cv2.resize(color_image, (320, 240))
cv2.imwrite("handsample/1.jpg", color_image)
image_name.value = str(image_index.value)+".jpg"
print ("SAVE! - " + image_name.value)
global_lock_2.release()
finally:
# Stop streaming
pipeline.stop()
def showimage():
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
try:
while True:
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
continue
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
# Apply colormap on depth image (image must be converted to 8-bit per pixel first)
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
# Stack both images horizontally
images = np.hstack((color_image, depth_colormap))
# Show images
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', images)
key = cv2.waitKey(1)
# Press esc or 'q' to close the image window
if key & 0xFF == ord('q') or key == 27:
cv2.destroyAllWindows()
break
finally:
# Stop streaming
pipeline.stop()
def GYH(image_index, image_name, global_lock_1, global_lock_2):
parser = argparse.ArgumentParser(description='QATM Pytorch Implementation')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('-s', '--sample_image', default='sample/1.jpg')
parser.add_argument('-t', '--template_images_dir', default='template/')
parser.add_argument('--alpha', type=float, default=25)
parser.add_argument('--thresh_csv', type=str, default='thresh_template.csv')
args = parser.parse_args()
template_dir = args.template_images_dir
image_path = args.sample_image
print("define model...")
model = CreateModel(model=models.vgg19(pretrained=True).features, alpha=args.alpha, use_cuda=args.cuda)
while image_index.value < 10:
global_lock_1.release()
global_lock_2.acquire()
for image_file in os.listdir(Path(image_path)):
            # Multi-template matching, not needed for now
            # dataset: each element holds one template, the same target image, and an image name
# image
# image_raw
# image_name
# template
# template_name
# template_h
# template_w
# thresh
dataset = ImageDataset(Path(template_dir), image_path+"/"+image_file, thresh_csv='thresh_template.csv')
# print("calculate score..." + image_file)
# scores, w_array, h_array, thresh_list = run_multi_sample(model, dataset)
# print("nms..." + image_file)
# boxes, indices = nms_multi(scores, w_array, h_array, thresh_list)
# _ = plot_result_multi(dataset.image_raw, boxes, indices, show=False, save_name="result-"+str(image_index.value)+".png")
# print("result-" + "result-"+str(image_index.value)+".png" + " was saved")
            # Simulate matching a single template against one image
w_array = 0
h_array = 0
thresh = 0.8
score = run_one_sample(model, dataset[0]['template'], dataset[0]['image'], dataset[0]['image_name'])
w_array = dataset[0]['template_w']
h_array = dataset[0]['template_h']
boxes = nms(score, w_array, h_array, thresh)
_ = plot_result(dataset[0]['image_raw'], boxes, show=False, save_name="result-"+str(image_index.value)+".png", color=(0,255,0))
if __name__ == '__main__':
global_lock_1 = multiprocessing.Lock()
global_lock_2 = multiprocessing.Lock()
image_index = multiprocessing.Value('d', 0)
image_name = multiprocessing.Value(c_wchar_p, '1')
p1 = multiprocessing.Process(target=getimage, args=[image_index, image_name, global_lock_1, global_lock_2])
p2 = multiprocessing.Process(target=GYH, args=[image_index, image_name, global_lock_1, global_lock_2])
global_lock_1.acquire()
global_lock_2.acquire()
p1.start()
p2.start()
p1.join()
p2.join()
# -------------------------------------------- #
# p1 = multiprocessing.Process(target = getimage, args=[global_lock_1, global_lock_2])
# p1.start()
# p1.join()
# p2 = multiprocessing.Process(target=GYH)
# p2.start()
# p2.join()
# -------------------------------------------- #
| 2.21875
| 2
|
examples/single_tbst_database.py
|
shunsvineyard/forest-python
| 8
|
12782797
|
"""The module demonstrates using threaded binary trees to implement ordered index."""
from typing import Any
from forest.binary_trees import single_threaded_binary_trees
from forest.binary_trees import traversal
class MyDatabase:
"""Example using threaded binary trees to build index."""
def __init__(self) -> None:
self._left_bst = single_threaded_binary_trees.LeftThreadedBinaryTree()
self._right_bst = single_threaded_binary_trees.RightThreadedBinaryTree()
def _persist(self, payload: Any) -> str:
"""Fake function pretent storing data to file system.
Returns
-------
str
Path to the payload.
"""
return f"path_to_{payload}"
def insert_data(self, key: Any, payload: Any) -> None:
"""Insert data.
Parameters
----------
key: Any
Unique key for the payload
payload: Any
Any data
"""
path = self._persist(payload=payload)
self._left_bst.insert(key=key, data=path)
self._right_bst.insert(key=key, data=path)
def dump(self, ascending: bool = True) -> traversal.Pairs:
"""Dump the data.
Parameters
----------
ascending: bool
The order of data.
Yields
------
`Pairs`
The next (key, data) pair.
"""
if ascending:
return self._right_bst.inorder_traverse()
else:
return self._left_bst.reverse_inorder_traverse()
if __name__ == "__main__":
# Initialize the database.
my_database = MyDatabase()
# Add some items.
my_database.insert_data("Adam", "adam_data")
my_database.insert_data("Bob", "bob_data")
my_database.insert_data("Peter", "peter_data")
my_database.insert_data("David", "david_data")
# Dump the items in ascending order.
print("Ascending...")
for contact in my_database.dump():
print(contact)
print("\nDescending...")
    # Dump the data in descending order.
for contact in my_database.dump(ascending=False):
print(contact)
| 3.75
| 4
|
App/backend/app/serial/events.py
|
UWO-Aero-Design/gnd-station
| 4
|
12782798
|
<reponame>UWO-Aero-Design/gnd-station
#Websocket
from flask import session,jsonify
from flask_socketio import emit,send
from .. import socketio
import time
import random
from app import database
from app.database import databasehelperclass,queryDatabase
from .. import dbase
from .. import serialDataOut
from .. import serialDataIn
from .. import currentState
from app.serial import builder
from app.serial.builder import *
from app.serial import definitions
from app.serial.definitions import *
# from builder import *
# from definitions import *
import eventlet
eventlet.monkey_patch()
random.seed()
point = 0
#Scale values
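# (incoming serial fields arrive as scaled integers; divide by the matching *SCALE constant to recover real units)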
PITOTSCALE = 1000
IMUSCALE = 100
GPSLATLONSCALE = 10000000
GPSALTSCALE = 10
GPSSPEEDSCALE = 100
ENVIROSCALE = 100
# Event handler that can be passed to the serial task in order to handle a receive event
def post_serial_read(app,data = None):
print('Serial receive')
global currentState
print("Current Point: ",currentState.point)
print("'Current Flight: ",currentState.flight)
currentState.point = currentState.point + 1
global serialDataIn
PitotData = data[0]
serialDataIn.PitotData = PitotData
#print(PitotData)
IMUData = data[1]
serialDataIn.IMUData = IMUData
#print(IMUData)
GPSData = data[2]
serialDataIn.GPSData = GPSData
GPSData.lat = 27.94 * 10000000
GPSData.lon = -81 * 10000000
print(GPSData)
EnviroData = data[3]
EnviroData.pressure = EnviroData.pressure + random.randint(0,10)
serialDataIn.EnviroData = EnviroData
print(EnviroData)
BatteryData = data[4]
serialDataIn.BatteryData = BatteryData
#print(BatteryData)
StatusData = data[6]
serialDataIn.StatusData = StatusData
#print(StatusData)
ServoData = data[7]
serialDataIn.ServoData = ServoData
#print(ServoData)
#print("Recording:",currentState.recording)
#Database insertions and websocket messages
#All database access should be inside app context since this is running in a background thread
with app.app_context():
if currentState.recording == True:
databaseObj = databasehelperclass.pointtable(currentState.flight,currentState.point)
databaseinsertion(databaseObj)
if PitotData is not None:
PitotData.differential_pressure = PitotData.differential_pressure / PITOTSCALE
jsonData = {'differentialpressure':PitotData.differential_pressure}
#print(jsonData)
socketio.emit('PitotChannel',jsonData)
socketio.emit('connectStatus','Connected')
serialDataIn.PitotData = PitotData
if currentState.recording == True:
databaseObj = databasehelperclass.pitottubetable(float(PitotData.differential_pressure),
currentState.flight,currentState.point)
databaseinsertion(databaseObj)
if IMUData is not None:
IMUData.ax = IMUData.ax / IMUSCALE
IMUData.ay = IMUData.ay / IMUSCALE
IMUData.az = IMUData.az / IMUSCALE
IMUData.gx = IMUData.gx / IMUSCALE
IMUData.gy = IMUData.gy / IMUSCALE
IMUData.gz = IMUData.gz / IMUSCALE
IMUData.mx = IMUData.mx / IMUSCALE
IMUData.my = IMUData.my / IMUSCALE
IMUData.mz = IMUData.mz / IMUSCALE
IMUData.yaw = IMUData.yaw / IMUSCALE
IMUData.pitch = IMUData.pitch / IMUSCALE
IMUData.roll = IMUData.roll / IMUSCALE
jsonData = {'ax':IMUData.ax,
'ay':IMUData.ay,
'az':IMUData.az,
'gx':IMUData.gx,
'gy':IMUData.gy,
'gz':IMUData.gz,
'mx':IMUData.mx,
'my':IMUData.my,
'mz':IMUData.mz,
'yaw':IMUData.yaw,
'pitch':IMUData.pitch,
'roll':IMUData.roll}
# print(jsonData)
socketio.emit('IMUChannel',jsonData)
socketio.emit('connectStatus','Connected')
serialDataIn.IMUData = IMUData
if currentState.recording == True:
databaseObj = databasehelperclass.imuvaluestable(float(IMUData.ax),float(IMUData.ay),float(IMUData.az),
float(IMUData.yaw),float(IMUData.pitch),float(IMUData.roll),
float(IMUData.mx),float(IMUData.my),float(IMUData.mz),
float(IMUData.gx),float(IMUData.gy),float(IMUData.gz),
currentState.flight,currentState.point)
databaseinsertion(databaseObj)
if GPSData is not None:
GPSData.lat = GPSData.lat / GPSLATLONSCALE
GPSData.lon = GPSData.lon / GPSLATLONSCALE
GPSData.altitude = GPSData.altitude / GPSALTSCALE
GPSData.speed = GPSData.speed / GPSSPEEDSCALE
jsonData = {'lat':GPSData.lat,
'lon':GPSData.lon,
'altitude':GPSData.altitude,
'speed':GPSData.speed,
'time':GPSData.time,
'satellites':GPSData.satellites,
'date':GPSData.date}
# print(jsonData)
socketio.emit('GPSChannel',jsonData)
socketio.emit('connectStatus','Connected')
serialDataIn.GPSData = GPSData
if currentState.recording == True:
databaseObj = databasehelperclass.gpsvaluetable(float(GPSData.lat) + point,float(GPSData.lon),float(GPSData.speed),
float(GPSData.satellites),float(GPSData.altitude),float(GPSData.time),
int(GPSData.date),currentState.flight,currentState.point)
databaseinsertion(databaseObj)
if EnviroData is not None:
EnviroData.humidity = EnviroData.humidity / ENVIROSCALE
EnviroData.pressure = EnviroData.pressure / ENVIROSCALE - 10
EnviroData.temperature = EnviroData.temperature / ENVIROSCALE
# jsonData = {'pressure':EnviroData.pressure,
# 'humidity':EnviroData.humidity,
# 'temperature':EnviroData.temperature}
jsonData = {'altitude':EnviroData.pressure,
'temperature':EnviroData.humidity,
'humidity':EnviroData.temperature}
# print(jsonData)
socketio.emit('EnviroChannel',jsonData)
socketio.emit('connectStatus','Connected')
serialDataIn.EnviroData = EnviroData
if currentState.recording == True:
databaseObj = databasehelperclass.environmentalsensortable(float(EnviroData.pressure),
float(EnviroData.humidity),
float(EnviroData.temperature),
currentState.flight,currentState.point)
databaseinsertion(databaseObj)
if BatteryData is not None:
jsonData = {'voltage':BatteryData.voltage,
'current':BatteryData.current}
# print(jsonData)
socketio.emit('BatteryChannel',jsonData)
socketio.emit('connectStatus','Connected')
if currentState.recording == True:
databaseObj = databasehelperclass.batterystatustable(float(BatteryData.voltage),
float(BatteryData.current),
currentState.flight,currentState.point)
databaseinsertion(databaseObj)
if StatusData is not None:
            jsonData = {'rssi':StatusData.rssi,
'state':StatusData.state}
# print(jsonData)
socketio.emit('StatusChannel',jsonData)
socketio.emit('connectStatus','Connected')
if currentState.recording == True:
databaseObj = databasehelperclass.systemstatustable(float(StatusData.rssi),
float(StatusData.state),
currentState.flight,currentState.point)
databaseinsertion(databaseObj)
if ServoData is not None:
jsonData = {'servo0':ServoData.servo0,
'servo1':ServoData.servo1,
'servo2':ServoData.servo2,
'servo3':ServoData.servo3,
'servo4':ServoData.servo4,
'servo5':ServoData.servo5,
'servo6':ServoData.servo6,
'servo7':ServoData.servo7,
'servo8':ServoData.servo8,
'servo9':ServoData.servo9,
'servo10':ServoData.servo10,
'servo11':ServoData.servo11,
'servo12':ServoData.servo12,
'servo13':ServoData.servo13,
'servo14':ServoData.servo14,
'servo15':ServoData.servo15}
# print(jsonData)
socketio.emit('ServoChannel',jsonData)
socketio.emit('connectStatus','Connected')
if currentState.recording == True:
databaseObj = databasehelperclass.servodatatable(float(ServoData.servo0),
float(ServoData.servo1),
float(ServoData.servo2),
float(ServoData.servo3),
float(ServoData.servo4),
float(ServoData.servo5),
float(ServoData.servo6),
float(ServoData.servo7),
float(ServoData.servo8),
float(ServoData.servo9),
float(ServoData.servo10),
float(ServoData.servo11),
float(ServoData.servo12),
float(ServoData.servo13),
float(ServoData.servo14),
float(ServoData.servo15),
currentState.flight,currentState.point)
databaseinsertion(databaseObj)
#time.sleep(0.1)
    # The plan here is to take data parsed from the serial port and add it to the DB
# Event handler that is called before a write. Should return a message to send over serial or None
def pre_serial_write(app,data = None):
#print('Serial write data gather')
global serialDataOut
builder = MessageBuilder()
c = Commands()
c.drop = serialDataOut.cmdDrop
c.servos = serialDataOut.cmdServo
c.pitch = serialDataOut.cmdPitch
builder.add(c)
#i = IMU()
#i.ax = 10
# p = Pitot()
# p.differential_pressure = 255
# i = IMU()
# i.ax = 31245
# g = GPS()
# g.lat = 31245
# g.lon = 31245
# e = Enviro()
# e.pressure = 31245
# e.humidity = 31245
# e.temperature = 31245
# print(uint16_to_bytes(31245))
# builder.add(p)
# builder.add(i)
# builder.add(e)
write_val = builder.build(0,serialDataOut.destination)
#print(write_val)
#print(len(write_val))
#TODO: Preprocessing stuff
#Replace serialDataOut with string of bytes
return write_val
# The plan here is to return a string of bytes to send over the serial port
def databaseinsertion(obj):
#databasehelperclass.db.session.add(obj)
#databasehelperclass.db.session.commit()
dbase.session.add(obj)
dbase.session.commit()
| 2.703125
| 3
|
scripts/build_sdk_ios.py
|
kokorinosoba/xamarin_sdk
| 10
|
12782799
|
from scripting_utils import *
def build(version, root_dir, ios_submodule_dir, with_test_lib):
# ------------------------------------------------------------------
# paths
srcdir = '{0}/sdk'.format(ios_submodule_dir)
lib_out_dir = '{0}/ios/AdjustSdk.Xamarin.iOS/Resources'.format(root_dir)
sdk_static_framework = '{0}/Frameworks/Static/AdjustSdk.framework'.format(srcdir)
adjust_api_path = '{0}/Adjust/Adjust.m'.format(srcdir)
# ------------------------------------------------------------------
# Removing old iOS SDK binary
debug_green('Removing old iOS SDK binary ...')
remove_file_if_exists('{0}/libAdjust.a'.format(lib_out_dir))
# ------------------------------------------------------------------
# Appending SDK prefix to source code
debug_green('Appending SDK prefix to source code ...')
replace_text_in_file(adjust_api_path,
'self.activityHandler = [[ADJActivityHandler alloc]',
'[adjustConfig setSdkPrefix:@"xamarin{0}"];self.activityHandler = [[ADJActivityHandler alloc]'.format(version))
replace_text_in_file(adjust_api_path,
'return [[Adjust getInstance] sdkVersion];',
'return [NSString stringWithFormat: @"xamarin{0}@%@", [[Adjust getInstance] sdkVersion]];'.format(version))
# ------------------------------------------------------------------
# Building new iOS SDK binary
debug_green('Building new iOS SDK binary ...')
change_dir(srcdir)
xcode_build('AdjustStatic')
# ------------------------------------------------------------------
# Removing SDK prefix from source code
debug_green('Removing SDK prefix from source code ...')
replace_text_in_file(adjust_api_path,
'[adjustConfig setSdkPrefix:@"xamarin{0}"];self.activityHandler = [[ADJActivityHandler alloc]'.format(version),
'self.activityHandler = [[ADJActivityHandler alloc]')
replace_text_in_file(adjust_api_path,
'return [NSString stringWithFormat: @"xamarin{0}@%@", [[Adjust getInstance] sdkVersion]];'.format(version),
'return [[Adjust getInstance] sdkVersion];')
# ------------------------------------------------------------------
# Copying the generated binary to lib out dir
debug_green('Copying the generated binary to {0} ...'.format(lib_out_dir))
copy_file(sdk_static_framework + '/Versions/A/AdjustSdk', lib_out_dir + '/libAdjust.a')
if with_test_lib:
# ------------------------------------------------------------------
# Test Library paths
set_log_tag('IOS-TEST-LIB-BUILD')
debug_green('Building Test Library started ...')
test_static_framework = '{0}/Frameworks/Static/AdjustTestLibrary.framework'.format(srcdir)
test_lib_out_dir = '{0}/ios/Test/TestLib/Resources'.format(root_dir)
# ------------------------------------------------------------------
# Removing old iOS SDK binary
debug_green('Removing old iOS SDK binary ...')
remove_file_if_exists('{0}/libAdjustTest.a'.format(test_lib_out_dir))
# ------------------------------------------------------------------
# Building new iOS SDK binary
debug_green('Building new iOS SDK binary ...')
change_dir('{0}/AdjustTests/AdjustTestLibrary'.format(srcdir))
xcode_build('AdjustTestLibraryStatic')
copy_file(test_static_framework + '/Versions/A/AdjustTestLibrary', test_lib_out_dir + '/libAdjustTest.a')
| 1.773438
| 2
|
19 - Drawing Live Graphs.py
|
mayankdcoder/Matplotlib
| 1
|
12782800
|
<filename>19 - Drawing Live Graphs.py
# Drawing live graphs
# If the data reservoir is getting updated regularly then use this for graphing in real time.
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.style as style
style.use('fivethirtyeight')
figure = plt.figure()
axes1 = figure.add_subplot(1, 1, 1)
def animate(i):
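    # example.txt is expected to hold one "x,y" pair per line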
graph_data = open('example.txt', 'r').read()
lines = graph_data.split('\n')
x = []
y = []
    for line in lines:
        if len(line) > 1:  # skip blank lines such as a trailing newline in example.txt
            alpha, beta = line.split(',')
            x.append(alpha)
            y.append(beta)
axes1.clear()
axes1.plot(x, y)
ani = animation.FuncAnimation(figure, animate, interval=1000)
plt.show()
| 3.359375
| 3
|
opendc-web/opendc-web-api/opendc/api/traces.py
|
Timovanmilligen/opendc
| 32
|
12782801
|
<filename>opendc-web/opendc-web-api/opendc/api/traces.py
# Copyright (c) 2021 AtLarge Research
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from flask_restful import Resource
from opendc.exts import requires_auth
from opendc.models.trace import Trace as TraceModel, TraceSchema
class TraceList(Resource):
"""
Resource for the list of traces to pick from.
"""
method_decorators = [requires_auth]
def get(self):
"""Get all available Traces."""
traces = TraceModel.get_all()
data = TraceSchema().dump(traces.obj, many=True)
return {'data': data}
class Trace(Resource):
"""
Resource representing a single trace.
"""
method_decorators = [requires_auth]
def get(self, trace_id):
"""Get trace information by identifier."""
trace = TraceModel.from_id(trace_id)
trace.check_exists()
data = TraceSchema().dump(trace.obj)
return {'data': data}
| 1.875
| 2
|
functions_lib.py
|
surajpaib/CrowdEstimationchallenge
| 0
|
12782802
|
<gh_stars>0
import cv2
import numpy as np
class CrowdCounter(object):
def __init__(self):
self.params = None
    def multifile_read(self, *args):
for arg in args:
yield cv2.imread(arg)
def multifile_write(self, *args):
for arg in args:
cv2.imwrite(arg[0], arg[1])
return
def background_subtraction(self, img1, img2, img3):
diff1 = cv2.absdiff(img1, img2)
diff2 = cv2.absdiff(img2, img3)
diff3 = cv2.absdiff(img3, img1)
return diff1, diff2, diff3
def high_pass_filtering(self, img):
im = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
f = np.fft.fft2(im)
fshift = np.fft.fftshift(f)
rows, cols = im.shape
        crow, ccol = rows // 2, cols // 2
fshift[crow - 30:crow + 30, ccol - 30:ccol + 30] = 0
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
return img_back
def morph_operations(self, kernel, im, operation):
if operation == "open":
return cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel)
if operation == "close":
return cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)
if operation == "erode":
return cv2.morphologyEx(im, cv2.MORPH_ERODE, kernel)
if operation == "dilate":
            return cv2.morphologyEx(im, cv2.MORPH_DILATE, kernel)
def blob_detect_set_params(self):
params = cv2.SimpleBlobDetector_Params()
# Detect circles
params.filterByCircularity = True
params.minCircularity = 0.15
params.maxCircularity = 0.8
# Threshold for splitting images
params.minThreshold = 10
params.maxThreshold = 500
# filter by color
params.filterByColor = False
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.5
# Filter by Inertia
params.filterByArea = True
params.minArea = 100
params.filterByInertia = True
params.minInertiaRatio = 0.1
params.maxInertiaRatio = 0.5
self.params = params
return
def run_blob_detector(self, processed_im, original_im):
blob = cv2.SimpleBlobDetector(self.params)
keypoint = blob.detect(processed_im)
im_with_keypoints = cv2.drawKeypoints(original_im, keypoint, np.array([]), (255, 0, 0),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
return im_with_keypoints, keypoint
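# Minimal usage sketch (frame1.jpg / frame2.jpg / frame3.jpg are hypothetical consecutive frames):
#
#   counter = CrowdCounter()
#   im1, im2, im3 = counter.multifile_read('frame1.jpg', 'frame2.jpg', 'frame3.jpg')
#   d1, d2, d3 = counter.background_subtraction(im1, im2, im3)
#   counter.blob_detect_set_params()
#   annotated, keypoints = counter.run_blob_detector(d1, im1)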
| 2.34375
| 2
|
label_process/code/label_preprocessing.py
|
tisssu10086/Gibbon_call_detection
| 0
|
12782803
|
import os
import numpy as np
import pandas as pd
'''This script preprocesses the labels, finds mistakes in them and stores the labels in a unified format in the processed_label directory'''
file_dic_Extra = os.listdir('../../label/Extra_Labels')
file_dic_Train = os.listdir('../../label/Train_labels')
file_dic_Test = os.listdir('../../label/Test_labels')
#store the gibbon call duration distribution
duration_dist = np.array([])
duration_dist2 = np.array([])
for file_name in file_dic_Extra: # go through the Extra_Labels dictionary
if file_name[0] == 'g':
gibbon_timestamps = pd.read_csv('../../label/Extra_Labels/' + file_name, sep=',')
duration = np.asarray(gibbon_timestamps['Duration'])
duration_dist = np.concatenate((duration_dist, duration), axis = 0)
        # test whether the duration equals 'End' - 'Start'
duration2 = np.asarray(gibbon_timestamps['End'] - gibbon_timestamps['Start'])
duration_dist2 = np.concatenate((duration_dist2, duration2), axis = 0)
if duration.size != 0 :
if min(duration) <= 0:
print(file_name, 'has wrong record')
gibbon_timestamps.to_csv('../../label/processed_label/' + file_name[2:], index = 0)
for file_name in file_dic_Train: # go through the Train_Labels dictionary
if file_name[0] == 'g':
gibbon_timestamps = pd.read_csv('../../label/Train_Labels/' + file_name, sep=',')
duration = np.asarray(gibbon_timestamps['Duration'])
duration_dist = np.concatenate((duration_dist, duration), axis = 0)
        # test whether the duration equals 'End' - 'Start'
duration2 = np.asarray(gibbon_timestamps['End'] - gibbon_timestamps['Start'])
duration_dist2 = np.concatenate((duration_dist2, duration2), axis = 0)
if duration.size != 0:
if min(duration) <= 0:
print(file_name, 'has wrong record')
gibbon_timestamps.to_csv('../../label/processed_label/' + file_name[2:], index = 0)
# results show that Duration equals 'End' - 'Start'
test_duration = duration_dist2 == duration_dist
duration_test_result = np.where(test_duration == False)
if duration_test_result[0].size == 0:
    print('duration equals end - start')
else:
    print('duration record typo exists')
for file_name in file_dic_Test: # go through the Test_Labels dictionary and save data to processed label dictionary
gibbon_timestamps = pd.read_csv('../../label/Test_Labels/' + file_name, sep=',')
gibbon_timestamps['End'] = gibbon_timestamps['Start'] + gibbon_timestamps['Duration']
gibbon_timestamps = gibbon_timestamps[['Start', 'End', 'Duration']]
    duration = np.asarray(gibbon_timestamps['Duration'])
    if duration.size != 0:
if min(duration) <= 0:
print(file_name, 'has wrong record')
gibbon_timestamps.to_csv('../../label/processed_label/' + file_name[:-9] + '.data', index = 0)
# g_HGSM3BD_0+1_20160305_060000.data has wrong record
# g_HGSM3AC_0+1_20160312_055400.data has wrong record
# this two file has minus or equals to zero duration because of typo, these error have been fixed in processed-label manually.
| 2.65625
| 3
|
measuredivergence/copyrandom.py
|
tedunderwood/measureperspective
| 4
|
12782804
|
import shutil, os, glob
import pandas as pd
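# For each mixing ratio, copy every document tagged 'random' in that ratio's metadata into mix/<ratio>/.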
for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
metafile = 'partitionmeta/meta' + str(ratio) + '.csv'
df = pd.read_csv(metafile, index_col = 'docid')
for i in df.index:
if df.loc[i, 'tags'] != 'random':
continue
outpath = 'mix/' + str(ratio) + '/' + i + '.tsv'
shutil.copyfile('../data/' + i + '.tsv', outpath)
| 2.140625
| 2
|
projects/cookie/platform/taobao/taobao_shop_goods.py
|
kingking888/crawler-pyspider
| 1
|
12782805
|
<reponame>kingking888/crawler-pyspider
import json
import random
import time
from cookie.model.data import Data as CookieData
from cookie.config import *
from crawl_taobao_goods_migrate.model.result import Result
from crawl_taobao_goods_migrate.page.goods_details import GoodsDetails
from pyspider.libs.webdriver import Webdriver
from bs4 import BeautifulSoup
class TaobaoShopGoods:
"""
用h5页面抓取淘宝店铺的商品内容
"""
URL = 'https://shop{}.taobao.com/search.htm?search=y&orderType=newOn_desc'
def __init__(self, shop_id_list):
self.__driver = Webdriver().set_headless().get_driver()
self.__driver.set_page_load_timeout(30)
self.__page_nums = 0
self.__current_page = 1
self.__shop_crawled_status = False
self.__shop_id_list = shop_id_list
self.__url = None
def catch_all_goods(self):
for shop_id in self.__shop_id_list:
# self.set_browser_cookie(shop_id)
self.__page_nums = 0
self.__current_page = 1
self.__shop_crawled_status = self.shop_crawled_status(shop_id)
self.__url = self.URL.format(shop_id)
self.__driver.get(self.__url)
time.sleep(5)
print('开始抓取店铺:{} 的商品内容'.format(shop_id))
            # Recursively crawl the next page
self.catch_next_page(shop_id)
def set_browser_cookie(self, shop_id):
"""
        Set the Taobao shop cookies in the browser.
:param shop_id:
:return:
"""
self.__driver.get(self.URL.format(shop_id))
cookies = json.loads(
CookieData.get(CookieData.CONST_PLATFORM_TAOBAO_SHOP, CookieData.CONST_USER_TAOBAO_SHOP[0][0]))
for _c in cookies:
self.__driver.add_cookie(_c)
def catch_next_page(self, shop_id):
"""
        Check whether there is a next page and keep crawling.
:return:
"""
try:
print('抓取第: {} 页'.format(self.__current_page))
result = self.__driver.page_source
soup = BeautifulSoup(result, 'lxml')
            # Get the total number of pages
if not self.__page_nums:
self.__page_nums = int(soup.find('span', class_='page-info').get_text().split('/', 1)[1].strip())
            # Extract the goods IDs
all_goods = soup.find_all('dl', class_='item')
for _g in all_goods:
goods_url = _g.find('a', class_='J_TGoldData')['href']
goods_id = goods_url.split('id=', 1)[1].split('&', 1)[0]
crawl_url = 'https://item.taobao.com/item.htm?id={}'.format(goods_id)
print('解析商品: {}'.format(crawl_url))
GoodsDetails(crawl_url).enqueue()
if self.__shop_crawled_status and self.__current_page >= SHOP_CRAWLED_PAGES:
print('全量抓取过的店铺,只抓取前: {} 页'.format(SHOP_CRAWLED_PAGES))
else:
self.__driver.find_element_by_css_selector("[class='J_SearchAsync next']").click()
time.sleep(random.randint(5, 10))
self.__current_page += 1
self.catch_next_page(shop_id)
except Exception as e:
if self.__current_page < self.__page_nums or self.__current_page == 1:
print('获取下一页失败: {}, 退出'.format(e))
else:
print('已到达最后一页第 {} 页,退出: {}'.format(self.__current_page, e))
# 更改被抓取店铺的状态
Result().update_shop_crawled_status(shop_id, True)
print('已更改店铺的抓取状态')
def shop_crawled_status(self, shop_id):
"""
        Whether this shop has already been fully crawled.
:return:
"""
shop = Result().find_shop_by_id(shop_id)
status = shop.get('result').get('crawled', False) if shop else False
print('status: {}'.format(status))
return status
def __del__(self):
"""
        Destroy the chrome processes on teardown.
:return:
"""
try:
self.__driver.close()
except:
pass
| 2.671875
| 3
|
ci/recipe/RecipeRepoReader.py
|
andrsd/civet
| 29
|
12782806
|
<gh_stars>10-100
#!/usr/bin/env python
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
import os, fnmatch
from ci.recipe.RecipeReader import RecipeReader
class InvalidDependency(Exception):
pass
class InvalidRecipe(Exception):
pass
class RecipeRepoReader(object):
"""
Reads all the recipes in a repository
"""
def __init__(self, recipe_dir):
"""
Constructor.
Input:
recipe_dir: str: Path to the recipe repo.
"""
super(RecipeRepoReader, self).__init__()
self.recipe_dir = recipe_dir
self.recipes = self.read_recipes()
def get_recipe_files(self):
"""
Searches the recipe repo for *.cfg files.
This returns ALL recipe files found.
Return:
list[str]: Of paths to recipes
"""
recipes = []
recipes_dir = os.path.join(self.recipe_dir, "recipes")
for root, dirnames, files in os.walk(recipes_dir):
for filename in fnmatch.filter(files, "*.cfg"):
path = os.path.join(root, filename)
recipes.append(os.path.relpath(path, self.recipe_dir))
return recipes
def read_recipes(self):
"""
Converts all the recipes found by get_recipe_files() and converts them into dicts
Return:
list of recipe dicts
"""
all_recipes = []
for recipe_file in self.get_recipe_files():
reader = RecipeReader(self.recipe_dir, recipe_file)
recipe = reader.read()
if recipe:
all_recipes.append(recipe)
else:
raise InvalidRecipe(recipe_file)
if not self.check_dependencies(all_recipes):
raise InvalidDependency("Invalid dependencies!")
return all_recipes
def check_dependencies(self, all_recipes):
ret = True
for recipe in all_recipes:
# the reader already checks for file existence.
# We need to check for the same build user, repo and event type
if not recipe["active"]:
continue
if not self.check_depend(recipe, all_recipes, "push_dependencies", "trigger_push", "trigger_push_branch"):
ret = False
if not self.check_depend(recipe, all_recipes, "manual_dependencies", "trigger_manual", "trigger_manual_branch"):
ret = False
if not self.check_depend(recipe, all_recipes, "pullrequest_dependencies", "trigger_pull_request", None):
ret = False
return ret
def check_depend(self, recipe, all_recipes, dep_key, trigger_key, branch_key, alt_branch=None):
ret = True
for dep in recipe[dep_key]:
for dep_recipe in all_recipes:
if dep_recipe["filename"] == dep:
branch_same = True
if branch_key:
branch_same = dep_recipe[branch_key] == recipe[branch_key]
if not branch_same and alt_branch and recipe[alt_branch]:
branch_same = dep_recipe[branch_key] == recipe[alt_branch]
if (not branch_same
or not dep_recipe["active"]
or dep_recipe["build_user"] != recipe["build_user"]
or dep_recipe["repository"] != recipe["repository"]
or not dep_recipe[trigger_key]):
print("Recipe: %s: has invalid %s : %s" % (recipe["filename"], dep_key, dep))
ret = False
break
return ret
if __name__ == "__main__":
# import json
dirname = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.dirname(dirname)
try:
reader = RecipeRepoReader(parent_dir)
#print(json.dumps(reader.recipes, indent=2))
except Exception as e:
print("Recipe repo is not valid: %s" % e)
| 2.421875
| 2
|
ProteinGraphML/MLTools/MetapathFeatures/featureBuilder.py
|
JessBinder/ProteinGraphML
| 10
|
12782807
|
<reponame>JessBinder/ProteinGraphML
import os,re
import itertools
import logging
import pandas as pd
from .nodes import ProteinInteractionNode
def getMetapaths(proteinGraph,start):
children = getChildren(proteinGraph.graph,start)
    if start in proteinGraph.childParentDict.keys(): # if we've got parents, let's remove them from this search
children = list( set(children) - set(proteinGraph.childParentDict[start]) )
proteinMap = {
True:set(),
False:set()
}
for c in children:
p = filterNeighbors(proteinGraph.graph,c,True)
n = filterNeighbors(proteinGraph.graph,c,False)
posPaths = len(p)
negPaths = len(n)
for pid in p:
proteinMap[True].add(pid)
for pid in n:
proteinMap[False].add(pid)
return proteinMap
# new graph stuff, things below have been removed
def filterNeighbors(graph,start,association): # hard coded ... "association"
return [a for a in graph.adj[start] if "association" in graph.edges[(start,a)].keys() and graph.edges[(start,a)]["association"] == association]
def getChildren(graph,start): # hard coded ... "association"
return [a for a in graph.adj[start] if "association" not in graph.edges[(start,a)].keys()]
def getTrainingProteinIds(disease,proteinGraph):
'''
This function returns the protein ids for True and False labels.
'''
paths = getMetapaths(proteinGraph,disease) #a dictionary with 'True' and 'False' as keys and protein_id as values
return paths[True], paths[False]
def metapathFeatures(disease, proteinGraph, featureList, idDescription, staticFeatures=None, staticDir=None, test=False, loadedLists=None):
# we compute a genelist....
# get the proteins
# for each of the features, compute their metapaths, given an object, and graph+list... then they get joined
#print(len(proteinGraph.graph.nodes))
G = proteinGraph.graph # this is our networkx api
if loadedLists is not None:
trueP = loadedLists[True]
falseP = loadedLists[False]
try:
unknownP = loadedLists['unknown']
except:
unknownP = []
else:
paths = getMetapaths(proteinGraph,disease) #a dictionary with 'True' and 'False' as keys and protein_id as values
trueP = paths[True]
falseP = paths[False]
unknownP = []
logging.info("(metapathFeatures) PREPARING TRUE ASSOCIATIONS: {0}".format(len(trueP)))
logging.info("(metapathFeatures) PREPARING FALSE ASSOCIATIONS: {0}".format(len(falseP)))
logging.info("(metapathFeatures) PREPARING UNKNOWN ASSOCIATIONS: {0}".format(len(unknownP)))
logging.info("(metapathFeatures) NODES IN GRAPH: {0}".format(len(G.nodes)))
logging.info("(metapathFeatures) EDGES IN GRAPH: {0}".format(len(G.edges)))
proteinNodes = [pro for pro in list(G.nodes) if ProteinInteractionNode.isThisNode(pro)] #if isinstance(pro,int)] # or isinstance(pro,np.integer)]
if len(proteinNodes) == 0:
raise Exception('No protein nodes detected in graph')
logging.info("(metapathFeatures) DETECTED PROTEINS: {0}".format(len(proteinNodes)))
nodeListPairs = []
for n in featureList:
nodeListPairs.append((n,[nval for nval in list(G.nodes) if n.isThisNode(nval)]))
metapaths = []
flog = 'metapath_features.log'
logging.info("(metapathFeatures) Metapath features logfile: {0}".format(flog))
fh = open(flog, 'w') # file to save nodes used for metapaths
for pair in nodeListPairs:
nodes = pair[1]
nonTrueAssociations = set(proteinNodes) - trueP
#print(len(G.nodes), len(nodes), len(trueP), len(nonTrueAssociations))
METAPATH = pair[0].computeMetapaths(G, nodes, trueP, nonTrueAssociations, idDescription, fh)
METAPATH = (METAPATH - METAPATH.mean())/METAPATH.std()
logging.info("(metapathFeatures) METAPATH FRAME {0}x{1} for {2}".format(METAPATH.shape[0], METAPATH.shape[1], pair[0]))
metapaths.append(METAPATH)
fh.close()
if test:
fullList = list(proteinNodes)
df = pd.DataFrame(fullList, columns=['protein_id'])
df = df.set_index('protein_id')
else:
if (len(unknownP) == 0):
fullList = list(itertools.product(trueP,[1])) + list(itertools.product(falseP,[0]))
else:
fullList = list(itertools.product(trueP,[1])) + list(itertools.product(falseP,[0])) + list(itertools.product(unknownP,[-1]))
df = pd.DataFrame(fullList, columns=['protein_id', 'Y'])
df = df.set_index('protein_id')
for metapathframe in metapaths:
# YOU CAN USE THESE TO GET A SUM IF NEED BE
#print(metapathframe.shape)
#print(sum(metapathframe.sum(axis=1)))
df = df.join(metapathframe,on="protein_id")
if staticFeatures is not None:
df = joinStaticFeatures(df, staticFeatures, staticDir)
return df
def joinStaticFeatures(df, features, datadir):
#datadir = os.getcwd()+'/ProteinGraphML/MLTools/StaticFeatures/'
for feature in features:
try: #newer, TSVs
            df_this = pd.read_csv(datadir+"/"+feature+".tsv", sep='\t')
except: #older, CSVs
df_this = pd.read_csv(datadir+"/"+feature+".csv")
#
df_this = df_this.set_index('protein_id')
df_this = df_this.drop(df_this.columns[0], axis=1)
#
if feature == "gtex" or feature == "ccle": # Kludge: all normed but hpa.
df_this = (df_this - df_this.mean())/df_this.std()
df = df.join(df_this, on="protein_id")
return df
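# Minimal sketch (added for illustration, not used by the pipeline) of the
# normalize-then-join pattern that metapathFeatures and joinStaticFeatures apply above:
# z-score a feature frame, then join it onto a label frame keyed by protein_id.
# The toy protein ids and values are made up.
def _example_normalize_and_join():
    labels = pd.DataFrame({'protein_id': [1, 2, 3], 'Y': [1, 0, 1]}).set_index('protein_id')
    feats = pd.DataFrame({'protein_id': [1, 2, 3], 'f1': [0.2, 0.8, 0.5]}).set_index('protein_id')
    feats = (feats - feats.mean()) / feats.std()  # same normalization as the metapath frames
    return labels.join(feats, on="protein_id")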
| 2.6875
| 3
|
scripts/convert_to_frames.py
|
TechieBoy/deepfake-detection
| 0
|
12782808
|
import os
import cv2
import cvlib as cv  # assumed dependency: find_max_face below relies on cv.detect_face
from concurrent.futures import ProcessPoolExecutor
import torch
from facenet_pytorch import MTCNN
from tqdm import tqdm
from PIL import Image
import pickle
from face_detection import RetinaFace
from bisect import bisect_left
from collections import Counter
import math
def delete_folders():
"""Deletes the frames folder from each directory in folder_list"""
from shutil import rmtree
for f in folder_list:
folder_to_delete = os.path.join(f, "frames")
rmtree(folder_to_delete)
def create_folders():
"""
Creates a folder called frames in each directory and creates subfolders for
each video in the frames folder.
"""
for f in folder_list:
os.mkdir(os.path.join(f, "frames"))
for fil in os.listdir(f):
fil = fil.split(".")[0]
if fil != "metadata" and fil != "frames":
os.mkdir(os.path.join(f, "frames", fil))
def convert_video_to_frames(input_path, output_folder):
"""Extract all frames from a video"""
count = 0
cap = cv2.VideoCapture(input_path)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
cv2.imwrite(os.path.join(output_folder, f"frame_{count}.png"), frame)
count += 1
cap.release()
def find_max_face(input_image):
"""
Finds face in input_image with maximum confidence and returns it
Adds padding of 15px around face
"""
detection = cv.detect_face(input_image)
if detection is not None:
faces, confidences = detection
if confidences:
max_conf = max(confidences)
face = faces[confidences.index(max_conf)]
(startX, startY) = face[0], face[1]
(endX, endY) = face[2], face[3]
height, width, _ = input_image.shape
y_top = max(startY - 15, 0)
x_top = max(startX - 15, 0)
y_bot = min(endY + 15, height)
x_bot = min(endX + 15, width)
return input_image[y_top:y_bot, x_top:x_bot]
return None
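# Standalone sketch (illustrative only, not called by this script) of the pad-and-clamp
# box logic that find_max_face above and get_box further down both use: grow a
# (startX, startY, endX, endY) box by `pad` pixels while staying inside the image bounds.
def _example_pad_and_clamp(box, shape, pad=15):
    start_x, start_y, end_x, end_y = box
    height, width = shape[:2]
    return (max(start_x - pad, 0), max(start_y - pad, 0),
            min(end_x + pad, width), min(end_y + pad, height))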
def convert_video_to_frames_periodic(name_prefix, input_path, output_folder, dt):
"""Captures a frame every dt milliseconds"""
count = 0
cap = cv2.VideoCapture(input_path)
success, image = cap.read()
while success:
cap.set(cv2.CAP_PROP_POS_MSEC, (count * dt))
        success, frame = cap.read()
        if not success:
            break
        cv2.imwrite(os.path.join(output_folder, f"{name_prefix}_frame_{count}.png"), frame)
count += 1
cap.release()
def convert_video_to_face_frames_periodic(name_prefix, input_path, output_folder, dt):
"""Captures a frame and tries to detect and save a face in it every dt milliseconds"""
count = 0
num_face = 0
cap = cv2.VideoCapture(input_path)
success, image = cap.read()
while success:
cap.set(cv2.CAP_PROP_POS_MSEC, (count * dt))
        success, frame = cap.read()
        if not success:
            break
        face = find_max_face(frame)
if face is not None:
cv2.imwrite(os.path.join(output_folder, f"{name_prefix}_face_{num_face}.png"), face)
num_face += 1
count += 1
if num_face < 5:
print(name_prefix + f" has {num_face} faces")
cap.release()
def create_frames(executor):
for f in folder_list:
print(f"In folder {f}")
for video in os.listdir(f):
if video != "metadata.json" and video != "frames":
# print(f"Processing video {video}")
input_path = os.path.join(f, video)
video_folder = video.split(".")[0]
output_folder = os.path.join(f, "frames", video_folder)
executor.submit(convert_video_to_face_frames_periodic, video_folder, input_path, output_folder, 1000)
# convert_video_to_face_frames_periodic(video_folder, input_path, output_folder, 800)
def convert_with_mtcnn_parallel(detector, base_folder, folder):
print(folder)
def func(video):
return convert_video_to_frames_per_frame(os.path.join(folder, video), 10)
video_list = os.listdir(folder)
video_list.remove("metadata.json")
video_list.remove("frames")
video_list.remove("audio")
with ProcessPoolExecutor(20) as pool:
frame_list = pool.map(func, video_list, chunksize=1)
for video, frames in zip(video_list, frame_list):
base_video = video.split(".")[0]
detect_faces_mtcnn_and_save(detector, base_folder, base_video, frames)
def get_frame_count(cap):
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
return num_frames
def get_exact_frames(cap, frame_indices):
"""Gets all frames with the indices in frame indices (0 based)"""
frames = []
for index in frame_indices:
cap.set(cv2.CAP_PROP_POS_FRAMES, index)
ret, frame = cap.read()
if ret:
frames.append(frame)
return frames
def get_exact_frames_for_optical_flow(cap, frame_indices):
"""Gets all frames and 4 ahead with the indices in frame indices (0 based)"""
frames = []
index_list = []
for index in frame_indices:
for i in range(4):
idx = index + i
cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
ret, frame = cap.read()
if ret:
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
height, width, channels = image.shape
image = cv2.resize(image, (width // 2, height // 2), interpolation=cv2.INTER_AREA)
frames.append(image)
index_list.append(idx)
return frames, index_list
def load_model(device):
device = torch.device(device)
detector = MTCNN(device=device, keep_all=True, select_largest=False, post_process=False)
return detector
def mtcnn_detect(detector, frames, path, vid_name):
data = []
def get_dist(px,py,x,y):
return abs(px - x) + abs(py - y)
def get_min_coords(s, x,y):
        min_set = min(s, key=lambda k: get_dist(k[0], k[1], x, y))  # nearest member of the set
return min_set[0], min_set[1], min_set[2]
def get_avg_coords(s):
x,y = 0.0,0.0
for dd in s:
px,py,*rest = dd
x += px
y += py
tot = len(s)
return x/tot, y/tot
def add_to_closest_set(x,y,area,bi,bj):
min_dist = float('inf')
idx = -1
for i, s in enumerate(data):
px,py,pa = get_min_coords(s,x,y)
dist = get_dist(px,py,x,y)
areas = sorted([pa, area])
if dist > 175 or (areas[1] / areas[0]) > 1.3:
continue
if dist < min_dist:
                min_dist = dist
idx = i
if idx == -1:
stuff = (x,y,area,bi,bj,)
ss = set()
ss.add(stuff)
data.append(ss)
else:
data[idx].add((x,y,area,bi,bj,))
stored_frames = []
def get_box(face_box, shape, padding=15):
(startX, startY) = int(face_box[0]), int(face_box[1])
(endX, endY) = int(face_box[2]), int(face_box[3])
height, width, _ = shape
y_top = max(startY - padding, 0)
x_top = max(startX - padding, 0)
y_bot = min(endY + padding, height)
x_bot = min(endX + padding, width)
return y_top, y_bot, x_top, x_bot
frames_boxes, frames_confidences = detector.detect([Image.fromarray(x) for x in frames], landmarks=False)
for batch_idx, (frame_boxes, frame_confidences) in enumerate(zip(frames_boxes, frames_confidences)):
frame = frames[batch_idx]
stored_frames.append(frame_boxes)
if (frame_boxes is not None) and (len(frame_boxes) > 0):
frame_locations = []
for j, (face_box, confidence) in enumerate(zip(frame_boxes, frame_confidences)):
(y, yb, x, xb) = get_box(face_box, frame.shape, 0)
area = (yb - y) * (xb - x)
if not data:
stuff = (x,y,area,batch_idx,j,)
ss = set()
ss.add(stuff)
data.append(ss)
else:
add_to_closest_set(x,y,area,batch_idx,j)
count = 0
for i, d in enumerate(data):
if len(d) > 9:
for f in d:
rx,ry,area,i,j = f
frame = frames[i]
box = stored_frames[i][j]
(y, yb, x, xb) = get_box(box, frame.shape, 10)
face_extract = frame[y : yb, x : xb]
pa = f'{path}/{vid_name}_{len(d)}_{count}.png'
cv2.imwrite(pa,cv2.cvtColor(face_extract, cv2.COLOR_RGB2BGR))
count += 1
def convert_video_to_frames_per_frame(capture, per_n):
num_frames = get_frame_count(capture)
frames = []
for i in range(0, num_frames):
ret = capture.grab()
if i % per_n == 0:
ret, image = capture.retrieve()
if ret:
height, width, channels = image.shape
image = cv2.resize(image, (width // 2, height // 2), interpolation=cv2.INTER_AREA)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
frames.append(image)
return frames
def load_model_retina(device):
return RetinaFace(gpu_id=0)
def detect_faces_mtcnn_and_save(detector, base_folder, base_video, frames, filenames=None):
pil_images = [Image.fromarray(frame) for frame in frames]
if filenames is None:
filenames = [os.path.join(base_folder, f"{base_video}_face_{i}.png") for i, _ in enumerate(pil_images)]
faces = detector(pil_images, filenames)
return faces
def convert_video_to_frames_with_mtcnn(detector, base_folder, folder):
print(folder)
for video in tqdm(os.listdir(folder)):
name = video.split(".")
try:
name, extension = name[0], name[1]
except IndexError:
continue
if extension == "mp4":
try:
capture = cv2.VideoCapture(os.path.join(folder, video))
total_frames = get_frame_count(capture)
frame_begin = 10
frame_end = total_frames - 8
begin_indices = [i for i in range(frame_begin, frame_end, total_frames // 4)]
frames, indices = get_exact_frames_for_optical_flow(capture, begin_indices)
new_video_folder = os.path.join(base_folder, name)
os.mkdir(new_video_folder)
filenames = [os.path.join(new_video_folder, f"{name}_face_{i}.png") for i in indices]
detect_faces_mtcnn_and_save(detector, new_video_folder, name, frames, filenames)
capture.release()
except Exception as e:
print(video)
print(e)
continue
if __name__ == "__main__":
# base_folder = "/home/teh_devs/deepfake/raw/test_vids"
"""
    Rescaling by 4 still needs testing
"""
from glob import glob
storage_dir = '/home/teh_devs/deepfake/dataset/revamp'
folder_list = []
print("Doing first 5 folders")
for i in range(0, 5):
folder_list.append(f"/home/teh_devs/deepfake/raw/dfdc_train_part_{i}")
detector = load_model(device="cuda:0")
# f = '/home/teh_devs/deepfake/raw/dfdc_train_part_4/srqogltgnx.mp4'
for f in folder_list:
print(f)
videos = glob(f + '/*.mp4')
for vid in tqdm(videos, ncols=0):
try:
vid_name = vid.split('/')[-1].split('.')[0]
capture = cv2.VideoCapture(vid)
frames = convert_video_to_frames_per_frame(capture, 10)
new_folder = os.path.join(storage_dir, vid_name)
os.mkdir(new_folder)
mtcnn_detect(detector, frames, new_folder, vid_name)
capture.release()
except Exception as e:
print(e)
# for f in folder_list:
# convert_video_to_frames_with_mtcnn(detector, base_folder, f)
| 2.46875
| 2
|
test/binaries/foo_v1.py
|
drmikecrowe/cod
| 405
|
12782809
|
#!/usr/bin/env python3
"""
Usage: foo [OPTION]...
--foo1 useful option foo
--bar1 useful option bar
"""
import sys
if __name__ == "__main__":
print(__doc__, file=sys.stderr)
| 1.710938
| 2
|
panos_update_panorama_upload/content_update_panorama_upload.py
|
scotchoaf/fw_content_update
| 0
|
12782810
|
<filename>panos_update_panorama_upload/content_update_panorama_upload.py
# Copyright (c) 2018, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: <NAME> <<EMAIL>>
'''
Palo Alto Networks content_update_panorama_upload.py
uses panorama install content updates to a managed firewall
does both content/threat and antivirus updates
This software is provided without support, warranty, or guarantee.
Use at your own risk.
'''
import argparse
import sys
import time
from datetime import datetime, timedelta
import pan.xapi
from xml.etree import ElementTree as etree
def get_job_id(s):
'''
extract job-id from pan-python string xml response
    string-split parsing, since pan-python's output join breaks xml rules
:param s is the input string
:return: simple string with job id
'''
return s.split('<job>')[1].split('</job>')[0]
def get_job_status(s):
'''
extract status and progress % from pan-python string xml response
    string-split parsing, since pan-python's output join breaks xml rules
:param s is the input string
:return: status text and progress %
'''
status = s.split('<status>')[1].split('</status>')[0]
progress = s.split('<progress>')[1].split('</progress>')[0]
return status, progress
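# Hedged example: the XML snippet below is invented purely to show how the two helpers
# above pull their fields out of the raw response string with str.split().
def _example_parse_job_fields():
    sample = ('<response><result><job>42</job>'
              '<status>ACT</status><progress>60</progress></result></response>')
    assert get_job_id(sample) == '42'
    assert get_job_status(sample) == ('ACT', '60')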
def check_job_status(fw, results):
'''
periodically check job status in the firewall
:param fw is fw object being queried
:param results is the xml-string results returned for job status
'''
# initialize to null status
status = ''
job_id = get_job_id(results)
# check job id status and progress
while status != 'FIN':
fw.op(cmd='<show><jobs><id>{0}</id></jobs></show>'.format(job_id))
status, progress = get_job_status(fw.xml_result())
if status != 'FIN':
print('job {0} in progress [ {1}% complete ]'.format(job_id, progress), end='\r', flush=True)
time.sleep(5)
print('\njob {0} is complete'.format(job_id))
def get_latest_content(fw, kind):
'''
check panorama to get latest content files
panorama upload doesn't have a latest option as with the firewall
:param fw: device object for api calls
    :param kind: type of content update to check ('content' or 'anti-virus')
    :return: filename of the latest downloaded content package
'''
# call to panorama to check content file name
fw.op(cmd='<request><batch><{0}><info/></{0}></batch></request>'.format(kind))
results = fw.xml_result()
contents = etree.fromstring(results)
# set a year old best date to find the latest one
bestdate = datetime.now() - timedelta(days=365)
if kind == 'anti-virus':
filetype = 'antivirus'
if kind == 'content':
filetype = 'contents'
for item in contents:
# only consider all-contents file and if downloaded
if item[7].text == 'yes' and 'all-{0}'.format(filetype) in item[2].text:
itemdate = datetime.strptime(item[5].text.rsplit(' ', 1)[0],'%Y/%m/%d %H:%M:%S')
# get the latest date and associated filename
if itemdate > bestdate:
bestdate = itemdate
latestfile = item[2].text
return latestfile
def update_content(fw, type, sn, filename):
'''
check, download, and install latest content updates
:param fw is the fw object being updated
:param type is update type - content or anti-virus
'''
# install latest content
# this model assume that panorama has latest content downloads
print('installing latest {0} updates to {1}'.format(type, sn))
print('using file {0}'.format(filename))
fw.op(cmd='<request><batch><{0}><upload-install><devices>{1}</devices>'
'<file>{2}</file></upload-install>'
'</{0}></batch></request>'.format(type, sn, filename))
results = fw.xml_result()
if '<job>' in results:
check_job_status(fw, results)
def main():
'''
simple set of api calls to update fw to latest content versions
'''
# python skillets currently use CLI arguments to get input from the operator / user. Each argparse argument long
# name must match a variable in the .meta-cnc file directly
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--panorama", help="IP address of Panorama", type=str)
parser.add_argument("-u", "--username", help="Panorama Username", type=str)
parser.add_argument("-p", "--password", help="Panorama Password", type=str)
parser.add_argument("-s", "--serial_number", help="Firewall Serial Number", type=str)
args = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
parser.exit()
exit(1)
    # this is actually the Panorama IP; the variable name is kept as-is for now
fw_ip = args.panorama
username = args.username
password = <PASSWORD>
serial_number = args.serial_number
# create fw object using pan-python class
# fw object is actually a panorama object so an api device object
fw = pan.xapi.PanXapi(api_username=username, api_password=password, hostname=fw_ip)
# get panorama api key
api_key = fw.keygen()
print('updating content for NGFW serial number {0}'.format(serial_number))
# !!! updates require panorama mgmt interface with internet access
# update ngfw to latest content and av versions
# passing in the serial number for device to update
for item in ['content', 'anti-virus']:
filename = get_latest_content(fw, item)
update_content(fw, item, serial_number, filename)
print('\ncontent update complete')
if __name__ == '__main__':
main()
| 2.140625
| 2
|
tests/integration/helper.py
|
covx/graypy_v6
| 181
|
12782811
|
<filename>tests/integration/helper.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""helper functions for testing graypy with a local Graylog instance"""
from time import sleep
from uuid import uuid4
import requests
def get_unique_message():
return str(uuid4())
DEFAULT_FIELDS = [
"message",
"full_message",
"source",
"level",
"func",
"file",
"line",
"module",
"logger_name",
]
BASE_API_URL = 'http://127.0.0.1:9000/api/search/universal/relative?query=message:"{0}"&range=300&fields='
def get_graylog_response(message, fields=None):
"""Search for a given log message (with possible additional fields)
within a local Graylog instance"""
fields = fields if fields else []
tries = 0
while True:
try:
return _parse_api_response(
api_response=_get_api_response(message, fields), wanted_message=message
)
except ValueError:
sleep(2)
if tries == 5:
raise
tries += 1
def _build_api_string(message, fields):
return BASE_API_URL.format(message) + "%2C".join(set(DEFAULT_FIELDS + fields))
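# Illustration (added; the extra field name is made up): _build_api_string fills
# BASE_API_URL with the unique message and appends the deduplicated field list,
# i.e. DEFAULT_FIELDS plus ["my_field"], joined by "%2C".
def _example_build_api_string():
    return _build_api_string(get_unique_message(), ["my_field"])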
def _get_api_response(message, fields):
url = _build_api_string(message, fields)
api_response = requests.get(
url, auth=("admin", "admin"), headers={"accept": "application/json"}
)
return api_response
def _parse_api_response(api_response, wanted_message):
assert api_response.status_code == 200
print(api_response.json())
for message in api_response.json()["messages"]:
if message["message"]["message"] == wanted_message:
return message["message"]
raise ValueError(
"wanted_message: '{}' not within api_response: {}".format(
wanted_message, api_response
)
)
| 2.3125
| 2
|
examples/long_example.py
|
AdrieanKhisbe/logupdate.py
| 5
|
12782812
|
<gh_stars>1-10
from logupdate import logupdate
from time import sleep
logupdate("This is gonna be...")
sleep(1)
logupdate("This is gonna be a very very very long example")
sleep(3)
logupdate("This is gonna be a very very very long example with very very long lines that span over terminal size")
sleep(3)
logupdate(
"""This is gonna be a very very very long example with very very long lines that span over terminal size
And that use also multilines!""")
sleep(3)
logupdate.clear()
logupdate("Voilà").done()
| 1.929688
| 2
|
tests/test_system.py
|
adamzhang1987/bt-python-sdk
| 4
|
12782813
|
import unittest
import warnings
from pybt.system import System
from pybt.exceptions import InvalidAPIKey
from .config import CONFIG
class ClientTestCase(unittest.TestCase):
def setUp(self):
warnings.simplefilter('ignore', ResourceWarning)
self.api = System(CONFIG.get("panel_address"), CONFIG.get("api_key"))
def test_api_key_error(self):
with self.assertRaises(InvalidAPIKey):
api_err_key = System(CONFIG.get("panel_address"), "somewords"+CONFIG.get("api_key"))
api_err_key.get_system_total()
def test_get_system_total(self):
self.assertIsInstance(self.api.get_system_total(), dict)
self.assertIn("system", self.api.get_system_total())
self.assertIn("version", self.api.get_system_total())
def test_get_disk_info(self):
self.assertIsInstance(self.api.get_disk_info(), list)
self.assertIn("filesystem", self.api.get_disk_info()[0])
self.assertIn("type", self.api.get_disk_info()[0])
def test_get_net_work(self):
self.assertIsInstance(self.api.get_net_work(), dict)
self.assertIn("network", self.api.get_net_work())
def test_get_task_count(self):
self.assertIsInstance(self.api.get_task_count(), int)
def test_update_panel(self):
self.assertIsInstance(self.api.update_panel(), dict)
self.assertIn("status", self.api.update_panel())
self.assertIn("version", self.api.update_panel().get('msg'))
| 2.390625
| 2
|
softwares/houdini_wizard/export/modeling.py
|
Wizard-collab/wizard_2
| 1
|
12782814
|
# coding: utf-8
# Author: <NAME>
# Contact: <EMAIL>
# Python modules
import traceback
import os
import logging
logger = logging.getLogger(__name__)
# Wizard modules
from houdini_wizard import wizard_tools
from houdini_wizard import wizard_export
# Houdini modules
def main():
scene = wizard_export.save_or_save_increment()
try:
out_nodes_dic = {'wizard_modeling_output_LOD1':'LOD1',
'wizard_modeling_output_LOD2':'LOD2',
'wizard_modeling_output_LOD3':'LOD3'}
for out_node_name in out_nodes_dic.keys():
if wizard_tools.check_out_node_existence(out_node_name):
export_name = out_nodes_dic[out_node_name]
wizard_export.trigger_before_export_hook('modeling')
wizard_export.export(stage_name='modeling', export_name=export_name, out_node=out_node_name)
except:
logger.error(str(traceback.format_exc()))
finally:
wizard_export.reopen(scene)
| 2.078125
| 2
|
code/data_analysis_2.py
|
PrideLee/CCFDF-Personalized-Matching-Model-of-Packages-for-Telecom-Users
| 0
|
12782815
|
<filename>code/data_analysis_2.py
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
def scater_service_type(att):
type_0 = raw_data[raw_data["current_service"] == 89016252][att].tolist()
type_1 = raw_data[raw_data["current_service"] == 89016253][att].tolist()
type_2 = raw_data[raw_data["current_service"] == 89016259][att].tolist()
type_3 = raw_data[raw_data["current_service"] == 89950166][att].tolist()
type_4 = raw_data[raw_data["current_service"] == 89950167][att].tolist()
type_5 = raw_data[raw_data["current_service"] == 89950168][att].tolist()
type_6 = raw_data[raw_data["current_service"] == 99999825][att].tolist()
type_7 = raw_data[raw_data["current_service"] == 99999826][att].tolist()
type_8 = raw_data[raw_data["current_service"] == 99999827][att].tolist()
type_9 = raw_data[raw_data["current_service"] == 99999828][att].tolist()
type_10 = raw_data[raw_data["current_service"] == 99999830][att].tolist()
type_11 = raw_data[raw_data["current_service"] == 90063345][att].tolist()
type_12 = raw_data[raw_data["current_service"] == 90109916][att].tolist()
type_13 = raw_data[raw_data["current_service"] == 90155946][att].tolist()
type_14 = raw_data[raw_data["current_service"] == 99104722][att].tolist()
x_1 = [1 + random.random() for i in range(len(type_0))]
x_2 = [3 + random.random() for i in range(len(type_1))]
x_3 = [5 + random.random() for i in range(len(type_2))]
x_4 = [7 + random.random() for i in range(len(type_3))]
x_5 = [9 + random.random() for i in range(len(type_4))]
x_6 = [11 + random.random() for i in range(len(type_5))]
x_7 = [13 + random.random() for i in range(len(type_6))]
x_8 = [15 + random.random() for i in range(len(type_7))]
x_9 = [17 + random.random() for i in range(len(type_8))]
x_10 = [19 + random.random() for i in range(len(type_9))]
x_11 = [21 + random.random() for i in range(len(type_10))]
x_12 = [23 + random.random() for i in range(len(type_11))]
x_13 = [25 + random.random() for i in range(len(type_12))]
x_14 = [27 + random.random() for i in range(len(type_13))]
x_15 = [29 + random.random() for i in range(len(type_14))]
plt.scatter(x_1, type_0, c="red", s=0.03)
plt.scatter(x_2, type_1, c="red", s=0.03)
plt.scatter(x_3, type_2, c="red", s=0.03)
plt.scatter(x_4, type_3, c="red", s=0.03)
plt.scatter(x_5, type_4, c="red", s=0.03)
plt.scatter(x_6, type_5, c="red", s=0.03)
plt.scatter(x_7, type_6, c="red", s=0.03)
plt.scatter(x_8, type_7, c="red", s=0.03)
plt.scatter(x_9, type_8, c="red", s=0.03)
plt.scatter(x_10, type_9, c="red", s=0.03)
plt.scatter(x_11, type_10, c="red", s=0.03)
plt.scatter(x_12, type_11, c="red", s=0.03)
plt.scatter(x_13, type_12, c="red", s=0.03)
plt.scatter(x_14, type_13, c="red", s=0.03)
plt.scatter(x_15, type_14, c="red", s=0.03)
plt.xlabel('service_type')
plt.ylabel(att)
plt.grid(True)
# plt.title("service_type-1_total_fee scatter")
plt.show()
def bbox(att):
type_0 = raw_data[raw_data["current_service"] == 89016252][att].tolist()
type_1 = raw_data[raw_data["current_service"] == 89016253][att].tolist()
type_2 = raw_data[raw_data["current_service"] == 89016259][att].tolist()
type_3 = raw_data[raw_data["current_service"] == 89950166][att].tolist()
type_4 = raw_data[raw_data["current_service"] == 89950167][att].tolist()
type_5 = raw_data[raw_data["current_service"] == 89950168][att].tolist()
type_6 = raw_data[raw_data["current_service"] == 99999825][att].tolist()
type_7 = raw_data[raw_data["current_service"] == 99999826][att].tolist()
type_8 = raw_data[raw_data["current_service"] == 99999827][att].tolist()
type_9 = raw_data[raw_data["current_service"] == 99999828][att].tolist()
type_10 = raw_data[raw_data["current_service"] == 99999830][att].tolist()
type_11 = raw_data[raw_data["current_service"] == 90063345][att].tolist()
type_12 = raw_data[raw_data["current_service"] == 90109916][att].tolist()
type_13 = raw_data[raw_data["current_service"] == 90155946][att].tolist()
type_14 = raw_data[raw_data["current_service"] == 99104722][att].tolist()
y = np.transpose(np.array([type_0, type_1, type_2, type_3, type_4, type_5, type_6, type_7, type_8, type_9, type_10, type_11, type_12, type_13, type_14]))
# y = np.transpose(np.array(
# [type_0, type_1, type_2, type_3, type_4, type_5, type_6, type_7, type_8, type_9, type_10, type_11, type_12,
# type_13]))
labels = ["89016252", "89016253", "89016259", "89950166", "89950167", "89950168", "99999825", "99999826", "99999827", "99999828", "99999830", "90063345", "90109916", "90155946", "99104722"]
# labels = ["89016252", "89016253", "89016259", "89950166", "89950167", "89950168", "99999825", "99999826",
# "99999827", "99999828", "99999830", "90063345", "90109916", "90155946"]
plt.boxplot(y, labels=labels, sym='o')
plt.grid(True)
plt.show()
def binary_value_distribution(att):
type_0 = raw_data[raw_data["current_service"] == 89016252][att]
type_1 = raw_data[raw_data["current_service"] == 89016253][att]
type_2 = raw_data[raw_data["current_service"] == 89016259][att]
type_3 = raw_data[raw_data["current_service"] == 89950166][att]
type_4 = raw_data[raw_data["current_service"] == 89950167][att]
type_5 = raw_data[raw_data["current_service"] == 89950168][att]
type_6 = raw_data[raw_data["current_service"] == 99999825][att]
type_7 = raw_data[raw_data["current_service"] == 99999826][att]
type_8 = raw_data[raw_data["current_service"] == 99999827][att]
type_9 = raw_data[raw_data["current_service"] == 99999828][att]
type_10 = raw_data[raw_data["current_service"] == 99999830][att]
type_11 = raw_data[raw_data["current_service"] == 90063345][att]
type_12 = raw_data[raw_data["current_service"] == 90109916][att]
type_13 = raw_data[raw_data["current_service"] == 90155946][att]
type_14 = raw_data[raw_data["current_service"] == 99104722][att]
print(type_0.value_counts())
print(type_1.value_counts())
print(type_2.value_counts())
print(type_3.value_counts())
print(type_4.value_counts())
print(type_5.value_counts())
print(type_6.value_counts())
print(type_7.value_counts())
print(type_8.value_counts())
print(type_9.value_counts())
print(type_10.value_counts())
print(type_11.value_counts())
print(type_12.value_counts())
print(type_13.value_counts())
print(type_14.value_counts())
def dimensionality_reduction(par):
new_list = []
par_list = raw_data[par].tolist()
for i in par_list:
if i != 0:
new_list.append(1)
else:
new_list.append(0)
return new_list
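# Standalone sketch (illustrative, not called anywhere) of the binarisation that
# dimensionality_reduction applies to a raw_data column, shown on a plain list.
def _example_binarise(values):
    return [1 if v != 0 else 0 for v in values]  # e.g. [0, 3.5, 0, 7] -> [0, 1, 0, 1]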
def parallel_coordinates(dataframe, paralist):
service_type = dataframe["current_service"].tolist()
num = len(service_type)
list_12 = []
list_13 = []
list_14 = []
for i in range(num):
if service_type[i] == 12:
list_12.append(i)
if service_type[i] == 13:
list_13.append(i)
if service_type[i] == 14:
list_14.append(i)
list_14_sample_num = round(len(list_14) * 0.68)
list_14_sample = random.sample(list_14, list_14_sample_num)
dataframe_12 = dataframe.iloc[list_12]
dataframe_13 = dataframe.iloc[list_13]
dataframe_14 = dataframe.iloc[list_14_sample]
par_num = len(paralist)
x_type = [i*3 for i in range(par_num)]
x_12, y_12 = parallel_coordinates_part(dataframe_12, x_type, paralist)
num_12 = len(dataframe_12)
print(1)
for i in range(num_12):
plt.plot(x_12[i], y_12[i], 'r')
    # plt.hold is deprecated/removed in modern matplotlib; successive plot() calls already overlay
x_13, y_13 = parallel_coordinates_part(dataframe_13, x_type, paralist)
num_13 = len(dataframe_13)
print(2)
for i in range(num_13):
plt.plot(x_13[i], y_13[i], 'g')
x_14, y_14 = parallel_coordinates_part(dataframe_14, x_type, paralist)
num_14 = len(dataframe_14)
print(3)
for i in range(num_14):
plt.plot(x_14[i], y_14[i], 'b')
plt.show()
def parallel_coordinates_part(dataframe, x, parlist):
num = len(dataframe)
att = [[m + random.random() for m in x] for n in range(num)]
y = [[dataframe.iloc[n][m] for m in parlist] for n in range(num)]
return att, y
def pre(dataframe, paralist):
    # dataframe only includes the data where current_service is 12, 13, or 14.
service_type = dataframe["current_service"].tolist()
num = len(service_type)
list_12 = []
list_13 = []
list_14 = []
for i in range(num):
if service_type[i] == 12:
list_12.append(i)
if service_type[i] == 13:
list_13.append(i)
if service_type[i] == 14:
list_14.append(i)
dataframe_12 = dataframe.iloc[list_12]
dataframe_13 = dataframe.iloc[list_13]
dataframe_14 = dataframe.iloc[list_14]
paralist_12 = []
paralist_13 = []
paralist_14 = []
for i in paralist:
paralist_12.append(sorted(dataframe_12[i].tolist()))
paralist_13.append(sorted(dataframe_13[i].tolist()))
paralist_14.append(sorted(dataframe_14[i].tolist()))
para_num = len(paralist)
num_12 = len(paralist_12[0])
bound_12 = max(round(num_12 * 0.001), 100)
    para_12_bound = [paralist_12[j][-1] - paralist_12[j][0] for j in range(para_num)]
pre_12 = [[] for i in range(para_num)]
num_13 = len(paralist_13[0])
bound_13 = max(round(num_13 * 0.001), 100)
    para_13_bound = [paralist_13[j][-1] - paralist_13[j][0] for j in range(para_num)]
pre_13 = [[] for i in range(para_num)]
num_14 = len(paralist_14[0])
bound_14 = max(round(num_14 * 0.001), 100)
    para_14_bound = [paralist_14[j][-1] - paralist_14[j][0] for j in range(para_num)]
pre_14 = [[] for i in range(para_num)]
for i in range(num):
for j in range(para_num):
temp = dataframe.iloc[i][paralist[j]]
for k in range(num_12):
if temp < paralist_12[j][k]:
paralist_12[j].insert(k, temp)
break
up = min(k + bound_12, num_12)
down = max(k - bound_12, 0)
            pre_12[j].append(round((paralist_12[j][up] - paralist_12[j][down])/para_12_bound[j], 6))
for k in range(num_13):
if temp < paralist_13[j][k]:
paralist_13[j].insert(k, temp)
break
up = min(k + bound_13, num_13)
down = max(k - bound_13, 0)
            pre_13[j].append(round((paralist_13[j][up] - paralist_13[j][down])/para_13_bound[j], 6))
for k in range(num_14):
if temp < paralist_14[j][k]:
paralist_14[j].insert(k, temp)
break
up = min(k + bound_14, num_14)
down = max(k - bound_14, 0)
            pre_14[j].append(round((paralist_14[j][up] - paralist_14[j][down])/para_14_bound[j], 6))
for j in range(para_num):
rank_att = []
for i in range(num):
a = pre_12[j][i]
b = pre_13[j][i]
c = pre_14[j][i]
temp_list = [a, b, c]
list_sort = sorted(temp_list)
rank = [list_sort.index(k) for k in temp_list]
rank_att.append(rank[0] * 100 + rank[1] + rank[2])
dataframe[paralist[j] + "probability"] = rank_att
return dataframe, pre_12, pre_13, pre_14
# raw_data = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\train\train_1.csv", encoding="utf-8", low_memory=False)
# print(raw_data["service_type"].value_counts())
# binary_value_distribution("service_type")
# scater_service_type("service_type")
# print(raw_data["is_mix_service"].value_counts())
# is_mix_service_1 = raw_data[raw_data["is_mix_service"] == 1]["current_service"]
# print(is_mix_service_1.value_counts())
# online_time_qu = raw_data[raw_data["online_time"] < 64]["service_type"]
# scater_service_type("online_time")
# bbox("online_time")
# print(online_time_qu.value_counts())
# scater_service_type("4_total_fee")
# bbox("4_total_fee")
# scater_service_type("month_traffic")
# bbox("month_traffic")
# month_traffic_3 = raw_data[raw_data["service_type"] == 3]["month_traffic"]
# print(month_traffic_3.value_counts())
# month_traffic_3 = raw_data[raw_data["service_type"] == 1]["month_traffic"]
# print(month_traffic_3.value_counts())
# binary_value_distribution("many_over_bill")
# binary_value_distribution("contract_type")
# scater_service_type("contract_type")
# scater_service_type("contract_time")
# binary_value_distribution("contract_time")
# binary_value_distribution("is_promise_low_consume")
# binary_value_distribution("net_service")
# scater_service_type("pay_times")
# scater_service_type("pay_num")
# bbox("pay_times")
# bbox("pay_num")
# temp = raw_data["pay_num"].tolist()
# error = [i for i in range(len(temp)) if temp[i]>40000]
# print(error)
# scater_service_type("last_month_traffic")
# binary_value_distribution("last_month_traffic")
# bbox("last_month_traffic")
# temp = raw_data["2_total_fee"].tolist()
# error = [i for i in range(len(temp)) if temp[i] == '\\N']
# float_fee_2 = [float(i) for i in temp]
# print([i for i in range(len(float_fee_2)) if float_fee_2[i] < 0])
# scater_service_type("2_total_fee")
# bbox("2_total_fee")
# temp = raw_data["3_total_fee"].tolist()
# error = [i for i in range(len(temp)) if temp[i] == '\\N']
# scater_service_type("3_total_fee")
# bbox("3_total_fee")
# scater_service_type("local_trafffic_month")
# bbox("local_trafffic_month")
# temp = raw_data["local_trafffic_month"].tolist()
# error = [i for i in range(len(temp)) if temp[i]>300000]
# print(error)
# scater_service_type("local_caller_time")
# bbox("local_caller_time")
# temp = raw_data["local_caller_time"].tolist()
# error = [i for i in range(len(temp)) if temp[i]>5000]
# print(error)
# binary_value_distribution("local_caller_time")
# scater_service_type("service1_caller_time")
# bbox("service1_caller_time")
# binary_value_distribution("service1_caller_time")
# scater_service_type("service2_caller_time")
# bbox("service2_caller_time")
# temp = raw_data["service2_caller_time"].tolist()
# error = [i for i in range(len(temp)) if temp[i]>8000]
# print(error)
# binary_value_distribution("service1_caller_time")
# temp = raw_data["gender"].tolist()
# error = [i for i in range(len(temp)) if temp[i] == '\\N']
# print(error)
# binary_value_distribution("gender")
# scater_service_type("age")
# temp = raw_data["age"].tolist()
# error = [i for i in range(len(temp)) if temp[i] == '\\N']
# print(error)
# bbox("age")
# binary_value_distribution("complaint_level")
# binary_value_distribution("former_complaint_num")
# bbox("former_complaint_num")
# scater_service_type("former_complaint_fee")
# bbox("former_complaint_fee")
# temp = raw_data["former_complaint_fee"].tolist()
# error = [i for i in range(len(temp)) if temp[i] > 10**9]
# print(error)
# binary_value_distribution("former_complaint_fee")
# dis_1 = raw_data[raw_data["service_type"] == 1]['former_complaint_fee'].tolist()
# dis_3 = raw_data[raw_data["service_type"] == 3]['former_complaint_fee'].tolist()
# dis_4 = raw_data[raw_data["service_type"] == 4]['former_complaint_fee'].tolist()
# sit_1 = [i for i in range(len(dis_1)) if ((dis_1[i] != 0) & (dis_1[i] < 10**10))]
# sit_3 = [i for i in range(len(dis_3)) if ((dis_3[i] != 0) & (dis_3[i] < 10**10))]
# sit_4 = [i for i in range(len(dis_4)) if ((dis_4[i] != 0) & (dis_4[i] < 10**10))]
# y_1 = [dis_1[i] for i in sit_1]
# y_3 = [dis_3[i] for i in sit_3]
# y_4 = [dis_4[i] for i in sit_4]
# print(np.mean(y_1))
# print(np.mean(y_3))
# print(np.mean(y_4))
# y = np.transpose(np.array([y_1, y_3, y_4]))
# labels = ["service_type_1", "service_type_3", "service_type_4"]
# plt.boxplot(y, labels=labels, sym='o')
# plt.grid(True)
# plt.show()
# print(binary_value_distribution("current_service"))
# temp = raw_data["1_total_fee"].tolist()
# error = [i for i in range(len(temp)) if temp[i]>4000]
# value = [temp[i] for i in error]
# print(value)
# temp = raw_data["month_traffic"].tolist()
# error = [i for i in range(len(temp)) if temp[i] > 120000]
# value = [temp[i] for i in error]
# print(value)
# bbox("contract_time")
# scater_service_type("pay_num")
# bbox("pay_num")
#
# raw_data = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\train\train_1.csv", encoding="utf-8",
# low_memory=False)
# binary_value_distribution("service_type")
# raw_data = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\train\class_2_sup_add_0_balance.csv", encoding="utf-8",
# low_memory=False)
# index = raw_data["current_service"].tolist()
# select_type = [12, 13, 14]
# index_num = len(index)
# select_data = [i for i in range(index_num) if index[i] in select_type]
# select_label = [index[i] for i in select_data]
# site_total = range(0, index_num)
# noisy_nom = round(0.3 * index_num)
# noisy = random.sample(site_total, noisy_nom)
# noisy_real = [i for i in noisy if i not in select_data]
# noisy_real_num = len(noisy_real)
# noisy_label = [0] * noisy_real_num
# select_total = select_data + noisy_real
# label_total = select_label + noisy_label
# select_sample = raw_data.iloc[select_total]
# select_sample["current_service_new"] = label_total
# select_sample.to_csv(r"E:\CCFDF\plansmatching\data\raw data\train\small_class_12_13_14_others.csv")
#
# b = np.mean([0.7064220183486238, 0.8772348033373063, 0.5764192139737991, 0.6644951140065146, 0.8432098765432099, 0.7623400365630713, 0.8747913188647747, 0.8597285067873304, 0.6574074074074074, 0.5882352941176471, 0.6779661016949153])
# print(b)
#
# r_3 = [0.7095435684647303, 0.8888888888888888, 0.7058823529411764, 0.6552901023890785, 0.8222778473091366, 0.7463837994214079, 0.8519195612431445, 0.8584269662921349, 0.6600331674958542, 0.611023622047244, 0.6757425742574258]
# print(np.mean(r_3))
raw_data = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\train\class_2_sup_add_0_correct.csv", encoding="utf-8",
low_memory=False)
attri_list = ['online_time_norm',
'local_trafffic_month_norm',
'service2_caller_time_norm', 'age_norm',
'fee_mean_norm', 'fee_mean_2_norm',
'fee_fluctuate_norm', 'month_traffic_norm', 'contract_time_norm', 'pay_num_norm',
'last_month_traffic_norm', 'local_trafffic_month_norm', 'local_caller_time_norm',
'service1_caller_time_norm']
parallel_coordinates(raw_data, attri_list)
# raw_data = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\train\class_2_sup_add_0_correct.csv", encoding="utf-8",
# low_memory=False)
# service_type = raw_data["current_service"].tolist()
# num_total = len(raw_data)
# select_type = [12, 13, 14]
# sel_site = [i for i in range(num_total) if service_type[i] in select_type]
# select_dataframe = raw_data.iloc[sel_site]
# select_dataframe.to_csv(r"E:\CCFDF\plansmatching\data\raw data\train\class_2_sup_add_12_13_14_correct.csv")
#
| 2.78125
| 3
|
spartan/expr/__init__.py
|
MaggieQi/spartan
| 0
|
12782816
|
#!/usr/bin/env python
"""
Definitions of expressions and optimizations.
In Spartan, operations are not performed immediately. Instead, they are
represented using a graph of `Expr` nodes. Expression graphs can be
evaluated using the `Expr.evaluate` or `Expr.force` methods.
The `base` module contains the definition of `Expr`, the base class for all
types of expressions. It also defines subclasses for wrapping common
Python values: lists (`ListExpr`), dicts (`DictExpr`) and tuples (`TupleExpr`).
Operations are built up using a few high-level operations -- these all
live in their own modules:
* Create a new distributed array `spartan.expr.ndarray`
* Map over an array `spartan.expr.map` and `spartan.expr.shuffle`
* Reduce over an array `spartan.expr.reduce`
* Apply a stencil/convolution to an array `spartan.expr.stencil`
* Slicing/indexing `spartan.expr.index`.
Optimizations on DAGs live in `spartan.expr.optimize`.
"""
from .base import Expr, evaluate, optimized_dag, glom, eager, lazify, as_array, force, NotShapeable, newaxis
from .builtins import *
from .assign import assign
from .map import map, map2
from .map_with_location import map_with_location
from .region_map import region_map
from .tile_operation import tile_operation
from .ndarray import ndarray
from .outer import outer
from .reduce import reduce
from .shuffle import shuffle
from .scan import scan
from .write_array import write, from_numpy, from_file, from_file_parallel
from .checkpoint import checkpoint
from .fio import save, load, pickle, unpickle, partial_load, partial_unpickle
from .reshape import reshape
from .retile import retile
from .transpose import transpose
from .dot import dot
from .sort import sort, argsort, argpartition, partition
Expr.outer = outer
Expr.sum = sum
Expr.mean = mean
Expr.astype = astype
Expr.ravel = ravel
Expr.argmin = argmin
Expr.argmax = argmax
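# Usage sketch (kept as comments on purpose, since this is package __init__ code, and the
# exact call signatures are assumptions based only on the docstring above, which says
# expressions are lazy and evaluated via Expr.force / Expr.evaluate):
#
#   import spartan.expr as expr
#   a = expr.ndarray((100, 100))   # builds an expression graph, nothing is computed yet
#   b = a.sum()                    # still lazy
#   result = b.force()             # evaluation happens here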
| 2.609375
| 3
|
data_preprocessing/utils/match_lat_lon.py
|
facebookresearch/Context-Aware-Representation-Crop-Yield-Prediction
| 12
|
12782817
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def match_lat_lon(lats_from, lons_from, lats_to, lons_to, expand=0):
i_lat_start = i_lat_end = i_lon_start = i_lon_end = 0
for i in range(len(lats_from)):
if abs(lats_from[i] - lats_to[0]) < 0.00001:
i_lat_start = i - expand
if abs(lats_from[i] - lats_to[-1]) < 0.00001:
i_lat_end = i + expand
for i in range(len(lons_from)):
if abs(lons_from[i] - lons_to[0]) < 0.00001:
i_lon_start = i - expand
if abs(lons_from[i] - lons_to[-1]) < 0.00001:
i_lon_end = i + expand
return i_lat_start, i_lat_end, i_lon_start, i_lon_end
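# Hedged usage sketch (the toy coordinate lists are invented): find the indices in a
# source grid that bracket the endpoints of a target sub-grid.
def _example_match_lat_lon():
    lats_from = [40.0, 40.5, 41.0, 41.5, 42.0]
    lons_from = [-100.0, -99.5, -99.0, -98.5]
    lats_to = [40.5, 41.0, 41.5]
    lons_to = [-99.5, -99.0]
    # returns (1, 3, 1, 2): start/end indices of the matching lat and lon ranges
    return match_lat_lon(lats_from, lons_from, lats_to, lons_to)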
| 3.359375
| 3
|
tests/declarative.py
|
drowse314-dev-ymat/lexical-knowledge-base-for-japanese-civil-law
| 1
|
12782818
|
<gh_stars>1-10
# encoding: utf-8
from attest import (
Tests, assert_hook,
raises, contextmanager,
)
import rdflib
from lkbutils import declarative
from lkbutils.relationprovider import RedundantRelation, Cyclic
termloader_unit = Tests()
relationloader_unit = Tests()
@contextmanager
def new_rdflib_termloader(**options):
try:
yield declarative.RDFLibTermLoader(**options)
finally:
pass
@contextmanager
def new_rdflib_relationloader(**options):
try:
yield declarative.RDFLibRelationLoader(**options)
finally:
pass
class Fixtures(object):
"""Namespace for fixtures."""
class NS(object):
pass
# law terms
law_terms = NS()
law_terms.flat = [
u'抵当権', u'質権', u'詐害行為取消権', u'制限行為能力者',
]
law_terms.struct = {
u'権利': [
{u'物権': [u'抵当権', u'質権']},
{u'請求権': [u'詐害行為取消権']},
],
u'人': [u'制限行為能力者'],
}
law_terms.identifiers = {
u'teitouken': u'抵当権',
u'shichiken': u'質権',
u'sagaikouitorikeshiken': u'詐害行為取消権',
u'seigenkouinouryokumono': u'制限行為能力者',
}
# general properties
basic_properties = NS()
basic_properties.flat = [
u'hyper', u'part_of{attribute}', u'contrary',
]
basic_properties.identifiers = {
u'hyper': u'hyper',
u'attribute': u'part_of',
u'contrary': u'contrary',
}
# japanese prefectures
jp_prefectures = NS()
jp_prefectures.flat_yaml = (
u"terms:\n"
u" - 京都\n"
u" - 奈良\n"
u" - 島根\n"
u" - 神奈川\n"
u" - 福島\n"
)
jp_prefectures.struct_yaml = (
u"terms:\n"
u" 府:\n"
u" 政令指定都市がある:\n"
u" - 京都\n"
u" 県:\n"
u" 政令指定都市がある:\n"
u" - 神奈川\n"
u" ない:\n"
u" - 奈良\n"
u" - 島根\n"
u" - 福島\n"
)
jp_prefectures.identifiers = {
u'kyouto': u'京都',
u'nara': u'奈良',
u'shimane': u'島根',
u'kanagawa': u'神奈川',
u'fukushima': u'福島',
}
# predicate-style python funcs
python_predicates = NS()
python_predicates.yaml = (
u"options:\n"
u" as_property: yes\n"
u"terms:\n"
u" - isinstance{_type}\n"
u" - issubclass\n"
u" - hasattr{_has}\n"
)
python_predicates.identifiers = {
u'_type': u'isinstance',
u'issubclass': u'issubclass',
u'_has': u'hasattr',
}
# world & japanese rivers
world_rivers = NS()
world_rivers.definition_yaml = (
u"options:\n"
u" romanize: yes\n"
u"terms:\n"
u" egypt:\n"
u" - nile\n"
u" brazil:\n"
u" - amazon\n"
u" china:\n"
u" - 長江\n"
u" japan:\n"
u" 中部地方:\n"
u" - 信濃川\n"
)
world_rivers.identifiers = {
u'nile': u'nile', u'amazon': u'amazon',
u'choukou': u'長江', u'shinanokawa': u'信濃川',
}
world_rivers.prop_definition_yaml = (
u"options:\n"
u" romanize: yes\n"
u"load_options:\n"
u" as_property: yes\n"
u"terms:\n"
u" - longer than\n"
u" - wider than\n"
)
world_rivers.prop_identifiers = {
u'longer_than': u'longer than',
u'wider_than': u'wider than',
}
    # 出世魚 (shusse-uo: fish whose names change as they grow)
shusse_uo = NS()
shusse_uo.core_relation = u'shusse_uo'
shusse_uo.core_relation_identifier = u'shusse'
shusse_uo.terms = [
u'shusse',
u'wakashi', u'inada', u'warasa', u'buri',
]
shusse_uo.relation_pairs = [
(u'wakashi', u'inada'),
(u'inada', u'warasa'),
(u'warasa', u'buri'),
]
shusse_uo.additions = NS()
shusse_uo.additions.addcycle = [(u'buri', u'wakashi')]
shusse_uo.additions.redundant = [(u'warasa', u'buri')]
# US geo. relation configs
us_geo_rel_cfg = NS()
us_geo_rel_cfg.yaml = (
u"options:\n"
u" dry: yes\n"
u" nointerlinks: no\n"
u" acyclic: no\n"
u"relations:\n"
u" next_to:\n"
u" options:\n"
u" acyclic: yes\n"
u" pairs:\n"
u" 南:\n"
u" - missisippi arkansas\n"
u" 北西:\n"
u" - washington oregon\n"
u" far_from:\n"
u" options:\n"
u" dry: no\n"
u" pairs:\n"
u" - alabama nebraska\n"
)
us_geo_rel_cfg.relations = NS()
us_geo_rel_cfg.relations.next_to = u'next_to'
us_geo_rel_cfg.relations.far_from = u'far_from'
us_geo_rel_cfg.expects = NS()
us_geo_rel_cfg.expects.attr_casts = {
u'relation': unicode,
u'options': dict,
u'pairs': set,
}
us_geo_rel_cfg.expects.next_to = {
u'relation': u'next_to',
u'options': {u'dry': True, u'nointerlinks': False, u'acyclic': True},
u'pairs': [
(u'washington', u'oregon'),
(u'missisippi', u'arkansas'),
],
}
us_geo_rel_cfg.expects.far_from = {
u'relation': u'far_from',
u'options': {u'dry': False, u'nointerlinks': False, u'acyclic': False},
u'pairs': [
(u'alabama', u'nebraska'),
],
}
# US geo. relation definitions
def_us_geo_rels = NS()
def_us_geo_rels.terms = [
u'next_to',
u'tikai',
u'missisippi', u'arkansas', u'tennessee', u'alabama',
]
def_us_geo_rels.definition_yaml = (
u"options:\n"
u" dry: yes\n"
u" nointerlinks: yes\n"
u" acyclic: no\n"
u"relations:\n"
u" next_to:\n"
u" options:\n"
u" acyclic: yes\n"
u" pairs:\n"
u" 南:\n"
u" - missisippi arkansas\n"
u" - arkansas tennessee\n"
u" - tennessee alabama\n"
u" tikai:\n"
u" pairs:\n"
u" - missisippi arkansas\n"
u" - arkansas tennessee\n"
u" - tennessee alabama\n"
)
def_us_geo_rels.relations = NS()
def_us_geo_rels.relations.next_to = u'next_to'
def_us_geo_rels.relations.tikai = u'tikai'
def_us_geo_rels.relation_pairs = [
(u'missisippi', u'arkansas'),
(u'arkansas', u'tennessee'),
(u'tennessee', u'alabama'),
]
def_us_geo_rels.additions = NS()
def_us_geo_rels.additions.redundant = [(u'missisippi', u'arkansas')]
def_us_geo_rels.additions.addcycle = [(u'alabama', u'missisippi')]
def rdflib_getlabel(graph, node):
return list(graph.objects(subject=node, predicate=rdflib.RDFS.label))[0].value
@termloader_unit.test
def load_terms_from_data():
"""Load terms directly from data."""
# flat
with new_rdflib_termloader(romanize=True) as termloader:
termloader.load(Fixtures.law_terms.flat)
ns = termloader.ns
graph = termloader.graph
for id_label in Fixtures.law_terms.identifiers:
node = getattr(ns, id_label)
assert id_label in ns
assert isinstance(node, rdflib.BNode)
assert (rdflib_getlabel(graph, node) ==
Fixtures.law_terms.identifiers[id_label])
# structured
with new_rdflib_termloader(romanize=True) as termloader:
termloader.load(Fixtures.law_terms.struct)
ns = termloader.ns
graph = termloader.graph
for id_label in Fixtures.law_terms.identifiers:
node = getattr(ns, id_label)
assert id_label in ns
assert isinstance(node, rdflib.BNode)
assert (rdflib_getlabel(graph, node) ==
Fixtures.law_terms.identifiers[id_label])
# properties
with new_rdflib_termloader(romanize=True) as termloader:
termloader.load(Fixtures.basic_properties.flat, as_property=True)
ns = termloader.ns
graph = termloader.graph
triples = list(termloader.graph.triples((None, None, None)))
for id_label in Fixtures.basic_properties.identifiers:
node = getattr(ns, id_label)
assert id_label in ns
assert isinstance(node, rdflib.BNode)
assert (rdflib_getlabel(graph, node) ==
Fixtures.basic_properties.identifiers[id_label])
assert (node, rdflib.RDF.type, rdflib.RDF.Property) in triples
@termloader_unit.test
def load_terms_from_yaml():
"""Load terms from YAML representation."""
# flat
with new_rdflib_termloader(romanize=True) as termloader:
termloader.load_yaml(Fixtures.jp_prefectures.flat_yaml)
ns = termloader.ns
graph = termloader.graph
for id_label in Fixtures.jp_prefectures.identifiers:
node = getattr(ns, id_label)
assert id_label in ns
assert isinstance(node, rdflib.BNode)
assert (rdflib_getlabel(graph, node) ==
Fixtures.jp_prefectures.identifiers[id_label])
# structured
with new_rdflib_termloader(romanize=True) as termloader:
termloader.load_yaml(Fixtures.jp_prefectures.struct_yaml)
ns = termloader.ns
graph = termloader.graph
for id_label in Fixtures.jp_prefectures.identifiers:
node = getattr(ns, id_label)
assert id_label in ns
assert isinstance(getattr(ns, id_label), rdflib.BNode)
assert (rdflib_getlabel(graph, node) ==
Fixtures.jp_prefectures.identifiers[id_label])
# properties
with new_rdflib_termloader(romanize=True) as termloader:
termloader.load_yaml(Fixtures.python_predicates.yaml)
ns = termloader.ns
graph = termloader.graph
triples = list(termloader.graph.triples((None, None, None)))
for id_label in Fixtures.python_predicates.identifiers:
node = getattr(ns, id_label)
assert id_label in ns
assert isinstance(node, rdflib.BNode)
assert (rdflib_getlabel(graph, node) ==
Fixtures.python_predicates.identifiers[id_label])
assert (node, rdflib.RDF.type, rdflib.RDF.Property) in triples
@termloader_unit.test
def load_terms_from_yaml_on_demand():
"""Load terms from YAML representation using declarative.load_terms."""
termloader = declarative.rdflib_load_terms(Fixtures.world_rivers.definition_yaml)
ns = termloader.ns
graph = termloader.graph
for id_label in Fixtures.world_rivers.identifiers:
node = getattr(ns, id_label)
assert id_label in ns
assert isinstance(node, rdflib.BNode)
assert (rdflib_getlabel(graph, node) ==
Fixtures.world_rivers.identifiers[id_label])
@termloader_unit.test
def load_properties_from_yaml_on_demand():
"""Load properties from YAML representation using declarative.load_terms."""
termloader = declarative.rdflib_load_terms(Fixtures.world_rivers.prop_definition_yaml)
ns = termloader.ns
graph = termloader.graph
triples = list(graph.triples((None, None, None)))
for id_label in Fixtures.world_rivers.prop_identifiers:
node = getattr(ns, id_label)
assert id_label in ns
assert isinstance(node, rdflib.BNode)
assert (rdflib_getlabel(graph, node) ==
Fixtures.world_rivers.prop_identifiers[id_label])
assert (node, rdflib.RDF.type, rdflib.RDF.Property) in triples
@termloader_unit.test
def toplevel_termloader():
"""lkbutils.declarative.load_terms is accessible from top-level."""
from lkbutils import rdflib_load_terms
class MockRDFLibNamespace(object):
def __init__(self, names):
self.namespace = self.create_ns(names)
@property
def ns(self):
return self.namespace
def create_ns(self, names):
class NS:
pass
ns = NS()
for name in names:
setattr(ns, name, rdflib.BNode())
return ns
@relationloader_unit.test
def load_relations_from_data():
"""Load node relations directly from structured data."""
nodeprovider = MockRDFLibNamespace(Fixtures.shusse_uo.terms)
with new_rdflib_relationloader(nodeprovider=nodeprovider,
relation=Fixtures.shusse_uo.core_relation_identifier,
dry=True, acyclic=True) as relloader:
relloader.load(Fixtures.shusse_uo.relation_pairs)
triples = list(relloader.graph.triples((None, None, None)))
for relsrc, reldest in Fixtures.shusse_uo.relation_pairs:
noderel = (getattr(nodeprovider.ns, relsrc),
nodeprovider.ns.shusse,
getattr(nodeprovider.ns, reldest))
assert noderel in triples
with raises(Cyclic):
relloader.load(Fixtures.shusse_uo.additions.addcycle)
with raises(RedundantRelation):
relloader.load(Fixtures.shusse_uo.additions.redundant)
@relationloader_unit.test
def relation_configs_from_yaml():
"""
Parse YAML representation & generate configs. to create RelationLoader.
"""
# mapping {relation => config}
relation_definitions = declarative.rdflib_load_relcfg(Fixtures.us_geo_rel_cfg.yaml)
config_attr_casts = Fixtures.us_geo_rel_cfg.expects.attr_casts
parsed_next_to_cfg = relation_definitions[Fixtures.us_geo_rel_cfg.relations.next_to]
for config_attr in config_attr_casts:
cast = config_attr_casts[config_attr]
assert (
cast(parsed_next_to_cfg[config_attr]) ==
cast(Fixtures.us_geo_rel_cfg.expects.next_to[config_attr])
)
parsed_far_from_cfg = relation_definitions[Fixtures.us_geo_rel_cfg.relations.far_from]
for config_attr in config_attr_casts:
cast = config_attr_casts[config_attr]
assert (
cast(parsed_far_from_cfg[config_attr]) ==
cast(Fixtures.us_geo_rel_cfg.expects.far_from[config_attr])
)
@relationloader_unit.test
def load_relations_from_yaml():
"""Load node relations from YAML representation."""
nodeprovider = MockRDFLibNamespace(Fixtures.def_us_geo_rels.terms)
relloaders = declarative.rdflib_load_relations(
Fixtures.def_us_geo_rels.definition_yaml,
nodeprovider=nodeprovider,
)
relloader_next_to = relloaders[Fixtures.def_us_geo_rels.relations.next_to]
relloader_tikai = relloaders[Fixtures.def_us_geo_rels.relations.tikai]
# pairs loaded
triples_next_to = list(relloader_next_to.graph.triples((None, None, None)))
for relsrc, reldest in Fixtures.def_us_geo_rels.relation_pairs:
noderel = (getattr(nodeprovider.ns, relsrc),
nodeprovider.ns.next_to,
getattr(nodeprovider.ns, reldest))
assert noderel in triples_next_to
triples_tikai = list(relloader_tikai.graph.triples((None, None, None)))
for relsrc, reldest in Fixtures.def_us_geo_rels.relation_pairs:
noderel = (getattr(nodeprovider.ns, relsrc),
nodeprovider.ns.tikai,
getattr(nodeprovider.ns, reldest))
assert noderel in triples_tikai
# rules
with raises(RedundantRelation):
relloader_next_to.load(Fixtures.def_us_geo_rels.additions.redundant)
with raises(Cyclic):
relloader_next_to.load(Fixtures.def_us_geo_rels.additions.addcycle)
with raises(RedundantRelation):
relloader_tikai.load(Fixtures.def_us_geo_rels.additions.redundant)
relloader_tikai.load(Fixtures.def_us_geo_rels.additions.addcycle)
@relationloader_unit.test
def toplevel_relationloader():
"""lkbutils.declarative.load_relations is accessible from top-level."""
from lkbutils import rdflib_load_relations
| 2.234375
| 2
|
detector.py
|
neutrons/Qikr
| 0
|
12782819
|
<filename>detector.py
import mcvine, mcvine.components
from mcni.AbstractComponent import AbstractComponent
from mcni.utils import conversion
import numpy as np
import os
from mcni import neutron_buffer, neutron
class Detector(AbstractComponent):
"2D detector center a (0,0,0) and perpendicular to z"
def __init__(self, name, xwidth, yheight, dx, dy, outfile, tofbinsize=0.1):
self.name = name
assert xwidth > 0 and yheight > 0 and dx>0 and dy>0
self.xwidth = xwidth
self.yheight = yheight
self.dx = dx
self.dy = dy
self.Nx = int(xwidth/dx)
self.Ny = int(yheight/dy)
print (self.Nx, self.Ny)
self.outfile = outfile
self.tofbinsize = tofbinsize
return
def process(self, neutrons):
if not len(neutrons):
return
        from mcni.neutron_storage import neutrons_as_npyarr, ndblsperneutron  # ndblsperneutron: number of doubles per neutron; each neutron is represented by x, y, z, vx, vy, vz, s1, s2, t, p (10 doubles)
        arr = neutrons_as_npyarr(neutrons)  # convert the input neutrons to a numpy array
arr.shape = -1, ndblsperneutron
x = arr[:, 0]; y = arr[:, 1]; z = arr[:, 2]
vx = arr[:, 3]; vy = arr[:, 4]; vz = arr[:, 5]
s1 = arr[:, 6]; s2 = arr[:, 7];
t = arr[:, 8]; t0 = t.copy()
p = arr[:, 9]
# propagate to Z = 0
self._propagateToZ0(x, y, z, vx, vy, vz, t)
# Filter
ftr = (x >= -self.xwidth / 2) * (x < self.xwidth / 2) \
* (y >= -self.yheight / 2) * (y < self.yheight / 2) \
* (t > t0)
#
xindex = (x+self.xwidth/2)//self.dx; xindex[xindex<0] = 0; xindex[xindex>=self.Nx]=self.Nx-1
yindex = (y+self.yheight/2)//self.dy; yindex[yindex<0] = 0; yindex[yindex>=self.Ny]=self.Ny-1
index = yindex + xindex * self.Ny
N = ftr.sum()
from mccomponents.detector.event_utils import datatype
events = np.zeros(N, dtype=datatype)
events['pixelID'] = index[ftr]
events['tofChannelNo']=t[ftr]*1e6/self.tofbinsize
events['p'] = p[ftr]
self._save(events)
return
def _save(self, events):
outdir = self._getOutputDirInProgress()
np.save(os.path.join(outdir, self.outfile), events)
return
def _propagateToZ0(self, x, y, z, vx, vy, vz, t):
dt = -z / vz
x += vx * dt
y += vy * dt
z[:] = 0
t += dt
return
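# Illustrative sketch (not part of the original component): the binning in
# Detector.process maps a focal-plane position (x, y) to a linear pixelID as
# yindex + xindex * Ny. The toy geometry below is an assumption for demonstration.
if __name__ == '__main__':
    xwidth, yheight, dx, dy = 1.0, 1.0, 0.25, 0.25  # hypothetical detector geometry (m)
    Nx, Ny = int(xwidth/dx), int(yheight/dy)
    x = np.array([0.0, 0.3]); y = np.array([0.0, -0.3])
    xindex = np.clip((x + xwidth/2)//dx, 0, Nx - 1)
    yindex = np.clip((y + yheight/2)//dy, 0, Ny - 1)
    print(yindex + xindex*Ny)  # linear pixel IDs, same formula as Detector.process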
| 3.078125
| 3
|
py/text2img.py
|
walker-zheng/code
| 4
|
12782820
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
'''
# Convert png images to gif with convert (ImageMagick):
ls |sed 's/\(.*\).png/convert \1.png -flatten -channel A -threshold 0% \1.gif/g'
# Package with cx_Freeze:
python setup.py build
python setup.py bdist_msi
'''
from os import mkdir
from os import walk
from os import path
from os import getcwd
import sys
from math import floor
from codecs import open
# from pathlib import Path
# from inspect import getsourcefile
# from os.path import abspath
import pygame
def AAfilledRoundedRect(surface,rect,color,radius=0.4):
"""
AAfilledRoundedRect(surface,rect,color,radius=0.4)
surface : destination
rect : rectangle
color : rgb or rgba
radius : 0 <= radius <= 1
"""
rect = pygame.Rect(rect)
color = pygame.Color(*color)
alpha = color.a
color.a = 0
pos = rect.topleft
rect.topleft = 0,0
rectangle = pygame.Surface(rect.size,pygame.SRCALPHA)
circle = pygame.Surface([min(rect.size)*3]*2,pygame.SRCALPHA)
pygame.draw.ellipse(circle,(0,0,0),circle.get_rect(),0)
circle = pygame.transform.smoothscale(circle,[int(min(rect.size)*radius)]*2)
radius = rectangle.blit(circle,(0,0))
radius.bottomright = rect.bottomright
rectangle.blit(circle,radius)
radius.topright = rect.topright
rectangle.blit(circle,radius)
radius.bottomleft = rect.bottomleft
rectangle.blit(circle,radius)
rectangle.fill((0,0,0),rect.inflate(-radius.w,0))
rectangle.fill((0,0,0),rect.inflate(0,-radius.h))
rectangle.fill(color,special_flags=pygame.BLEND_RGBA_MAX)
rectangle.fill((255,255,255,alpha),special_flags=pygame.BLEND_RGBA_MIN)
return surface.blit(rectangle,pos)
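# Example usage (illustrative; the surface, rectangle and colour values below
# are assumptions, not taken from this script):
#   screen = pygame.display.set_mode((320, 240))
#   AAfilledRoundedRect(screen, (10, 10, 200, 80), (30, 144, 255, 255), radius=0.4)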
def splitByLen(string, width):
return [string[x:x+width] for x in range(0, len(string), width)]
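# e.g. splitByLen(u'abcdef', 2) -> [u'ab', u'cd', u'ef'] (illustrative)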
def generate_pic(hasBackgroud, frontColor):
# import os.path
# try:
# dir_path = os.path.dirname(os.path.abspath(__file__))
# except NameError: # We are the main py2exe script, not a module
# import sys
# dir_path = os.path.dirname(os.path.abspath(sys.argv[0]))
# dir_path = path.dirname(path.realpath(__file__))
# dir_path = Path(__file__).parent
# dir_path = abspath(getsourcefile(lambda:0))
# if getattr(sys, 'text2img', False):
# # The application is frozen
# dir_path = path.dirname(sys.executable)
# Print("found install path:" + dir_path)
path_prefix = getcwd()
pygame.init()
fontPath = path.join(path_prefix, "fonts\\")
text = u'获取测试文本长度哈哈'
line_len = len(text)
fontSize = 15
    fontHeight = 200 # 35 40 50 # height of a single glyph; larger values give crisper text
    fontEdge = 0.25 # image margin (as a fraction of fontHeight)
    picEdge = 1600 # 240 # image edge length; characters per line = picEdge/fontHeight
dst_scale = 240/picEdge
width_plus = fontHeight * fontEdge
height_plus = fontHeight * fontEdge
radius_default = 0.5
color_white = (255, 255, 255, 255)
color_gray = (204, 204, 204, 255)
color_black = (0, 0, 0, 0)
isSmoooth = True
if hasBackgroud:
color_bg = color_gray
color_fg = frontColor
image_bg = "-bg"
else:
color_bg = None
color_fg = color_black
image_bg = ""
imagePath = path.join(path_prefix, "images\\")
Print(u"图片将生成在目录:\t\t\t\t\t" + imagePath)
mkdir(imagePath) if not path.exists(imagePath) else None
input_file = path.join(path_prefix,"1.txt")
if not path.exists(input_file):
Print(u"[退出]当前目录无文件:\t\t\t\t" + input_file)
return
else:
Print(u"以文件内容为输入:\t\t\t\t\t" + input_file)
if not path.exists(fontPath):
Print(u"[退出]未找到字体:\t\t\t\t\t" + fontPath)
return
else:
Print(u"搜索字体:\t\t\t\t\t\t\t" + fontPath)
for _,_,filenames in walk(path.join(fontPath)):
fontCount = 0
for filename in filenames:
font = pygame.font.Font(path.join("fonts", filename), fontSize)
_rtext = font.render(text, isSmoooth, color_fg, color_bg)
_width, _height = _rtext.get_size()
while _height < fontHeight:
fontSize += 1
font = pygame.font.Font(path.join("fonts", filename), fontSize)
_rtext = font.render(text, isSmoooth, color_fg, color_bg)
_width, _height = _rtext.get_size()
if hasBackgroud:
echoBG= u"带"
else:
echoBG= u"无"
Print(u"使用["+ str(fontSize).zfill(3) + "]号字体" + echoBG + "背景色:\t\t\t" + path.join(fontPath, filename))
fontCount += 1
width_one = _width/len(text)
line_len = floor(picEdge/(width_one+2*fontEdge))
imagePath_font = imagePath + path.splitext(filename)[0]
imagePath_big = imagePath_font + "\\big" + image_bg
imagePath_small = imagePath_font + "\\small" + image_bg
imagePath_huge = imagePath_font + "\\huge" + image_bg
mkdir(imagePath_font) if not path.exists(imagePath_font) else None
mkdir(imagePath_huge) if not path.exists(imagePath_huge) else None
mkdir(imagePath_big) if not path.exists(imagePath_big) else None
mkdir(imagePath_small) if not path.exists(imagePath_small) else None
Print(u"将生成最大[" + str(picEdge) + "]pix的图片:\t\t\t" + imagePath_huge)
Print(u"将生成[" + str(picEdge*dst_scale) + "x" + str(picEdge*dst_scale) + "]pix的微信图片:\t" + imagePath_big)
Print(u"将生成[" + str(picEdge*dst_scale/2) + "x" + str(picEdge*dst_scale/2) + "]pix的微信图片:\t" + imagePath_small)
count = 0
for line in open(input_file, mode='r', encoding='utf-8'):
line = line.strip("\n")
if len(line) == 0:
continue
lines = [line]
if len(line) > line_len:
lines = splitByLen(line, line_len)
rtext1 = pygame.Surface((width_one * len(lines[0]) + width_plus * 2, _height * len(lines) + height_plus * 2), pygame.SRCALPHA)
rtext1.set_alpha(0)
if hasBackgroud:
AAfilledRoundedRect(rtext1, rtext1.get_rect(), color_bg, 0.5)
line_count = 0
for every in lines:
rtext = font.render(every, isSmoooth, color_fg, color_bg)
rtext1.blit(rtext, (height_plus, width_plus + line_count * _height))
line_count += 1
pygame.image.save(rtext1, imagePath_huge + "\\" + str(count).zfill(2) + ".png")
Print(u"保存图片:\t\t\t\t\t\t\t" + imagePath_huge + "\\" + str(count).zfill(2) + ".png")
width_save = floor(picEdge*dst_scale)
height_save = floor(picEdge*dst_scale*rtext1.get_height()/rtext1.get_width())
rtext2 = pygame.transform.smoothscale(rtext1, (width_save, height_save))
rtext3 = pygame.Surface((picEdge*dst_scale, picEdge*dst_scale), pygame.SRCALPHA)
rtext3.set_alpha(0)
rtext3.blit(rtext2, (0, (picEdge*dst_scale - rtext2.get_height())/2))
pygame.image.save(rtext3, imagePath_big + "\\" + str(count).zfill(2) + ".png")
Print(u"保存图片:\t\t\t\t\t\t\t" + imagePath_big + "\\" + str(count).zfill(2) + ".png")
rtext2 = pygame.transform.smoothscale(rtext3, (floor(rtext3.get_width()/2), floor(rtext3.get_height()/2)))
pygame.image.save(rtext2, imagePath_small + "\\" + str(count).zfill(2) + ".png")
Print(u"保存图片:\t\t\t\t\t\t\t" + imagePath_small + "\\" + str(count).zfill(2) + ".png")
count += 1
__DEBUG__ = True
def Print(string):
print(string) if __DEBUG__ else None
generate_pic(True, (0, 0, 0, 0))
generate_pic(False, (0, 0, 0, 0))
| 2.515625
| 3
|
warpfield/telescope.py
|
xr0038/jasmine_warpfield
| 0
|
12782821
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from dataclasses import dataclass, field
from typing import Callable, List
from astropy.coordinates import SkyCoord, Longitude, Latitude, Angle
from astropy.time import Time
from astropy.units.quantity import Quantity
from astropy.wcs import WCS
from astropy.visualization.wcsaxes import WCSAxesSubplot
from scipy.spatial.transform import Rotation
from matplotlib.patches import Rectangle
from shapely.geometry import Polygon, Point
from shapely.geometry import MultiPoint
from shapely.prepared import prep
from descartes.patch import PolygonPatch
from scipy.optimize import least_squares
import matplotlib.pyplot as plt
import astropy.units as u
import numpy as np
import pandas as pd
import sys
from .util import get_projection
def identity_transformation(position):
''' An identity transformation function.
  This function is a fallback function for the image distortion.
The function requires a tuple of two arrays. The first and second elements
are the x- and y-positions on the focal plane without any distortion,
respectively. This function returns the positions as they are.
Parameters:
position: A numpy.array with the shape of (2, Nsrc). The first element
contains the x-positions, while the second element contains
the y-positions.
Return:
A numpy.ndarray of the input coordinates.
'''
return np.array(position)
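# A minimal sketch of a user-supplied distortion with the same call signature
# as identity_transformation. The cubic radial term and its coefficient are
# assumptions for illustration only; they are not part of the instrument model.
# It could be attached later with Optics.set_distortion(example_radial_distortion).
def example_radial_distortion(position, k=1e-12):
  ''' Apply a simple radial distortion r -> r * (1 + k * r**2). '''
  position = np.array(position)
  r2 = position[0]**2 + position[1]**2
  return position * (1.0 + k*r2)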
@dataclass
class Optics(object):
''' Definition of optical components.
Attributes:
    pointing (SkyCoord) : the pointing direction of the telescope.
position_angle (Angle) : the position angle of the telescope.
focal_length (Quantity): the focal length of the telescope in meter.
diameter (Quantity) : the diameter of the telescope in meter.
valid_region (Polygon) : the valid region of the focal plane.
    margin (Quantity)      : the margin of the valid region (buffer).
distortion (function) : a function to distort the focal plane image.
'''
pointing: SkyCoord
position_angle: Angle = Angle(0.0, unit='degree')
focal_length: Quantity = 7.3*u.m
diameter: Quantity = 0.4*u.m
valid_region: Polygon = Point(0,0).buffer(30000)
margin: Quantity = 5000*u.um
distortion: Callable = identity_transformation
@property
def scale(self):
''' A conversion factor from sky to focal plane in degree/um. '''
return (1.0*u.rad/self.focal_length).to(u.deg/u.um)
@property
def center(self):
    ''' A dummy position to define the center of the focal plane. '''
return SkyCoord(0*u.deg,0*u.deg,frame='icrs')
@property
def pointing_angle(self):
''' Angle set to define the pointing position and orientation. '''
## use the ICRS frame in calculation.
icrs = self.pointing.icrs
## calculate position angle in the ICRS frame.
north = self.pointing.directional_offset_by(0.0,1*u.arcsec)
delta = self.pointing.icrs.position_angle(north)
position_angle = -self.position_angle.rad-delta.rad
return np.array((icrs.ra.rad,-icrs.dec.rad,position_angle))
def set_distortion(self, distortion):
''' Assign a distortion function.
The argument of the distortion function should be a numpy.array with
the shape of (2, Nsrc). The first element contains the x-positions,
while the second element contains the y-positions.
Parameters:
distortion (function): a function to distort focal plane image.
'''
self.distortion = distortion
def block(self, position):
    ''' Flag sources that fall outside the valid region (plus margin).
    Parameters:
      position (ndarray): source positions on the focal plane w/o distortion.
    Return:
      A boolean array; True marks sources outside the buffered valid region
      (i.e., blocked), False marks sources inside the field-of-view.
'''
mp = MultiPoint(position.T)
polygon = prep(self.valid_region.buffer(self.margin.to_value(u.um)))
return np.array([not polygon.contains(p) for p in mp.geoms])
def imaging(self, sources, epoch=None):
''' Map celestial positions onto the focal plane.
Parameters:
sources (SkyCoord): the coordinates of sources.
epoch (Time): the epoch of the observation.
Return:
A `DataFrame` instance. The DataFrame contains four columns: the "x" and
"y" columns are the positions on the focal plane in micron, and the "ra"
and "dec" columns are the original celestial positions in the ICRS frame.
'''
try:
if epoch is not None:
sources = sources.apply_space_motion(epoch)
except Exception as e:
print('No proper motion information is available.', file=sys.stderr)
print('The positions are not updated to new epoch.', file=sys.stderr)
icrs = sources.transform_to('icrs')
xyz = icrs.cartesian.xyz
r = Rotation.from_euler('zyx', -self.pointing_angle)
pqr = r.as_matrix() @ xyz
if pqr.ndim==1: pqr = np.expand_dims(pqr,axis=1)
obj = SkyCoord(pqr.T, obstime=epoch,
representation_type='cartesian').transform_to('icrs')
obj.representation_type = 'spherical'
proj = get_projection(self.center,self.scale.to_value())
pos = np.array(obj.to_pixel(proj, origin=0))
blocked = self.block(pos)
pos = self.distortion(pos)
return pd.DataFrame({
'x': pos[0], 'y': pos[1],
'ra': icrs.ra, 'dec': icrs.dec,
'blocked': blocked
})
@dataclass
class PixelDisplacement(object):
''' Definition of the pixel non-uniformity.
Attributes:
dx (ndarray): a two dimensional array with the same size of the detector.
each element contains the x-displacement of the pixel.
dy (ndarray): a two dimensional array with the same size of the detector.
each element contains the y-displacement of the pixel.
'''
dx: np.ndarray = None
dy: np.ndarray = None
def initialize(self, naxis1, naxis2):
''' Initialize the displacement array with zeros.
Parameters:
naxis1 (int): the detector size along with NAXIS1.
naxis2 (int): the detector size along with NAXIS2.
'''
self.dx = np.zeros((naxis2, naxis1))
self.dy = np.zeros((naxis2, naxis1))
def evaluate(self, x, y):
''' Evaluate the source position displacement.
    Parameters:
      x (ndarray): the x-coordinates of the sources on the detector.
      y (ndarray): the y-coordinates of the sources on the detector.
Note:
Not implemented yet.
'''
return (x,y)
@dataclass
class Detector(object):
''' Definition of a detector.
Attributes:
naxis1 (int) : detector pixels along with NAXIS1.
naxis2 (int) : detector pixels along with NAXIS2.
pixel_scale (Quantity): nominal detector pixel scale.
offset_dx (Quantity) : the offset along with the x-axis.
    offset_dy (Quantity)  : the offset along with the y-axis.
position_angle (Angle): the position angle of the detector.
displacement (PixelDisplacement):
an instance to define the displacements of the sources due to
the pixel non-uniformity.
'''
naxis1: int = 4096
naxis2: int = 4096
pixel_scale: Quantity = 10*u.um
offset_dx: Quantity = 0*u.um
offset_dy: Quantity = 0*u.um
position_angle: Angle = Angle(0.0, unit='degree')
displacement: PixelDisplacement = None
def __post_init__(self):
if self.displacement is None:
self.displacement = PixelDisplacement()
self.displacement.initialize(self.naxis1,self.naxis2)
@property
def width(self):
''' The physical width of the detector. '''
return self.naxis1*self.pixel_scale.to_value(u.um)
@property
def height(self):
''' The physical height of the detector. '''
return self.naxis2*self.pixel_scale.to_value(u.um)
@property
def xrange(self):
''' The x-axis range of the detector. '''
return np.array((-self.width/2,self.width/2))
@property
def yrange(self):
''' The y-axis range of the detector. '''
return np.array((-self.height/2,self.height/2))
@property
def patch(self):
''' The footprint of the detector on the focal plane as a patch. '''
c,s = np.cos(self.position_angle.rad),np.sin(self.position_angle.rad)
x0,y0 = self.offset_dx.to_value(u.um),self.offset_dy.to_value(u.um)
x1 = x0 - (+ self.width*c - self.height*s)/2
y1 = y0 - (+ self.width*s + self.height*c)/2
return Rectangle((x1,y1), width=self.width, height=self.height,
angle=self.position_angle.deg, ec='r', linewidth=2, fill=False)
@property
def footprint(self):
''' The footprint of the detector on the focal plane. '''
c,s = np.cos(self.position_angle.rad),np.sin(self.position_angle.rad)
x0,y0 = self.offset_dx.to_value(u.um),self.offset_dy.to_value(u.um)
x1 = x0 - (+ self.width*c - self.height*s)/2
y1 = y0 - (+ self.width*s + self.height*c)/2
x2 = x0 - (- self.width*c - self.height*s)/2
y2 = y0 - (- self.width*s + self.height*c)/2
x3 = x0 - (- self.width*c + self.height*s)/2
y3 = y0 - (- self.width*s - self.height*c)/2
x4 = x0 - (+ self.width*c + self.height*s)/2
y4 = y0 - (+ self.width*s - self.height*c)/2
return Polygon(([x1,y1],[x2,y2],[x3,y3],[x4,y4]))
def align(self, x, y):
''' Align the source position to the detector.
Parameters:
x (Series): the x-coordinates on the focal plane.
y (Series): the y-coordinates on the focal plane.
Return:
The tuple of the x- and y-positions of the sources, which are remapped
onto the detector coordinates.
'''
c,s = np.cos(-self.position_angle.rad),np.sin(-self.position_angle.rad)
dx,dy = x-self.offset_dx.to_value(u.um), y-self.offset_dy.to_value(u.um)
return c*dx-s*dy, s*dx+c*dy
def capture(self, position):
''' Calculate the positions of the sources on the detector.
Parameters:
position (DataFrame): the positions of the sources on the focal plane.
the "x" and "y" columns are respectively the x-
and y-positions of the sources in units of micron.
Return:
A list of `DataFrame`s which contains the positions on the detectors.
The number of the `DataFrame`s are the same as the detectors.
The "x" and "y" columns are the positions on each detector. The "ra"
and "dec" columns are the original positions in the ICRS frame.
'''
x,y = self.align(position.x, position.y)
x,y = self.displacement.evaluate(x,y)
position.x = x
position.y = y
bf = ~position.blocked
xf = ((self.xrange[0] < x) & (x < self.xrange[1]))
yf = ((self.yrange[0] < y) & (y < self.yrange[1]))
return position.loc[xf&yf&bf,:]
@dataclass
class Telescope(object):
''' An imaginary telescope instance.
The `Telescope` class is composed of an `Optics` instance and a list of
`Detector` instances. This instance organizes the alignment of the detectors
and converts the coordinates of the astronomical sources into the positions
on the detectors.
Attributes:
    pointing (SkyCoord)   : the pointing direction of the telescope.
    position_angle (Angle): the position angle of the telescope.
'''
pointing: SkyCoord = None
position_angle: Angle = None
optics: Optics = None
detectors: List[Detector] = None
def __post_init__(self):
if self.optics is None:
self.optics = Optics(self.pointing, self.position_angle)
else:
self.pointing = self.optics.pointing
self.position_angle = self.optics.position_angle
if self.detectors is None:
self.detectors = [Detector(),]
assert self.optics is not None
assert self.detectors is not None
def set_distortion(self, distortion):
''' Set a distortion function to the optics.
Parameters:
distortion (function): a function to distort focal plane image.
'''
self.optics.set_distortion(distortion)
def get_footprints(self, **options):
''' Obtain detector footprints on the sky.
Options:
frame (string): specify the coordinate of the footprint.
limit (bool): limit the footprints within the valid region.
patch (bool): obtain PolygonPatch instead of Polygon.
'''
frame = options.pop('frame', self.pointing.frame.name)
limit = options.pop('limit', True)
patch = options.pop('patch', False)
if self.pointing.frame.name == 'galactic':
l0 = self.pointing.galactic.l
b0 = self.pointing.galactic.b
else:
l0 = self.pointing.icrs.ra
b0 = self.pointing.icrs.dec
def generate(e):
frame = self.pointing.frame
def func(x):
pos = x.reshape((-1,2))
p0 = SkyCoord(pos[:,0], pos[:,1], frame=frame, unit=u.deg)
res = self.optics.imaging(p0)
return (e-res[['x','y']].to_numpy()).flatten()
return func
footprints = []
valid_region = self.optics.valid_region
for d in self.detectors:
fp = valid_region.intersection(d.footprint) if limit else d.footprint
edge = np.array(fp.boundary.coords[0:-1])
p0 = np.tile([l0.deg,b0.deg],edge.shape[0])
func = generate(edge)
res = least_squares(func, p0)
pos = res.x.reshape((-1,2))
sky = SkyCoord(pos[:,0]*u.deg,pos[:,1]*u.deg,
frame=self.pointing.frame.name)
if frame == 'galactic':
sky = sky.galactic
pos = Polygon(np.stack([sky.l.deg,sky.b.deg]).T)
else:
sky = sky.icrs
pos = Polygon(np.stack([sky.ra.deg,sky.dec.deg]).T)
footprints.append(PolygonPatch(pos, **options) if patch else pos)
return footprints
def overlay_footprints(self, axis, **options):
''' Display the footprints on the given axis.
Parameters:
axis (WCSAxesSubplot):
An axis instance with a WCS projection.
Options:
      frame (string): the coordinate frame.
label (string): the label of the footprints.
color (Color): color of the footprint edges.
'''
label = options.pop('label', None)
color = options.pop('color','C2')
frame = options.pop('frame', self.pointing.frame.name)
if isinstance(axis, WCSAxesSubplot):
      options['transform'] = axis.get_transform(frame)
for footprint in self.get_footprints(frame=frame, **options):
v = np.array(footprint.boundary.coords)
axis.plot(v[:,0], v[:,1], c=color, label=label, **options)
return axis
def display_focal_plane(
self, sources=None, epoch=None, axis=None, **options):
''' Display the layout of the detectors.
Show the layout of the detectors on the focal plane. The detectors are
illustrated by the red rectangles. If the `sources` are provided, the
detectors are overlaid on the sources on the focal plane.
Parameters:
sources (SkyCoord): the coordinates of astronomical sources.
epoch (Time) : the observation epoch.
'''
markersize = options.pop('markersize', 1)
marker = options.pop('marker', 'x')
figsize = options.pop('figsize', (8,8))
if axis is None:
fig = plt.figure(figsize=figsize)
axis = fig.add_subplot(111)
axis.set_aspect(1.0)
axis.add_patch(PolygonPatch(
self.optics.valid_region, color=(0.8,0.8,0.8), alpha=0.2))
if sources is not None:
position = self.optics.imaging(sources, epoch)
axis.scatter(position.x,position.y,markersize,marker=marker)
for d in self.detectors:
axis.add_patch(d.patch)
axis.autoscale_view()
axis.grid()
axis.set_xlabel('Displacement on the focal plane ($\mu$m)', fontsize=14)
axis.set_ylabel('Displacement on the focal plane ($\mu$m)', fontsize=14)
if axis is None: fig.tight_layout()
def observe(self, sources, epoch=None):
''' Observe astronomical sources.
Map the sky coordinates of astronomical sources into the physical
positions on the detectors of the telescope.
Parameters:
sources (SkyCoord): a list of astronomical sources.
epoch (Time): the datetime of the observation.
Return:
A numpy.ndarray with the shape of (N(detector), 2, N(source)).
The first index specifies the detector of the telescope.
A two dimensional array is assigned for each detector. The first
line is the coordinates along the NAXIS1 axis, and the second one
is the coordinates along the NAXIS2 axis.
'''
position = self.optics.imaging(sources, epoch)
fov = []
for det in self.detectors:
fov.append(det.capture(position))
return fov
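# Minimal usage sketch (illustrative): point the telescope at an assumed
# position and map a couple of sources onto its detectors. The coordinates
# below are arbitrary assumptions, not values shipped with this package.
if __name__ == '__main__':
  pointing = SkyCoord(ra=10.0*u.deg, dec=-5.0*u.deg, frame='icrs')
  scope = Telescope(pointing=pointing, position_angle=Angle(0.0, unit='degree'))
  sources = SkyCoord(ra=[10.0, 10.1]*u.deg, dec=[-5.0, -4.9]*u.deg, frame='icrs')
  for detected in scope.observe(sources):
    print(detected[['x', 'y']])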
| 2.953125
| 3
|
tests/hurricane_yz/plot_hurricane_yz.py
|
drreynolds/sundials_manyvector_demo
| 2
|
12782822
|
#!/usr/bin/env python3
#------------------------------------------------------------
# Programmer(s): <NAME> @ SMU
#------------------------------------------------------------
# Copyright (c) 2019, Southern Methodist University.
# All rights reserved.
# For details, see the LICENSE file.
#------------------------------------------------------------
# matplotlib-based plotting utility function for
# hurricane test problem in the yz-plane
# imports
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
from utilities_euler3D import *
# determine if running interactively
if __name__=="__main__":
showplots = False
else:
showplots = True
# set view for surface plots
elevation = 15
angle = 20
# set test constants
rho0 = 1.0
v0 = 10.0
Amp = 25.0
gamma = 2.0
yl = -1.0
yr = 1.0
zl = -1.0
zr = 1.0
# utility function to create analytical solution
def analytical_solution(t,ny,nz):
if (t == 0):
t = 1e-14
p0prime = Amp*gamma*rho0**(gamma-1.0)
rthresh = 2.0*t*np.sqrt(p0prime)
rho = np.zeros((ny,nz), dtype=float)
my = np.zeros((ny,nz), dtype=float)
mz = np.zeros((ny,nz), dtype=float)
dy = (yr-yl)/ny
dz = (zr-zl)/nz
for j in range(nz):
for i in range(ny):
y = (i+0.5)*dy + yl
z = (j+0.5)*dz + zl
r = np.sqrt(y*y + z*z)
if (r == 0.0): # protect against division by zero
r = 1e-14
costheta = y/r
sintheta = z/r
if (r < rthresh):
rho[i,j] = r*r / (8*Amp*t*t)
my[i,j] = rho[i,j] * (y + z) / (2*t)
mz[i,j] = rho[i,j] * (z - y) / (2*t)
else:
rho[i,j] = rho0
my[i,j] = rho0 * ( 2*t*p0prime*costheta +
np.sqrt(2*p0prime)*np.sqrt(r*r-2*t*t*p0prime)*sintheta )/r
mz[i,j] = rho0 * ( 2*t*p0prime*sintheta -
np.sqrt(2*p0prime)*np.sqrt(r*r-2*t*t*p0prime)*costheta )/r
return [rho, my, mz]
# load solution data
nx, ny, nz, nchem, nt, xgrid, ygrid, zgrid, tgrid, rho, mx, my, mz, et, chem = load_data()
# output general information to screen
print('Generating plots for data set:')
print(' ny: ', ny)
print(' nz: ', nz)
print(' nt: ', nt)
# determine extents of plots
minmaxrho = [0.9*rho.min(), 1.1*rho.max()]
if (rho.min() == rho.max()):
minmaxrho = [rho.min()-0.1, rho.max()+0.1]
minmaxmy = [0.9*my.min(), 1.1*my.max()]
if (my.min() == my.max()):
minmaxmy = [my.min()-0.1, my.max()+0.1]
minmaxmz = [0.9*mz.min(), 1.1*mz.max()]
if (mz.min() == mz.max()):
minmaxmz = [mz.min()-0.1, mz.max()+0.1]
minmaxet = [0.9*et.min(), 1.1*et.max()]
if (et.min() == et.max()):
minmaxet = [et.min()-0.1, et.max()+0.1]
# generate plots of solution
for tstep in range(nt):
numfigs = 0
print('time step', tstep+1, 'out of', nt)
# get true solutions
rhotrue, mytrue, mztrue = analytical_solution(tgrid[tstep],ny,nz)
# set string constants for current time, mesh sizes
tstr = repr(tstep)
nystr = repr(ny)
nzstr = repr(nz)
# extract 2D velocity fields (computed and true)
U = my[nx//2,:,:,tstep]/rho[nx//2,:,:,tstep]
Utrue = mytrue/rhotrue
V = mz[nx//2,:,:,tstep]/rho[nx//2,:,:,tstep]
Vtrue = mztrue/rhotrue
speed = np.sqrt(U**2 + V**2)
speedtrue = np.sqrt(Utrue**2 + Vtrue**2)
# set filenames for graphics
rhosurf = 'rho_surface.' + repr(tstep).zfill(4) + '.png'
etsurf = 'et_surface.' + repr(tstep).zfill(4) + '.png'
vstr = 'velocity.' + repr(tstep).zfill(4) + '.png'
rhocont = 'rho_contour.' + repr(tstep).zfill(4) + '.png'
etcont = 'et_contour.' + repr(tstep).zfill(4) + '.png'
rho1dout = 'rho1d.' + repr(tstep).zfill(4) + '.png'
my1dout = 'my1d.' + repr(tstep).zfill(4) + '.png'
    mz1dout = 'mz1d.' + repr(tstep).zfill(4) + '.png'
sp1dout = 'speed1d.' + repr(tstep).zfill(4) + '.png'
# set y and z meshgrid objects
Y,Z = np.meshgrid(ygrid,zgrid)
# surface plots
numfigs += 1
fig = plt.figure(numfigs)
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(Y, Z, rho[nx//2,:,:,tstep], rstride=1, cstride=1,
cmap=cm.jet, linewidth=0, antialiased=True, shade=True)
ax.set_xlabel('y'); ax.set_ylabel('z'); ax.set_zlim((minmaxrho[0], minmaxrho[1]))
ax.view_init(elevation,angle)
plt.title(r'$\rho(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
plt.savefig(rhosurf)
numfigs += 1
fig = plt.figure(numfigs)
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(Y, Z, et[nx//2,:,:,tstep], rstride=1, cstride=1,
cmap=cm.jet, linewidth=0, antialiased=True, shade=True)
ax.set_xlabel('y'); ax.set_ylabel('z'); ax.set_zlim((minmaxet[0], minmaxet[1]))
ax.view_init(elevation,angle)
plt.title(r'$e_t(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
plt.savefig(etsurf)
# stream plots
numfigs += 1
fig = plt.figure(numfigs,figsize=(12,4))
ax1 = fig.add_subplot(121)
lw = speed / speed.max()
ax1.streamplot(Y, Z, U, V, color='b', linewidth=lw)
ax1.set_xlabel('y'); ax1.set_ylabel('z'); ax1.set_aspect('equal')
ax2 = fig.add_subplot(122)
lw = speedtrue / speedtrue.max()
ax2.streamplot(Y, Z, Utrue, Vtrue, color='k', linewidth=lw)
ax2.set_xlabel('y'); ax2.set_ylabel('z'); ax2.set_aspect('equal')
plt.suptitle(r'$\mathbf{v}(y,z)$ (left) vs $\mathbf{v}_{true}(y,z)$ (right) at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
plt.savefig(vstr)
# contour plots
# numfigs += 1
# fig = plt.figure(numfigs,figsize=(12,4))
# ax1 = fig.add_subplot(121)
# ax1.contourf(Y, Z, rho[nx//2,:,:,tstep])
# plt.colorbar(); ax1.set_xlabel('y'); ax1.set_ylabel('z'); ax1.set_axis('equal')
# ax2 = fig.add_subplot(122)
# ax2.contourf(Y, Z, rhotrue)
# ax2.colorbar(); ax2.set_xlabel('y'); ax2.set_ylabel('z'); ax2.set_axis('equal')
# plt.suptitle(r'$\rho(y,z)$ (left) vs $\rho_{true}(y,z)$ (right) at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
# plt.savefig(rhocont)
# numfigs += 1
# fig = plt.figure(numfigs)
# plt.contourf(Y, Z, et[nx//2,:,:,tstep])
# plt.colorbar(); plt.xlabel('y'); plt.ylabel('z'); plt.axis('equal')
# plt.title(r'$e_t(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
# plt.savefig(etcont)
# line/error plots
rho1d = rho[nx//2,:,nz//2,tstep]
my1d = my[nx//2,:,nz//2,tstep]
mz1d = mz[nx//2,:,nz//2,tstep]
sp1d = speed[:,nz//2]
rhotrue1d = rhotrue[:,nz//2]
mytrue1d = mytrue[:,nz//2]
mztrue1d = mztrue[:,nz//2]
sptrue1d = speedtrue[:,nz//2]
numfigs += 1
fig = plt.figure(numfigs,figsize=(12,4))
ax1 = fig.add_subplot(121)
ax1.plot(ygrid,rho1d,'b--',ygrid,rhotrue1d,'k-')
ax1.legend(('computed','analytical'))
ax1.set_xlabel('y'); ax1.set_ylabel(r'$\rho(y)$')
ax2 = fig.add_subplot(122)
ax2.semilogy(ygrid,np.abs(rho1d-rhotrue1d)+1e-16)
ax2.set_xlabel('y'); ax2.set_ylabel(r'$|\rho-\rho_{true}|$')
plt.suptitle(r'$\rho(y)$ and error at output ' + tstr + ', mesh = ' + nystr)
plt.savefig(rho1dout)
numfigs += 1
fig = plt.figure(numfigs,figsize=(12,4))
ax1 = fig.add_subplot(121)
ax1.plot(ygrid,my1d,'b--',ygrid,mytrue1d,'k-')
ax1.legend(('computed','analytical'))
ax1.set_xlabel('y'); ax1.set_ylabel(r'$m_y(y)$')
ax2 = fig.add_subplot(122)
ax2.semilogy(ygrid,np.abs(my1d-mytrue1d)+1e-16)
ax2.set_xlabel('y'); ax2.set_ylabel(r'$|m_y-m_{y,true}|$')
plt.suptitle(r'$m_y(y)$ and error at output ' + tstr + ', mesh = ' + nystr)
plt.savefig(my1dout)
numfigs += 1
fig = plt.figure(numfigs,figsize=(12,4))
ax1 = fig.add_subplot(121)
ax1.plot(ygrid,mz1d,'b--',ygrid,mztrue1d,'k-')
ax1.legend(('computed','analytical'))
ax1.set_xlabel('y'); ax1.set_ylabel(r'$m_z(y)$')
ax2 = fig.add_subplot(122)
ax2.semilogy(ygrid,np.abs(mz1d-mztrue1d)+1e-16)
ax2.set_xlabel('y'); ax2.set_ylabel(r'$|m_z-m_{z,true}|$')
plt.suptitle(r'$m_z(y)$ and error at output ' + tstr + ', mesh = ' + nystr)
plt.savefig(mz1dout)
numfigs += 1
fig = plt.figure(numfigs,figsize=(12,4))
ax1 = fig.add_subplot(121)
ax1.plot(ygrid,sp1d,'b--',ygrid,sptrue1d,'k-')
ax1.legend(('computed','analytical'))
ax1.set_xlabel('y'); ax1.set_ylabel('s(y)')
ax2 = fig.add_subplot(122)
ax2.semilogy(ygrid,np.abs(sp1d-sptrue1d)+1e-16)
ax2.set_xlabel('y'); ax2.set_ylabel(r'$|s-s_{true}|$')
plt.suptitle(r'$s(y)$ and error at output ' + tstr + ', mesh = ' + nystr)
plt.savefig(sp1dout)
if (showplots):
plt.show()
for i in range(1,numfigs+1):
plt.figure(i), plt.close()
##### end of script #####
| 2.90625
| 3
|
tests/test_installation_commands.py
|
figufema/TesteClone
| 1,521
|
12782823
|
# -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the google.colab._installation_commands package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
import IPython
from IPython.utils import io
from google.colab import load_ipython_extension
MOCKED_COMMANDS = {
'pip install pandas':
"""
Requirement already satisfied: pandas in /usr/local/lib/python2.7/dist-packages (0.22.0)
Requirement already satisfied: pytz>=2011k in /usr/local/lib/python2.7/dist-packages (from pandas) (2018.9)
Requirement already satisfied: python-dateutil in /usr/local/lib/python2.7/dist-packages (from pandas) (2.5.3)
Requirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python2.7/dist-packages (from pandas) (1.16.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python2.7/dist-packages (from python-dateutil->pandas) (1.11.0)
""",
'pip install -U numpy':
"""
Collecting numpy
Downloading https://files.pythonhosted.org/packages/c4/33/8ec8dcdb4ede5d453047bbdbd01916dbaccdb63e98bba60989718f5f0876/numpy-1.16.2-cp27-cp27mu-manylinux1_x86_64.whl (17.0MB)
100% |============================| 17.0MB 660kB/s
fastai 0.7.0 has requirement torch<0.4, but you'll have torch 1.0.1.post2 which is incompatible.
albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.8 which is incompatible.
featuretools 0.4.1 has requirement pandas>=0.23.0, but you'll have pandas 0.22.0 which is incompatible.
Installing collected packages: numpy
Found existing installation: numpy 1.14.6
Uninstalling numpy-1.14.6:
Successfully uninstalled numpy-1.14.6
Successfully installed numpy-1.16.2
"""
}
class MockInteractiveShell(IPython.InteractiveShell):
"""Interactive shell that mocks some commands."""
def system(self, cmd):
if cmd in MOCKED_COMMANDS:
sys.stderr.write('')
sys.stdout.write(MOCKED_COMMANDS[cmd])
self.user_ns['_exit_code'] = 0
else:
return super(MockInteractiveShell, self).system(cmd)
class InstallationCommandsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(InstallationCommandsTest, cls).setUpClass()
cls.ip = MockInteractiveShell()
load_ipython_extension(cls.ip)
def testPipMagicPandas(self):
output = self.run_cell('%pip install pandas')
self.assertEqual([], output.outputs)
self.assertEqual('', output.stderr)
self.assertIn('pandas', output.stdout)
def testPipMagicNumpy(self):
output = self.run_cell('%pip install -U numpy')
self.assertEqual([], output.outputs)
self.assertEqual('', output.stderr)
self.assertIn('numpy', output.stdout)
def run_cell(self, cell_contents):
with io.capture_output() as captured:
self.ip.run_cell(cell_contents)
return captured
| 1.664063
| 2
|
galini/relaxations/continuous.py
|
michaelbynum/galini
| 0
|
12782824
|
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A relaxation that removes integrality constraints on variables."""
from galini.core import Variable, Domain
from galini.relaxations.relaxation import Relaxation, RelaxationResult
class ContinuousRelaxation(Relaxation):
def relaxed_problem_name(self, problem):
return problem.name + '_continuous'
def relax_variable(self, problem, variable):
return Variable(
variable.name,
problem.lower_bound(variable),
problem.upper_bound(variable),
Domain.REAL,
)
def relax_objective(self, problem, objective):
return RelaxationResult(objective)
def relax_constraint(self, problem, constraint):
return RelaxationResult(constraint)
| 2.4375
| 2
|
relevanceai/dataset/write/__init__.py
|
RelevanceAI/RelevanceAI
| 21
|
12782825
|
<filename>relevanceai/dataset/write/__init__.py
from relevanceai.dataset.write.write import Write
| 1.289063
| 1
|
ideas/web/doc-viewer/service/utils/hasher.py
|
ctfcup/2019-task-based
| 1
|
12782826
|
from math import sin
from wsgi import HASH_SALT
def calculate_hash(data: str) -> str:
return _calculate_inner(HASH_SALT + data)
def _calculate_inner(data: str) -> str:
A = 0x12345678
B = 0x9ABCDEF0
C = 0xDEADDEAD
D = 0xC0FEC0FE
E = 0xFEEDBEAF
X = [int(0xFFFFFFFF * sin(i)) & 0xFFFFFFFF for i in range(256)]
def F(X, Y, Z):
return ((~X & Z) | (~X & Z)) & 0xFFFFFFFF
def G(X, Y, Z):
return ((X & Z) | (~Z & Y)) & 0xFFFFFFFF
def H(X, Y, Z):
return (X ^ Y ^ Z) & 0xFFFFFFFF
def I(X, Y, Z):
return (Y ^ (~Z | X)) & 0xFFFFFFFF
def ROL(X, Y):
return (X << Y | X >> (32 - Y)) & 0xFFFFFFFF
for i, ch in enumerate(data):
k, l = ord(ch), i & 0x1f
A = (B + ROL(A + F(B, C, D) + X[k], l)) & 0xFFFFFFFF
B = (C + ROL(B + G(C, D, E) + X[k], l)) & 0xFFFFFFFF
C = (D + ROL(C + H(E, A, B) + X[k], l)) & 0xFFFFFFFF
D = (E + ROL(D + I(C, D, E) + X[k], l)) & 0xFFFFFFFF
E = (A + ROL(E + F(A, B, C) + X[k], l)) & 0xFFFFFFFF
return "".join([hex(x)[2:].zfill(8) for x in [A, B, C, D, E]])
| 2.640625
| 3
|
warning_exceptions.py
|
lmokto/allexceptions
| 0
|
12782827
|
'''
Warning Categories
There are also several exceptions defined for use with the warnings module.
Warning
The base class for all warnings.
UserWarning
Base class for warnings coming from user code.
DeprecationWarning
Used for features no longer being maintained.
PendingDeprecationWarning
Used for features that are soon going to be deprecated.
SyntaxWarning
Used for questionable syntax.
RuntimeWarning
Used for events that happen at runtime that might cause problems.
FutureWarning
Warning about changes to the language or library that are coming at a later time.
ImportWarning
Warn about problems importing a module.
UnicodeWarning
Warn about problems with unicode text.
'''
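# A minimal sketch of how these categories are used with the standard warnings
# module; the message strings below are illustrative only.
if __name__ == "__main__":
    import warnings
    warnings.warn("something in user code looks off", UserWarning)
    warnings.warn("this feature is no longer maintained", DeprecationWarning)
    # Filters may target a single category, e.g. escalate RuntimeWarning to an error:
    warnings.simplefilter("error", RuntimeWarning)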
| 1.648438
| 2
|
comch/cubical/__init__.py
|
smimic/comch
| 4
|
12782828
|
<gh_stars>1-10
from .cubical import Cube
from .cubical import CubicalElement
from .cubical import Cubical
| 1.117188
| 1
|
khan/formatter/__init__.py
|
globocom/mongo-top
| 0
|
12782829
|
<reponame>globocom/mongo-top
import sys
import inspect
from .table_top import TopTable
from .table_replication import ReplicationTable
def formatter_factory(command_name):
klasses = inspect.getmembers(sys.modules[__name__], inspect.isclass)
for klass in klasses:
if klass[1].__formatter_name__ == command_name:
return klass[1]
else:
raise AttributeError('Unknown method: {}'.format(command_name))
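# Illustrative usage (the command name 'top' is an assumption; it must match a
# formatter class's __formatter_name__ attribute):
#   formatter_cls = formatter_factory('top')
#   table = formatter_cls()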
| 2.140625
| 2
|
tests/test_consumer_group.py
|
Yelp/yelp_kafka
| 40
|
12782830
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import time
from multiprocessing import Process
import mock
import pytest
from kafka.common import ConsumerTimeout
from kafka.common import KafkaUnavailableError
from yelp_kafka.config import KafkaConsumerConfig
from yelp_kafka.consumer_group import ConsumerGroup
from yelp_kafka.consumer_group import KafkaConsumerGroup
from yelp_kafka.consumer_group import MultiprocessingConsumerGroup
from yelp_kafka.error import ConsumerGroupError
from yelp_kafka.error import PartitionerError
from yelp_kafka.error import PartitionerZookeeperError
from yelp_kafka.error import ProcessMessageError
@mock.patch('yelp_kafka.consumer_group.Partitioner', autospec=True)
class TestConsumerGroup(object):
topic = 'topic1'
def test__consume(self, mock_partitioner, config):
group = ConsumerGroup(self.topic, config, mock.Mock())
group.consumer = mock.MagicMock()
group.consumer.__iter__.return_value = [
mock.sentinel.message1,
mock.sentinel.message2
]
group.consume(refresh_timeout=1)
assert group.process.call_args_list == [
mock.call(mock.sentinel.message1),
mock.call(mock.sentinel.message2)
]
mock_partitioner.return_value.refresh.assert_called_once_with()
def test__consume_partitioner_errors(self, mock_partitioner, config):
group = ConsumerGroup(self.topic, config, mock.Mock())
group.consumer = mock.MagicMock()
group.consumer.__iter__.return_value = [
mock.sentinel.message1,
mock.sentinel.message2
]
mock_partitioner.return_value.refresh.side_effect = PartitionerError("Boom")
with pytest.raises(PartitionerError):
group.consume(refresh_timeout=1)
mock_partitioner.return_value.refresh.side_effect = PartitionerZookeeperError("Boom")
with pytest.raises(PartitionerZookeeperError):
group.consume(refresh_timeout=1)
def test__consume_error(self, mock_partitioner, config):
group = ConsumerGroup(self.topic, config, mock.Mock(side_effect=Exception("Boom!")))
group.consumer = mock.MagicMock()
group.consumer.__iter__.return_value = [
mock.sentinel.message1,
mock.sentinel.message2
]
with pytest.raises(ProcessMessageError):
group.consume(refresh_timeout=1)
@mock.patch('yelp_kafka.consumer_group.KafkaSimpleConsumer', autospec=True)
def test__acquire(self, mock_consumer, _, config):
group = ConsumerGroup(self.topic, config, mock.Mock())
partitions = {self.topic: [0, 1]}
group._acquire(partitions)
args, _ = mock_consumer.call_args
topic, _, partitions = args
assert topic == self.topic
assert partitions == [0, 1]
mock_consumer.return_value.connect.assert_called_once_with()
@mock.patch('yelp_kafka.consumer_group.KafkaSimpleConsumer', autospec=True)
def test__acquire_no_partitions_assigned(self, mock_consumer, _, config):
group = ConsumerGroup(self.topic, config, mock.Mock())
partitions = {}
group._acquire(partitions)
assert not mock_consumer.called
@mock.patch('yelp_kafka.consumer_group.KafkaSimpleConsumer', autospec=True)
def test__release(self, mock_consumer, _, config):
group = ConsumerGroup(self.topic, config, mock.Mock())
partitions = {self.topic: [0, 1]}
group._acquire(partitions)
group._release(partitions)
mock_consumer.return_value.close.assert_called_once_with()
class TestKafkaConsumerGroup(object):
@pytest.fixture
def example_partitions(self):
return {'a': 'b'}
topic = 'topic1'
group = 'my_group'
def test___init__string_topics(self):
with pytest.raises(AssertionError):
KafkaConsumerGroup(self.topic, None)
def test__should_keep_trying_no_timeout(self, cluster):
config = KafkaConsumerConfig(
self.group,
cluster,
consumer_timeout_ms=-1
)
consumer = KafkaConsumerGroup([], config)
long_time_ago = time.time() - 1000
assert consumer._should_keep_trying(long_time_ago)
@mock.patch('time.time')
def test__should_keep_trying_not_timed_out(self, mock_time, cluster):
mock_time.return_value = 0
config = KafkaConsumerConfig(
self.group,
cluster,
consumer_timeout_ms=1000
)
consumer = KafkaConsumerGroup([], config)
almost_a_second_ago = time.time() - 0.8
assert consumer._should_keep_trying(almost_a_second_ago)
@mock.patch('time.time')
def test__should_keep_trying_timed_out(self, mock_time, cluster):
mock_time.return_value = 0
config = KafkaConsumerConfig(
self.group,
cluster,
consumer_timeout_ms=1000
)
consumer = KafkaConsumerGroup([], config)
over_a_second_ago = time.time() - 1.2
assert not consumer._should_keep_trying(over_a_second_ago)
def test__auto_commit_enabled_is_enabled(self, cluster):
config = KafkaConsumerConfig(
self.group,
cluster,
auto_commit_enable=True
)
consumer = KafkaConsumerGroup([], config)
assert consumer._auto_commit_enabled()
def test__auto_commit_enabled_not_enabled(self, cluster):
config = KafkaConsumerConfig(
self.group,
cluster,
auto_commit_enable=False
)
consumer = KafkaConsumerGroup([], config)
assert not consumer._auto_commit_enabled()
@mock.patch('yelp_kafka.consumer_group.Partitioner')
@mock.patch('yelp_kafka.consumer_group.KafkaConsumer')
def test_next(self, mock_consumer, mock_partitioner, cluster):
config = KafkaConsumerConfig(
self.group,
cluster,
consumer_timeout_ms=500
)
consumer = KafkaConsumerGroup([], config)
consumer.partitioner = mock_partitioner()
consumer.consumer = mock_consumer()
def fake_next():
time.sleep(1)
raise ConsumerTimeout()
consumer.consumer.next.side_effect = fake_next
# The mock KafkaConsumer.next (called fake_next above) takes longer than
# consumer_timeout_ms, so we should get a ConsumerTimeout from
# KafkaConsumerGroup
with pytest.raises(ConsumerTimeout):
consumer.next()
consumer.consumer.next.assert_called_once_with()
consumer.partitioner.refresh.assert_called_once_with()
def test__acquire_has_consumer(
self,
cluster,
example_partitions,
mock_post_rebalance_cb
):
config = KafkaConsumerConfig(
self.group,
cluster,
post_rebalance_callback=mock_post_rebalance_cb
)
consumer = KafkaConsumerGroup([], config)
consumer.consumer = mock.Mock()
consumer._acquire(example_partitions)
consumer.consumer.set_topic_partitions.assert_called_once_with(example_partitions)
mock_post_rebalance_cb.assert_called_once_with(example_partitions)
@mock.patch('yelp_kafka.consumer_group.KafkaConsumer')
def test__acquire_has_no_consumer(self, mock_consumer, cluster, example_partitions):
config = KafkaConsumerConfig(self.group, cluster)
consumer = KafkaConsumerGroup([], config)
consumer._acquire(example_partitions)
mock_consumer.assert_called_once_with(example_partitions, **consumer.config)
def test__release(
self,
cluster,
example_partitions,
mock_pre_rebalance_cb
):
config = KafkaConsumerConfig(
self.group,
cluster,
auto_commit_enable=True,
pre_rebalance_callback=mock_pre_rebalance_cb
)
consumer = KafkaConsumerGroup([], config)
mock_consumer = mock.Mock()
consumer.consumer = mock_consumer
consumer._release(example_partitions)
mock_consumer.commit.assert_called_once_with()
mock_consumer.set_topic_partitions.assert_called_once_with({})
mock_pre_rebalance_cb.assert_called_once_with(example_partitions)
def test__release_retry(self, cluster):
config = KafkaConsumerConfig(
self.group,
cluster,
auto_commit_enable=True
)
consumer = KafkaConsumerGroup([], config)
mock_consumer = mock.Mock()
mock_consumer.set_topic_partitions.side_effect = KafkaUnavailableError
consumer.consumer = mock_consumer
with pytest.raises(KafkaUnavailableError):
consumer._release({})
assert mock_consumer.set_topic_partitions.call_count == 2
class TestMultiprocessingConsumerGroup(object):
topics = ['topic1', 'topic2']
@pytest.fixture
@mock.patch('yelp_kafka.consumer_group.Partitioner', autospec=True)
def group(
self, _,
mock_pre_rebalance_cb,
mock_post_rebalance_cb
):
config = KafkaConsumerConfig(
cluster={'broker_list': ['test_broker:9292'],
'zookeeper': 'zookeeper_uri1:2181,zookeeper_uri2:2181'},
group_id='test_group',
client_id='test_client_id',
max_termination_timeout_secs=0.1,
pre_rebalance_callback=mock_pre_rebalance_cb,
post_rebalance_callback=mock_post_rebalance_cb
)
return MultiprocessingConsumerGroup(
self.topics,
config, mock.Mock()
)
@mock.patch('yelp_kafka.consumer_group.Partitioner', autospec=True)
def test_acquire(self, _, config, mock_post_rebalance_cb):
consumer_factory = mock.Mock()
mock_consumer = mock.Mock()
consumer_factory.return_value = mock_consumer
group = MultiprocessingConsumerGroup(
self.topics,
config, consumer_factory
)
partitions = {
'topic1': [0, 1, 2],
'topic2': [3]
}
with mock.patch(
'yelp_kafka.consumer_group.Process',
autospec=True
) as mock_process:
group.acquire(partitions)
assert all(consumer is mock_consumer
for consumer in group.get_consumers())
assert consumer_factory.call_count == 4
assert mock_process.call_count == 4
assert mock_process.return_value.start.call_count == 4
mock_post_rebalance_cb.assert_called_once_with(partitions)
def test_start_consumer_fail(self, group):
consumer = mock.Mock(topic='Test', partitions=[1, 2, 3])
with mock.patch(
'yelp_kafka.consumer_group.Process',
autospec=True,
) as mock_process:
mock_process.return_value.start.side_effect = Exception("Boom!")
with pytest.raises(ConsumerGroupError):
group.start_consumer(consumer)
def test_release(self, group, mock_pre_rebalance_cb):
consumer = mock.Mock()
args = {'is_alive.return_value': False}
group.consumers = [consumer, consumer]
group.consumer_procs = {
mock.Mock(spec=Process, **args): consumer,
mock.Mock(spec=Process, **args): consumer
}
with mock.patch.object(os, 'kill', autospec=True) as mock_kill:
# Release takes acquired_partitions but in this case it is not used
# so we pass None
group.release(None)
assert not mock_kill.called
assert consumer.terminate.call_count == 2
assert not group.get_consumers()
mock_pre_rebalance_cb.assert_called_once_with(None)
def test_release_and_kill_unresponsive_consumer(self, group):
consumer = mock.Mock()
args = {'is_alive.return_value': True}
group.consumer_procs = {
mock.Mock(spec=Process, **args): consumer,
mock.Mock(spec=Process, **args): consumer
}
with mock.patch.object(os, 'kill', autospec=True) as mock_kill:
# Release takes acquired_partitions but in this case it is not used
# so we pass None
group.release(None)
assert mock_kill.call_count == 2
assert consumer.terminate.call_count == 2
def test_monitor(self, group):
consumer1 = mock.Mock()
consumer2 = mock.Mock()
args1 = {'is_alive.return_value': False}
args2 = {'is_alive.return_value': True}
group.consumer_procs = {
mock.Mock(spec=Process, **args1): consumer1,
mock.Mock(spec=Process, **args2): consumer2,
}
mock_new_proc = mock.Mock()
mock_new_proc.is_alive.return_value = True
with mock.patch.object(
MultiprocessingConsumerGroup, 'start_consumer', autospec=True
) as mock_start:
mock_start.return_value = mock_new_proc
group.monitor()
assert mock_new_proc in group.consumer_procs
mock_start.assert_called_once_with(group, consumer1)
def test_get_consumers(self, group):
        group.consumers = [mock.Mock(), mock.Mock()]
actual = group.get_consumers()
# Test that get_consumers actually returns a copy
assert actual is not group.consumers
assert actual == group.consumers
| 1.835938
| 2
|
models.py
|
ymkjp/pytalki
| 0
|
12782831
|
<filename>models.py
# -*- coding: utf-8 -*-
from sqlalchemy import Column, Integer, String, Boolean, DATETIME, Index
from sqlalchemy.schema import ForeignKey, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation, backref
from faker import Factory
import random
from datetime import datetime
import enum_types
import utils
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
__table_args__ = {'mysql_engine': 'InnoDB'}
id = Column('id', Integer, primary_key=True)
username = Column('username', String(30))
name = Column('name', String(30), nullable=False)
created = Column('created', DATETIME, default=datetime.now, nullable=False)
modified = Column('modified', DATETIME, default=datetime.now, nullable=False)
def __init__(self, username, name):
self.username = username
self.name = name
now = datetime.now()
self.created = now
self.modified = now
# def __repr__(self):
# return "<User('id:%s, name:%s')>" % self.id, self.name
class LangProfile(Base):
__tablename__ = 'lang_profile'
__table_args__ = (
(UniqueConstraint('user_id', 'lang_code', name='unique__idx__user_id__lang_code')),
Index('idx__lang_code__is_teaching', 'lang_code', 'is_teaching'),
{'mysql_engine': 'InnoDB'}
)
id = Column('id', Integer, primary_key=True)
user_id = Column('user_id', Integer,
ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE'), nullable=False)
lang_code = Column('lang_code', utils.EnumType(enum_class=enum_types.LangCode), index=True, nullable=False)
lang_level = Column('lang_level', utils.EnumType(enum_class=enum_types.LangLevel))
is_learning = Column('is_learning', Boolean, index=True, default=False)
is_teaching = Column('is_teaching', Boolean, index=True, default=False)
user = relation("User", backref=backref('lang_profile', order_by=id))
# def __repr__(self):
# return "<LangProfile('user_id:%s,lang_code:%s,lang_level:%s')>" % (
# self.user_id, self.lang_code, self.lang_level)
class Course(Base):
__tablename__ = 'course'
__table_args__ = (
{'mysql_engine': 'InnoDB'}
)
id = Column(Integer, primary_key=True)
user_id = Column('user_id', Integer,
ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE'), nullable=False)
lang_code = Column(utils.EnumType(enum_class=enum_types.LangCode), nullable=False)
lesson_type = Column(utils.EnumType(enum_class=enum_types.LessonType))
minutes = Column(Integer)
itc = Column(Integer)
session_count = Column(Integer, default=0)
rating = Column(Integer, default=0)
user = relation("User", backref=backref('course', order_by=id))
# def __repr__(self):
# return "<Course('%s,%s')>" % self.user_id, self.lang_code
user_table = User.__table__
lang_profile_table = LangProfile.__table__
course_table = Course.__table__
metadata = Base.metadata
def init_db(engine):
Base.metadata.create_all(bind=engine)
faker = Factory.create()
def insert_dummy_data(session):
dummy_users_count = 100
for i in range(dummy_users_count):
add_one_user(session)
session.commit()
def add_one_user(session):
user = User(name=faker.name(), username=faker.user_name())
session.add(user)
session.commit()
# Every user has one native language at least
lang_code_list = random.sample(list(enum_types.LangCode), random.randint(1, 3))
add_lang(session, user, lang_code_list.pop(), enum_types.LangLevel.Native)
for lang_code in lang_code_list:
add_lang(session, user, lang_code, random.choice(list(enum_types.LangLevel)))
def add_lang(session, user, lang_code, lang_level):
is_teaching = faker.boolean() and (5 <= lang_level.value)
is_learning = faker.boolean() and (lang_level.value <= 6)
session.add(LangProfile(user_id=user.id,
lang_code=lang_code,
lang_level=lang_level,
is_learning=is_learning,
is_teaching=is_teaching,
))
if is_teaching:
for i in range(random.randint(1, 5)):
session.add(Course(user_id=user.id,
lang_code=lang_code,
lesson_type=random.choice(list(enum_types.LessonType)),
minutes=random.randint(1, 12) * 10,
itc=random.randint(1, 100) * 10,
session_count=random.randint(0, 1000),
rating=random.randint(0, 5),
))
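# Illustrative usage sketch (not part of the original project): it assumes an in-memory
# SQLite engine purely for demonstration; the models above target MySQL/InnoDB.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite:///:memory:')
    init_db(engine)                           # create user, lang_profile and course tables
    session = sessionmaker(bind=engine)()
    insert_dummy_data(session)                # add 100 fake users with languages and courses
    print(session.query(User).count())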
| 2.390625
| 2
|
GCSs_filtering_and_overlapping.py
|
sutormin94/TopoI_Topo-Seq_1
| 0
|
12782832
|
###############################################
##<NAME>, 2018##
##Topo-Seq analysis##
#The script takes raw GCSs data, returns only trusted GCSs,
#computes GCSs shared between different conditions,
#draws Venn diagrams of the sets overlappings,
#writes GCSs sets.
###############################################
#######
#Packages to be imported.
#######
import os
import matplotlib.pyplot as plt
import collections
from matplotlib_venn import venn2, venn3, venn3_circles
import numpy as np
#######
#Variables to be defined.
#######
print('Variables to be defined:')
#Path to the working directory
pwd="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Science\TopoI-ChIP-Seq\TopA_ChIP-Seq\EcTopoI_G116S_M320V_Topo-Seq\TCS_motifs\\"
#Input data
path_to_replicas={'TopoI_Topo_Seq_1': {'Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_1_Ara_TCSs_called_thr_15.BroadPeak", 'No_Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_1_no_Ara_TCSs_called_thr_15.BroadPeak"},
'TopoI_Topo_Seq_2': {'Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_2_Ara_TCSs_called_thr_15.BroadPeak", 'No_Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_2_no_Ara_TCSs_called_thr_15.BroadPeak"},
'TopoI_Topo_Seq_3': {'Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_3_Ara_TCSs_called_thr_15.BroadPeak", 'No_Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_3_no_Ara_TCSs_called_thr_15.BroadPeak"}}
#Configuration of the output for the GCSs data in replicas.
Replicas_path_out="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Science\TopoI-ChIP-Seq\TopA_ChIP-Seq\EcTopoI_G116S_M320V_Topo-Seq\TCS_motifs\\Replicas_1_2_3_Tresholds_trusted_TCSs\\"
if not os.path.exists(Replicas_path_out):
os.makedirs(Replicas_path_out)
Set_name="Thr_15"
All_conditions_name="TopoI_Topo_Seq_123_TCSs_merged"
#Configuration of the output for GCSs trusted.
Out_path=Replicas_path_out + "TopoI_Topo_Seq_123_TCSs_called_thr_15.BroadPeak"
#Outpath for Venn diagrams.
plot_outpath=Replicas_path_out
#######
#Parsing raw GCSs coordinates, returns dictionary - GCSs_coordinate:N3E.
#######
def read_GCSs_file(GCSs_file_path):
GCSs_dict={}
GCSs_in=open(GCSs_file_path, 'r')
for line in GCSs_in:
line=line.rstrip().split('\t')
if line[0] not in ['GCSs_coordinate']:
GCSs_dict[int(line[1])]=float(line[6])
GCSs_in.close()
return GCSs_dict
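#Note on the expected input (assumption inferred from the indices above and from the
#commented-out writer in combine_replicates below): each BroadPeak line is tab-separated as
#genome_id, start, end, name, score, strand, N3E signal, -1, -1; so line[1] is the TCS
#coordinate and line[6] is the N3E value.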
#######
#Filter controls.
#######
def filter_controls(replicas_path_dict):
    #Filters each Ara replicate against its no-Ara control
TCSs_replicas_dict={}
for set_name, set_pair in replicas_path_dict.items(): #Iterates replicas
#Read files with raw GCSs
Raw_TCSs_dict_Ara=read_GCSs_file(set_pair['Ara'])
Raw_TCSs_dict_no_Ara=read_GCSs_file(set_pair['No_Ara'])
Raw_TCSs_dict_Ara_filtered={}
for TCS_coordinate, TCS_signal in Raw_TCSs_dict_Ara.items():
if TCS_coordinate not in Raw_TCSs_dict_no_Ara:
Raw_TCSs_dict_Ara_filtered[TCS_coordinate]=TCS_signal
TCSs_replicas_dict[set_name]=Raw_TCSs_dict_Ara_filtered
return TCSs_replicas_dict
#######
#Combines replicates into one GCSs table.
#######
def combine_replicates(replicas_path_dict, path_out, name):
#Filter controls.
TCSs_replicas_dict=filter_controls(replicas_path_dict)
#Merges a range of replicates
GCSs_replicas_dict={}
names_ar=[]
for key, Raw_GCSs_dict in TCSs_replicas_dict.items(): #Iterates replicas
names_ar.append(key)
for k, v in Raw_GCSs_dict.items(): #Iterates raw GCSs
#Table filling process initiation
if len(names_ar)==1:
GCSs_replicas_dict[k]=[v]
#Table filling process continuing (the table already contains at least one GCSs set)
else:
#If GCSs is already in the table
if k in GCSs_replicas_dict:
GCSs_replicas_dict[k].append(v)
#If this is the first occurrence of the element in a NON empty table.
else:
add_el=[]
for j in range(len(names_ar)-1):
add_el.append(0)
add_el.append(v)
GCSs_replicas_dict[k]=add_el
#If table body line contains less elements than header does, hence add zero.
for k, v in GCSs_replicas_dict.items():
if len(v)<len(names_ar):
GCSs_replicas_dict[k].append(0)
#Sorting the list of dictionary keys.
GCSs_replicas_dict_sorted=collections.OrderedDict(sorted(GCSs_replicas_dict.items()))
#Writes merged GCSs data
fileout=open(f'{path_out}{name}_TCSs_replicates.txt', 'w')
#TCSs_out.write(f'{Genome_ID}\t{TCSs_list_F[i][0]}\t{TCSs_list_F[i][0]+1}\tTCS_{i}_F\t10\t.\t{TCSs_list_F[i][1]}\t-1\t-1\n')
#Header
fileout.write('TCSs_coordinate\t')
for i in names_ar:
fileout.write(str(i) + '_N3E\t')
fileout.write('\n')
#Body of the table
for k, v in GCSs_replicas_dict_sorted.items():
fileout.write(str(k) + '\t')
for i in GCSs_replicas_dict_sorted[k]:
fileout.write(str(i) + '\t')
fileout.write('\n')
fileout.close()
return GCSs_replicas_dict
#Prepares GCSs table for all conditions
#combine_replicates(path_to_replicas, Replicas_path_out, All_conditions_name)
#######
#Returns only trusted GCSs - observed at least 2 times within 3 biological replicates.
#Data organization: 1. coordinate of GCSs, 2.-4. N3E values for biological replicates 1-3
#######
def trusted(ar):
av_height=0
ind=0
for i in range(len(ar)):
if ar[i]>0:
ind=ind+1
av_height=av_height+ar[i]
if ind>1:
return av_height/ind
else:
return "No signal"
def trusted_GCSs_calling(GCSs_dictionary):
ar=[]
for k, v in GCSs_dictionary.items():
if trusted(v)!="No signal":
ar.append([k, trusted(v)])
return ar
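#Worked example (illustrative): trusted([12.0, 0, 8.0]) sees the signal in two of the
#three replicates and returns the average (12.0+8.0)/2=10.0, whereas trusted([0, 5.0, 0])
#is supported by a single replicate only and returns "No signal", so the site is dropped.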
def replicas_comb_trust_wrapper(replicas_dict, path_out, name):
print('Now working with: ' + str(name))
cur_GCSs_dict=combine_replicates(replicas_dict, path_out, name)
cur_GCSs_trusted=trusted_GCSs_calling(cur_GCSs_dict)
print('Number of trusted TCSs for ' + str(name) + ' : ' + str(len(cur_GCSs_trusted)))
return cur_GCSs_trusted
TCSs_trusted=replicas_comb_trust_wrapper(path_to_replicas, Replicas_path_out, All_conditions_name)
#Antibs_GCSs_sets=[Cfx, RifCfx, Micro, Oxo]
#######
#GCSs shared between pairs of antibiotics - Cfx, Micro and Oxo and between Cfx and RifCfx.
#######
def pairs_construction(ar1, ar2):
double=[]
for i in range(len(ar1)):
for j in range(len(ar2)):
if ar1[i][0]==ar2[j][0]:
double.append([ar1[i][0], ar1[i][1], ar2[j][1]]) #GCSs coordinate, N3E_1, N3E_2
return double
#Cfx_RifCfx_shared_GCSs=pairs_construction(Cfx, RifCfx)
#print('Number of GCSs shared between Cfx and RifCfx: ' + str(len(Cfx_RifCfx_shared_GCSs)) + '\n')
#
#Cfx_Micro_shared_GCSs=pairs_construction(Cfx, Micro)
#Cfx_Oxo_shared_GCSs=pairs_construction(Cfx, Oxo)
#Micro_Oxo_shared_GCSs=pairs_construction(Micro, Oxo)
#
#print('Number of GCSs shared between Cfx and Micro: ' + str(len(Cfx_Micro_shared_GCSs)))
#print('Number of GCSs shared between Cfx and Oxo: ' + str(len(Cfx_Oxo_shared_GCSs)))
#print('Number of GCSs shared between Micro and Oxo: ' + str(len(Micro_Oxo_shared_GCSs)) + '\n')
#
#Antibs_GCSs_sets_pair_shared=[Cfx_Micro_shared_GCSs, Cfx_Oxo_shared_GCSs, Micro_Oxo_shared_GCSs]
#######
#GCSs shared between 3 antibiotics
#######
def triple_construction(ar12, ar3):
triple=[]
for i in range(len(ar12)):
for j in range(len(ar3)):
if ar12[i][0]==ar3[j][0]:
triple.append([ar12[i][0], ar12[i][1], ar12[i][2], ar3[j][1]]) #GCSs coordinate, N3E_1, N3E_2, N3E_3
return triple
#Cfx_Micro_Oxo_shared_GCSs=triple_construction(Cfx_Micro_shared_GCSs, Oxo)
#print('Number of GCSs shared between Cfx, Micro and Oxo: ' + str(len(Cfx_Micro_Oxo_shared_GCSs)) +'\n')
#######
#Parses replicas, overlaps lists of GCSs, output data for Venn diagram construction.
#######
def replicates_parsing_to_list_and_overlapping(replicas_dict, name):
#Parsing
GCSs_dict={}
for k, v in replicas_dict.items(): #Iterate replicas.
GCSs_dict[k]=[]
for c, h in read_GCSs_file(v).items(): #Iterate GCSs.
GCSs_dict[k].append([c, h])
#Overlapping
one_two=pairs_construction(GCSs_dict[name+str(1)], GCSs_dict[name+str(2)])
one_three=pairs_construction(GCSs_dict[name+str(1)], GCSs_dict[name+str(3)])
two_three=pairs_construction(GCSs_dict[name+str(2)], GCSs_dict[name+str(3)])
one_two_three=triple_construction(one_two, GCSs_dict[name+str(3)])
#Venn input description (for 3 sets): one, two, three, one_two, one_three, two_three, one_two_three
venn_input=[len(GCSs_dict[name+str(1)])-len(one_two)-len(one_three)+len(one_two_three),
len(GCSs_dict[name+str(2)])-len(one_two)-len(two_three)+len(one_two_three),
len(one_two)-len(one_two_three),
len(GCSs_dict[name+str(3)])-len(one_three)-len(two_three)+len(one_two_three),
len(one_three)-len(one_two_three), len(two_three)-len(one_two_three),
len(one_two_three)]
return venn_input
#######
#Venn diagram represents GCSs sets overlapping.
#description2: one, two, one_two
#description3: one, two, one_two, three, one_three, two_three, one_two_three
#######
#venn_data_2=[len(Cfx)-len(Cfx_RifCfx_shared_GCSs), len(RifCfx)-len(Cfx_RifCfx_shared_GCSs), len(Cfx_RifCfx_shared_GCSs)]
#venn_data_3=[len(Cfx)-len(Cfx_Micro_shared_GCSs)-len(Cfx_Oxo_shared_GCSs)+len(Cfx_Micro_Oxo_shared_GCSs),
# len(Micro)-len(Cfx_Micro_shared_GCSs)-len(Micro_Oxo_shared_GCSs)+len(Cfx_Micro_Oxo_shared_GCSs),
# len(Cfx_Micro_shared_GCSs)-len(Cfx_Micro_Oxo_shared_GCSs),
# len(Oxo)-len(Cfx_Oxo_shared_GCSs)-len(Micro_Oxo_shared_GCSs)+len(Cfx_Micro_Oxo_shared_GCSs),
# len(Cfx_Oxo_shared_GCSs)-len(Cfx_Micro_Oxo_shared_GCSs),
# len(Micro_Oxo_shared_GCSs)-len(Cfx_Micro_Oxo_shared_GCSs),
# len(Cfx_Micro_Oxo_shared_GCSs)]
#venn2(subsets = (venn_data_2), set_labels = ("Ciprofloxacin", "Rifampicin Ciprofloxacin"))
#plt.savefig(plot_outpath+'Cfx_RifCfx_venn.png', dpi=320)
#plt.close()
#
#print("Cfx Micro Oxo subsets volumes: " + str(venn_data_3))
#venn3(subsets = (venn_data_3), set_labels = ('Ciprofloxacin', 'Microcin B17', 'Oxolinic acid'))
#plt.savefig(plot_outpath+'Cfx_Micro_Oxo_venn.png', dpi=320)
#plt.close()
#
#venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_cfx_replicas, 'Cfx_')), set_labels = ('Cfx_1', 'Cfx_2', 'Cfx_3'))
#plt.savefig(plot_outpath+'Cfx_replicas_venn.png', dpi=320)
#plt.close()
#
#venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_rifcfx_replicas, 'RifCfx_')), set_labels = ('RifCfx_1', 'RifCfx_2', 'RifCfx_3'))
#plt.savefig(plot_outpath+'RifCfx_replicas_venn.png', dpi=320)
#plt.close()
#
#venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_microcin_replicas, 'Micro_')), set_labels = ('Micro_1', 'Micro_2', 'Micro_3'))
#plt.savefig(plot_outpath+'Micro_replicas_venn.png', dpi=320)
#plt.close()
#
#venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_oxo_replicas, 'Oxo_')), set_labels = ('Oxo_1', 'Oxo_2', 'Oxo_3'))
#plt.savefig(plot_outpath+'Oxo_replicas_venn.png', dpi=320)
#plt.close()
#######
#GCSs sets average N3E estimation.
#######
def average_height(ar):
av_he=0
for i in range(len(ar)):
peak_he=np.mean(ar[i][1:])
av_he=av_he+peak_he
return av_he/len(ar)
#print('Cfx average GCSs N3E: ' + str(average_height(Cfx)))
#print('Micro average GCSs N3E: ' + str(average_height(Micro)))
#print('Oxo average GCSs N3E: ' + str(average_height(Oxo)))
#print('Cfx and Micro average GCSs N3E: ' + str(average_height(Cfx_Micro_shared_GCSs)))
#print('Cfx and Oxo average GCSs N3E: ' + str(average_height(Cfx_Oxo_shared_GCSs)))
#print('Micro and Oxo average GCSs N3E: ' + str(average_height(Micro_Oxo_shared_GCSs)))
#print('Cfx, Micro and Oxo average GCSs N3E: ' + str(average_height(Cfx_Micro_Oxo_shared_GCSs)) + '\n')
#######
#Write down files with GCSs lists - trusted or shared.
#######
#All_GCSs_sets={Cfx_path: Antibs_GCSs_sets[0],
# RifCfx_path: Antibs_GCSs_sets[1],
# Micro_path: Antibs_GCSs_sets[2],
# Oxo_path: Antibs_GCSs_sets[3],
# Cfx_Micro_path: Antibs_GCSs_sets_pair_shared[0],
# Cfx_Oxo_path: Antibs_GCSs_sets_pair_shared[1],
# Micro_Oxo_path: Antibs_GCSs_sets_pair_shared[2],
# Cfx_Micro_Oxo_path: Cfx_Micro_Oxo_shared_GCSs}
def write_GCSs_file(dictionary):
for k, v in dictionary.items(): #Iterates lists to be written
v.sort(key=lambda tup: tup[0]) #Sorting lists by the zero elements of the sublists they consist of
fileout=open(k, 'w')
fileout.write('GCSs_coordinate\tN3E\n')
for i in range(len(v)):
fileout.write(str(v[i][0]) + '\t' + str(np.mean(v[i][1:])) + '\n')
fileout.close()
return
#write_GCSs_file(All_GCSs_sets)
def write_Cfx_RifCfx_shared_GCSs(ar, path):
fileout=open(path, 'w')
fileout.write('GCSs_coordinate\tCfx_N3E\tRifCfx_N3E\n')
ar.sort(key=lambda tup: tup[0])
for i in range(len(ar)):
fileout.write(str(ar[i][0]) + '\t' + str(ar[i][1]) + '\t' + str(ar[i][2]) + '\n')
fileout.close()
return
#write_Cfx_RifCfx_shared_GCSs(Cfx_RifCfx_shared_GCSs, Cfx_RifCfx_shared_GCSs_path)
#
#print('Script ended its work successfully!')
| 2.296875
| 2
|
BeeFiles/Bee.py
|
Nezgun/Exploring-Bees
| 0
|
12782833
|
<filename>BeeFiles/Bee.py
# -*- coding: utf-8 -*-
#from DataStorageSystems.Stack import Stack
#from DataStorageSystems.LinkedList import LinkedList
class Bee(object):
def __init__(self, location, name, home):
#General Bee Metadata
self._home = home
self._name = name #Bee's ID
self.location = None #Current location => starts in hive
#importantMemory
#genericMemory linkedlist
#self.generalMemory = LinkedList()
#hiveMemory
#movementQueue
| 2.484375
| 2
|
program/transformer/if_transformer.py
|
mmsbrggr/polar
| 2
|
12782834
|
<reponame>mmsbrggr/polar
from typing import List
from singledispatchmethod import singledispatchmethod
from program.condition import TrueCond, And, Not, Condition
from program.ifstatem import IfStatem
from program.assignment import Assignment, PolyAssignment
from program.transformer.transformer import TreeTransformer
from utils import get_unique_var
class IfTransformer(TreeTransformer):
"""
Removes all if-statements from the program and moves the branch conditions into the conditions
of the assignments. So the transformer basically flattens the structure.
"""
@singledispatchmethod
def transform(self, element):
return element
@transform.register
def _(self, ifstmt: IfStatem):
conditions = ifstmt.conditions
branches: List[List[Assignment]] = ifstmt.branches
if ifstmt.else_branch:
branches.append(ifstmt.else_branch)
conditions.append(TrueCond())
# If a variable in a condition is assigned within a branch we need to store its old value at the beginning
# and use the old value in all conditions
condition_symbols = self.__get_all_symbols__(conditions)
rename_subs = {}
# Now, move the conditions of the if-statement into the conditions of the assignments
not_previous = TrueCond()
for i, branch in enumerate(branches):
current_condition = conditions[i] if ifstmt.mutually_exclusive else And(not_previous, conditions[i])
# Remember variables which appear in a condition and are assigned within the branch
current_rename_subs = {}
for assign in branch:
if assign.variable in condition_symbols:
if assign.variable in rename_subs:
current_rename_subs[assign.variable] = rename_subs[assign.variable]
else:
current_rename_subs[assign.variable] = get_unique_var(name="old")
rename_subs[assign.variable] = current_rename_subs[assign.variable]
extra_condition = current_condition.copy().simplify()
extra_condition.subs(current_rename_subs)
# Add the branch conditions to the assignments
for assign in branch:
assign.add_to_condition(extra_condition)
assign.simplify_condition()
not_previous = And(not_previous, Not(conditions[i].copy()))
all_assigns = [assign for branch in branches for assign in branch]
# Before the if-statement we need to add assignments to actually store the old values of variables
# appearing in conditions and being assigned within a branch
rename_assigns = []
for orig_var, new_var in rename_subs.items():
rename_assigns.append(PolyAssignment.deterministic(new_var, orig_var))
all_assigns = rename_assigns + all_assigns
return tuple(all_assigns)
def __get_all_symbols__(self, conditions: List[Condition]):
all_symbols = set()
for c in conditions:
all_symbols |= c.get_free_symbols()
return all_symbols
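# Illustrative sketch of the flattening described in the class docstring (hypothetical
# program, not taken from the polar codebase): a statement such as
#
#     if x > 0:
#         y = y + 1
#     else:
#         y = y - 1
#
# is turned into two assignments guarded by conditions,
#
#     y = y + 1   [x > 0]
#     y = y - 1   [not (x > 0)]
#
# and if x itself were reassigned inside a branch, an extra "old_x = x" assignment would
# be emitted first so that all branch conditions keep referring to the pre-branch value.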
| 2.609375
| 3
|
datareturn/settings.py
|
PersonalGenomesOrg/datareturn
| 0
|
12782835
|
<gh_stars>0
import os
from django.conf import global_settings
from env_tools import apply_env
# Apply the environment variables in the .env file.
apply_env()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Required by django-allauth
'django.contrib.sites',
# Main app for this site.
'datareturn',
# Third party apps
'allauth',
'allauth.account',
'markdown_deux',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
AUTHENTICATION_BACKENDS = (
# Allow login with token instead of password.
'datareturn.backends.UserTokenBackend',
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
ROOT_URLCONF = 'datareturn.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# We use Sites and associated config to customize templates.
'datareturn.context_processors.site',
],
},
},
]
WSGI_APPLICATION = 'datareturn.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Parse database configuration from $DATABASE_URL
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Sites required by django-allauth.
SITE_ID = 1
# Open Humans base URL. Defaults to main site, can be changed for dev purposes.
OPEN_HUMANS_SERVER = os.getenv('OPEN_HUMANS_SERVER', 'https://www.openhumans.org')
OPEN_HUMANS_REDIRECT_URI = os.getenv('OPEN_HUMANS_REDIRECT_URI')
OPEN_HUMANS_CLIENT_ID = os.getenv('OPEN_HUMANS_CLIENT_ID')
OPEN_HUMANS_CLIENT_SECRET = os.getenv('OPEN_HUMANS_CLIENT_SECRET')
# File storage on S3 and AWS credentials.
DEFAULT_FILE_STORAGE = 'datareturn.models.PrivateStorage'
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_S3_STORAGE_BUCKET_NAME')
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
STATIC_ROOT = 'staticfiles'
# Settings for django-allauth and account interactions.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_VERIFICATION = 'none'
LOGIN_REDIRECT_URL = 'home'
############################################################
# Heroku settings
if os.getenv('HEROKU_SETUP') in ['true', 'True']:
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
DEBUG = False
ALLOWED_HOSTS = ['*']
# Email set up.
EMAIL_BACKEND = os.getenv('EMAIL_BACKEND', global_settings.EMAIL_BACKEND)
if os.getenv('EMAIL_USE_TLS') in ['true', 'True']:
EMAIL_USE_TLS = True
else:
EMAIL_USE_TLS = global_settings.EMAIL_USE_TLS
EMAIL_HOST = os.getenv('EMAIL_HOST', global_settings.EMAIL_HOST)
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER', global_settings.EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD',
global_settings.EMAIL_HOST_PASSWORD)
EMAIL_PORT = int(os.getenv('EMAIL_PORT', str(global_settings.EMAIL_PORT)))
DEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL', global_settings.DEFAULT_FROM_EMAIL)
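# Illustrative .env sketch (placeholder values only) covering the variables read above
# via os.getenv:
#
#   SECRET_KEY=change-me
#   OPEN_HUMANS_CLIENT_ID=...
#   OPEN_HUMANS_CLIENT_SECRET=...
#   OPEN_HUMANS_REDIRECT_URI=https://example.org/complete
#   AWS_ACCESS_KEY_ID=...
#   AWS_SECRET_ACCESS_KEY=...
#   AWS_S3_STORAGE_BUCKET_NAME=my-bucket
#   HEROKU_SETUP=false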
| 1.601563
| 2
|
recipes/recipe_modules/third_party_packages/examples/go.py
|
xinghun61/infra
| 2
|
12782836
|
<filename>recipes/recipe_modules/third_party_packages/examples/go.py
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""Recipe for Go toolchain building.
During testing, it may be useful to focus on building Go. This can be done
by running this recipe module directly.
"""
from recipe_engine.recipe_api import Property
DEPS = [
'depot_tools/cipd',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/url',
'third_party_packages',
]
PROPERTIES = {
'platform_name': Property(default=None, kind=str),
'platform_bits': Property(default=None, kind=int),
'dry_run': Property(default=True, kind=bool),
}
PLATFORMS = (
('linux', 32, 'linux-386'),
('linux', 64, 'linux-amd64'),
('mac', 64, 'mac-amd64'),
('win', 32, 'windows-386'),
('win', 64, 'windows-amd64'),
)
def RunSteps(api, platform_name, platform_bits, dry_run):
api.third_party_packages.dry_run = dry_run
if not dry_run:
api.cipd.set_service_account_credentials(
api.cipd.default_bot_service_account_credentials)
api.third_party_packages.go.package(
platform_name=platform_name,
platform_bits=platform_bits)
def GenTests(api):
go = api.third_party_packages.go
version = '1.2.3' + go.PACKAGE_VERSION_SUFFIX
for name, bits, platform in PLATFORMS:
package_name = go.PACKAGE_TEMPLATE % {'platform': platform}
yield (
api.test('%s_%d' % (name, bits)) +
api.platform('linux', 32) +
api.properties(
platform_name=name,
platform_bits=bits,
dry_run=False,
) +
api.step_data(
'cipd search %s version:%s' % (package_name, version),
api.cipd.example_search(package_name, instances=0))
)
package_name = go.PACKAGE_TEMPLATE % {'platform': 'linux-386'}
yield (
api.test('exists') +
api.platform('linux', 32) +
api.properties(
dry_run=False,
) +
api.step_data(
'cipd search %s version:%s' % (package_name, version),
api.cipd.example_search(package_name, instances=1))
)
| 1.796875
| 2
|
thesaurus.py
|
xnaas/custom-bot-commands
| 0
|
12782837
|
"""
Original author: xnaas (2022)
License: The Unlicense (public domain)
"""
import requests
from sopel import plugin, formatting
from sopel.config.types import StaticSection, ValidatedAttribute
class ThesaurusSection(StaticSection):
api_key = ValidatedAttribute("api_key", str)
def setup(bot):
bot.config.define_section("thesaurus", ThesaurusSection)
def configure(config):
config.define_section("thesaurus", ThesaurusSection)
config.thesaurus.configure_setting("api_key", "dictionaryapi.com api key")
@plugin.command("syn", "synonym")
@plugin.output_prefix("[synonym] ")
def synonyms(bot, trigger):
word = formatting.plain(trigger.group(3))
url = f"https://www.dictionaryapi.com/api/v3/references/thesaurus/json/{word}"
key = {"key": bot.config.thesaurus.api_key}
try:
synonyms = requests.get(url, params=key).json()[0]["meta"]["syns"][0]
bot.say(", ".join(synonyms), max_messages=2)
except IndexError:
bot.reply("No results.")
@plugin.command("ant", "antonym")
@plugin.output_prefix("[antonym] ")
def antonyms(bot, trigger):
word = formatting.plain(trigger.group(3))
url = f"https://www.dictionaryapi.com/api/v3/references/thesaurus/json/{word}"
key = {"key": bot.config.thesaurus.api_key}
try:
antonyms = requests.get(url, params=key).json()[0]["meta"]["ants"][0]
bot.say(", ".join(antonyms), max_messages=2)
except IndexError:
bot.reply("No results.")
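# Illustrative Sopel config sketch (section and key names match ThesaurusSection above;
# the value is a placeholder, not a real key):
#
#   [thesaurus]
#   api_key = your-dictionaryapi-com-key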
| 2.296875
| 2
|
app/lists/serializers.py
|
marcosvbras/inimex-api
| 0
|
12782838
|
<reponame>marcosvbras/inimex-api
from rest_framework import serializers
from rest_framework.pagination import PageNumberPagination
from django.conf import settings
from .models import MyList
class MyListSerializer(serializers.ModelSerializer):
class Meta:
model = MyList
fields = '__all__'
class MyListPagination(PageNumberPagination):
page_size = settings.DEFAULT_PAGE_SIZE
| 2
| 2
|
mkab/config.py
|
grplyler/mkab
| 0
|
12782839
|
import os
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.expanduser('~/.key.json')
| 1.398438
| 1
|
migrations/versions/640_add_framework_agreement_version_to_.py
|
uk-gov-mirror/alphagov.digitalmarketplace-api
| 25
|
12782840
|
<reponame>uk-gov-mirror/alphagov.digitalmarketplace-api
"""add framework_agreement_version to Framework
Revision ID: 640
Revises: 630
Create Date: 2016-06-16 11:37:21.802880
"""
# revision identifiers, used by Alembic.
revision = '640'
down_revision = '630'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('frameworks', sa.Column('framework_agreement_version', sa.String(), nullable=True))
op.execute("""
UPDATE frameworks SET framework_agreement_version = 'v1.0' WHERE slug = 'g-cloud-8'
""")
def downgrade():
op.drop_column('frameworks', 'framework_agreement_version')
| 1.164063
| 1
|
hphp/tools/gdb/gdbutils.py
|
tmotyl/hiphop-php
| 1
|
12782841
|
"""
Assorted utilities for HHVM GDB bindings.
"""
# @lint-avoid-python-3-compatibility-imports
import collections
import functools
import gdb
#------------------------------------------------------------------------------
# Memoization.
def memoized(func):
"""Simple memoization decorator that ignores **kwargs."""
cache = {}
@functools.wraps(func)
def memoizer(*args):
if not isinstance(args, collections.Hashable):
return func(*args)
if args not in cache:
cache[args] = func(*args)
return cache[args]
return memoizer
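# Illustrative usage (not part of the original module): the cache key is the positional
# argument tuple, so repeated calls with the same arguments are served from the dict.
#
#   @memoized
#   def thing_type(name):               # hypothetical helper
#       return gdb.lookup_type(name)
#
#   thing_type('HPHP::StringData')      # first call performs the lookup
#   thing_type('HPHP::StringData')      # second call returns the cached gdb.Type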
#------------------------------------------------------------------------------
# General-purpose helpers.
def parse_argv(args):
return [gdb.parse_and_eval(arg) for arg in gdb.string_to_argv(args)]
def vstr(value):
"""Stringify a value without pretty-printing."""
for pp in gdb.pretty_printers:
try:
pp.saved = pp.enabled
except AttributeError:
pp.saved = True
pp.enabled = False
ret = unicode(value)
for pp in gdb.pretty_printers:
pp.enabled = pp.saved
return ret
#------------------------------------------------------------------------------
# Caching lookups.
@memoized
def T(name):
return gdb.lookup_type(name)
@memoized
def V(name):
return gdb.lookup_symbol(name)[0].value()
@memoized
def K(name):
return gdb.lookup_global_symbol(name).value()
| 2.4375
| 2
|
segar/tasks/billiards.py
|
fgolemo/segar
| 19
|
12782842
|
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI Institute"
__license__ = "MIT"
"""Billiards game
"""
__all__ = ("billiards_default_config", "Billiards", "BilliardsInitialization")
import math
from typing import Optional
import numpy as np
from segar.mdps.initializations import ArenaInitialization
from segar.mdps.rewards import dead_reward_fn, l2_distance_reward_fn
from segar.mdps.tasks import Task
from segar.rendering.rgb_rendering import register_color
from segar.factors import (
Label,
Mass,
Charge,
Shape,
Text,
Circle,
GaussianNoise,
Size,
Position,
ID,
Done,
Alive,
Visible,
Velocity,
)
from segar.rules import Prior
from segar.things import Ball, Hole, Entity, Object
from segar.sim.location_priors import RandomBottomLocation
_DEFAULT_CUEBALL_MASS = 1.0
_DEFAULT_CUEBALL_CHARGE = 1.0
_DEFAULT_BALL_MASS = 1.0
_DEFAULT_BALL_SIZE = 0.2
_DEFAULT_BALL_CHARGE = 1.0
_DEFAULT_HOLE_SIZE = 0.3
_DEFAULT_DEAD_REWARD = -100.0
_HOLE_DISTANCE_THRESH = 1e-4
_MAX_BALL_AT_GOAL_VEL = None
_ACTION_RANGE = (-100, 100)
def billiard_ball_positions(
start: list[float, float], r: float = _DEFAULT_BALL_SIZE / 2 + 1e-3, n: int = 10
) -> list[list[float, float]]:
x, y = start
sq2r = math.sqrt(2.0) * r
positions = [start]
positions += [[x - sq2r, y + sq2r], [x + sq2r, y + sq2r]]
positions += [
[x - 2 * sq2r, y + 2 * sq2r],
[x, y + 2 * sq2r],
[x + 2 * sq2r, y + 2 * sq2r],
]
positions += [
[x - 3 * sq2r, y + 3 * sq2r],
[x - sq2r, y + 3 * sq2r],
[x + sq2r, y + 3 * sq2r],
[x + 3 * sq2r, y + 3 * sq2r],
]
positions = positions[:n]
return positions
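# Illustrative check (with the default radius r = _DEFAULT_BALL_SIZE / 2 + 1e-3 = 0.101):
# the rack is built row by row above the apex, so billiard_ball_positions([0.0, 0.0], n=3)
# returns [0.0, 0.0] plus the two second-row balls at
# [-sqrt(2)*0.101, sqrt(2)*0.101] and [sqrt(2)*0.101, sqrt(2)*0.101].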
class CueBall(
Object,
default={
Label: "cueball",
Mass: _DEFAULT_CUEBALL_MASS,
Charge: _DEFAULT_CUEBALL_CHARGE,
Shape: Circle(0.2),
Text: "X",
ID: "cueball",
},
):
pass
billiards_default_config = {
"numbers": [(CueBall, 1)],
"priors": [
Prior(
Size,
GaussianNoise(
_DEFAULT_BALL_SIZE,
0.01,
clip=(_DEFAULT_BALL_SIZE / 2.0, 3 * _DEFAULT_BALL_SIZE / 2.0),
),
entity_type=CueBall,
),
Prior(Size, _DEFAULT_BALL_SIZE, entity_type=Ball),
Prior(Mass, _DEFAULT_BALL_MASS, entity_type=Ball),
Prior(Size, _DEFAULT_HOLE_SIZE, entity_type=Hole),
Prior(Position, RandomBottomLocation(), entity_type=CueBall),
],
}
class BilliardsInitialization(ArenaInitialization):
"""Initialization of billiards derived from arena initialization.
Adds a cueball, holes, and other billiard balls.
"""
def __init__(self, config=None):
self.cueball_id = None
self.ball_ids = []
self.hole_ids = []
super().__init__(config=config)
register_color("cueball", (255, 255, 255))
def sample(self, max_iterations: int = 100) -> list[Entity]:
self.ball_ids.clear()
self.hole_ids.clear()
sampled_things = super().sample(max_iterations=max_iterations)
ball_positions = billiard_ball_positions([0.0, 0.0])
for i, pos in enumerate(ball_positions):
ball = Ball({Position: pos, Text: f"{i + 1}", ID: f"{i + 1}_ball"})
sampled_things.append(ball)
hole_positions = [[-0.9, -0.9], [-0.9, 0.9], [0.9, -0.9], [0.9, 0.9]]
for i, pos in enumerate(hole_positions):
hole = Hole({Position: pos, ID: f"{i}_hole", Size: _DEFAULT_HOLE_SIZE})
sampled_things.append(hole)
has_cueball = False
has_balls = False
has_holes = False
for thing in sampled_things:
if isinstance(thing, CueBall):
has_cueball = True
self.cueball_id = thing[ID]
if isinstance(thing, Ball):
has_balls = True
self.ball_ids.append(thing[ID])
if isinstance(thing, Hole):
has_holes = True
self.hole_ids.append(thing[ID])
if not has_cueball:
raise ValueError("cueball wasn't created.")
if not has_balls:
raise ValueError("balls weren't created.")
if not has_holes:
raise ValueError("holes weren't created.")
return sampled_things
def set_arena(self, init_things: Optional[list[Entity]] = None) -> None:
super().set_arena(init_things)
if self.cueball_id is None:
raise RuntimeError("Cueball was not set in arena.")
if len(self.ball_ids) == 0:
raise RuntimeError("Balls not set in arena.")
if len(self.hole_ids) == 0:
raise RuntimeError("Holes not set in arena.")
class Billiards(Task):
"""Billiards game.
Agent controls the cue ball. Hit the cue ball into billiard balls and
get them into holes. Avoid getting the cue ball into the holes.
"""
def __init__(
self,
initialization: BilliardsInitialization,
action_range: tuple[float, float] = _ACTION_RANGE,
action_shape: tuple[int, ...] = (2,),
dead_reward: float = _DEFAULT_DEAD_REWARD,
hole_distance_threshold: float = _HOLE_DISTANCE_THRESH,
max_ball_at_hole_velocity: float = _MAX_BALL_AT_GOAL_VEL,
):
"""
:param initialization: Initialization object used for initializing
the arena.
:param action_range: Range of actions used by the agent.
:param action_shape: Shape of actions.
:param dead_reward: Reward when cue ball is `dead`.
:param hole_distance_threshold: Distance between billiard ball and hole
under which to stop.
:param max_ball_at_hole_velocity: Max billiard ball velocity under
which to stop.
"""
action_type = np.float16
baseline_action = np.array([0, 0]).astype(action_type)
super().__init__(
action_range=action_range,
action_shape=action_shape,
action_type=action_type,
baseline_action=baseline_action,
initialization=initialization,
)
self._dead_reward = dead_reward
self._hole_distance_threshold = hole_distance_threshold
self._max_ball_at_hole_velocity = max_ball_at_hole_velocity
@property
def cueball_id(self) -> ID:
if not hasattr(self._initialization, "cueball_id"):
raise AttributeError(
"Initialization must define `cueball_id` to " "be compatible with task."
)
cueball_id = self._initialization.cueball_id
if cueball_id is None:
raise ValueError("`cueball_id` is not set yet.")
return cueball_id
@property
def hole_ids(self) -> list[ID]:
if not hasattr(self._initialization, "hole_ids"):
raise AttributeError(
"Initialization must define `hole_ids` to " "be compatible with task."
)
hole_ids = self._initialization.hole_ids
return hole_ids
@property
def ball_ids(self) -> list[ID]:
if not hasattr(self._initialization, "ball_ids"):
raise AttributeError(
"Initialization must define `ball_ids` to " "be compatible with task."
)
ball_ids = self._initialization.ball_ids
return ball_ids
def reward(self, state: dict) -> float:
"""Reward determined by the distance of the billiard balls to the
nearest hold and whether the cue ball is in a hole (dead).
:param state: States
:return: (float) the reward.
"""
ball_state = state["things"][self.cueball_id]
dead_reward = dead_reward_fn(ball_state, self._dead_reward)
# Distance reward is tricky: can't do it directly from states
# because sim owns scaling
distance_reward = 0.0
for ball_id in self.ball_ids:
distance = min([self.sim.l2_distance(ball_id, hole_id) for hole_id in self.hole_ids])
if distance <= self._hole_distance_threshold:
self.sim.change_thing_state(ball_id, Alive, False)
self.sim.change_thing_state(ball_id, Visible, False)
distance_reward += l2_distance_reward_fn(distance)
return dead_reward + distance_reward
def done(self, state: dict) -> bool:
"""Episode is done if the cue ball is dead or if all of the billiard
balls are in the holes.
:param state: The states.
:return: True if the state indicates the environment is done.
"""
ball_state = state["things"][self.cueball_id]
is_finished = ball_state[Done] or not ball_state[Alive]
balls_are_finished = True
for ball_id in self.ball_ids:
ball_state = state["things"][ball_id]
ball_is_finished = ball_state[Done] or not ball_state[Alive]
balls_are_finished = balls_are_finished and ball_is_finished
return is_finished or balls_are_finished
def apply_action(self, force: np.ndarray) -> None:
"""Applies force to the cue ball.
:param force: (np.array) Force to apply
"""
self.sim.add_force(self.cueball_id, force)
def results(self, state: dict) -> dict:
"""Results for monitoring task.
:param state: States
:return: Dictionary of results.
"""
distance = min(
[self.sim.l2_distance(self.cueball_id, hole_id) for hole_id in self.hole_ids]
)
ball_state = state["things"][self.cueball_id]
return dict(
dist_to_goal=distance,
velocity=ball_state[Velocity].norm(),
mass=ball_state[Mass].value,
alive=ball_state[Alive].value,
)
def demo_action(self):
"""Generate an action used for demos
:return: np.array action
"""
return np.random.normal() + np.array((4, 3))
| 2.375
| 2
|
powergate/parse_manually_updated_jobs.py
|
deplatformr/open-images
| 2
|
12782843
|
import os
import sqlite3
from datetime import datetime
abs_path = os.getcwd()
split = os.path.split(abs_path)
workflow_db_path = os.path.join(
split[0], "pipeline/deplatformr_open_images_workflow.sqlite")
workflow_db = sqlite3.connect(workflow_db_path)
cursor = workflow_db.cursor()
utctime = datetime.utcnow()
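# Note on the expected input (assumption inferred from the split below): each line of
# updated_jobs.txt is "<cid>,<job_id>", i.e. the stored content CID followed by the new
# Powergate job id, separated by a single comma.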
with open("updated_jobs.txt", "r") as jobs_list:
jobs = jobs_list.readlines()
for job in jobs:
        split = job.strip().split(",")  # drop the trailing newline before splitting
cursor.execute("UPDATE jobs set job_id=?, timestamp=?, status=? WHERE cid=?",
(split[1], utctime, "JOB_STATUS_EXECUTING", split[0],),)
workflow_db.commit()
workflow_db.close()
| 2.5625
| 3
|
nnfs/datasets/mnist.py
|
tblut/NNFS
| 0
|
12782844
|
<filename>nnfs/datasets/mnist.py
import numpy as np
from pathlib import Path
from nnfs.utils import download_file
def _read_images_file(path):
with open(path, mode='rb') as file:
data = file.read()
n_images = int.from_bytes(data[4:8], byteorder='big', signed=True)
width = int.from_bytes(data[8:12], byteorder='big', signed=True)
height = int.from_bytes(data[12:16], byteorder='big', signed=True)
images = np.empty((n_images, width * height), dtype=np.float32)
image_size = width * height
for i in range(n_images):
start = 16 + i * image_size
end = start + image_size
images[i, :] = np.frombuffer(data[start:end], dtype=np.uint8) / 255.0
return images
def _read_labels_file(path):
with open(path, mode='rb') as file:
data = file.read()
n_items = int.from_bytes(data[4:8], byteorder='big', signed=True)
labels = np.frombuffer(data[8:], dtype=np.uint8)
labels = labels.astype(np.int32).reshape((n_items, 1))
return labels
def load_data(cache_dir='.cache'):
cache_file_path = Path(cache_dir, 'mnist.npz')
if not cache_file_path.exists():
url_train_images = "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz"
url_train_labels = "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"
url_test_images = "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz"
url_test_labels = "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"
train_images_path = download_file(url_train_images, cache_dir, extract=True)
train_labels_path = download_file(url_train_labels, cache_dir, extract=True)
test_images_path = download_file(url_test_images, cache_dir, extract=True)
test_labels_path = download_file(url_test_labels, cache_dir, extract=True)
train_images = _read_images_file(train_images_path)
train_labels = _read_labels_file(train_labels_path)
test_images = _read_images_file(test_images_path)
test_labels = _read_labels_file(test_labels_path)
        np.savez(cache_file_path, train_images=train_images, train_labels=train_labels,
                 test_images=test_images, test_labels=test_labels)
data = np.load(cache_file_path)
return (data['train_images'], data['train_labels']), (data['test_images'], data['test_labels'])
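# Minimal usage sketch (not part of the original module): the first call downloads the
# four MNIST archives into ./.cache; the shapes below assume the standard 60,000/10,000
# split of 28x28 images flattened to 784 floats.
if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = load_data()
    print(x_train.shape, y_train.shape)   # (60000, 784) (60000, 1)
    print(x_test.shape, y_test.shape)     # (10000, 784) (10000, 1)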
| 3.015625
| 3
|
utility/massFractionsEagleXG.py
|
jrminter/dtsa2scripts
| 2
|
12782845
|
# -*- coding: utf-8 -*-
"""
DTSA-II Script - <NAME> - 2018-07-30
massFractionsEagleXG.py
Date Who Comment
---------- --- -----------------------------------------------
2018-07-30 JRM Mass fractions the easy way for EagleXG rev sort
2018-10-02 JRM Change name to remove spaces. This made it easier
to add to the database.
Done!
Elapse: 0:00:00.6 jrmFastMac
Z Sym WF
8 O: 0.51877
14 Si: 0.30139
13 Al: 0.09008
20 Ca: 0.03876
5 B: 0.03266
12 Mg: 0.00773
38 Sr: 0.00696
50 Sn: 0.00120
56 Ba: 0.00109
51 Sb: 0.00039
26 Fe: 0.00036
33 As: 0.00024
22 Ti: 0.00015
density = 2.36
"""
import sys
sys.packageManager.makeJavaPackage("gov.nist.microanalysis.NISTMonte.Gen3", "CharacteristicXRayGeneration3, BremsstrahlungXRayGeneration3,FluorescenceXRayGeneration3, XRayTransport3", None)
import os
import glob
import shutil
import time
import math
import csv
import gov.nist.microanalysis.NISTMonte as nm
import gov.nist.microanalysis.NISTMonte.Gen3 as nm3
import gov.nist.microanalysis.EPQLibrary as epq
import gov.nist.microanalysis.EPQLibrary.Detector as epd
import gov.nist.microanalysis.Utility as epu
import gov.nist.microanalysis.EPQTools as ept
import dtsa2 as dt2
import dtsa2.mcSimulate3 as mc3
gitDir = os.environ['GIT_HOME']
relPrj = "/dtsa2Scripts/utility"
prjDir = gitDir + relPrj
rptDir = prjDir + '/massFractionsEagleXG Results/'
eagleXG = mixture({"SiO2" : 0.6447825,
"Al2O3" : 0.1702057,
"B2O3" : 0.1051482,
"CaO" : 0.0542376,
"MgO" : 0.0128153,
"SrO" : 0.0082368,
"SnO2" : 0.0015215,
"BaO" : 0.0012188,
"Fe2O3" : 0.0005078,
"Sb2O3" : 0.0004635,
"As2O3" : 0.0003145,
"ZrO2" : 0.0002938,
"TiO2" : 0.0002540
},
density=2.36,
name="eagleXG")
wfO = round(eagleXG.weightFractionU(epq.Element.O, True).doubleValue(), 5)
wfSi = round(eagleXG.weightFractionU(epq.Element.Si, True).doubleValue(), 5)
wfAl = round(eagleXG.weightFractionU(epq.Element.Al, True).doubleValue(), 5)
wfB = round(eagleXG.weightFractionU(epq.Element.B, True).doubleValue(), 5)
wfCa = round(eagleXG.weightFractionU(epq.Element.Ca, True).doubleValue(), 5)
wfMg = round(eagleXG.weightFractionU(epq.Element.Mg, True).doubleValue(), 5)
wfSr = round(eagleXG.weightFractionU(epq.Element.Sr, True).doubleValue(), 5)
wfSn = round(eagleXG.weightFractionU(epq.Element.Sn, True).doubleValue(), 5)
wfBa = round(eagleXG.weightFractionU(epq.Element.Ba, True).doubleValue(), 5)
wfFe = round(eagleXG.weightFractionU(epq.Element.Fe, True).doubleValue(), 5)
wfSb = round(eagleXG.weightFractionU(epq.Element.Sb, True).doubleValue(), 5)
wfAs = round(eagleXG.weightFractionU(epq.Element.As, True).doubleValue(), 5)
wfZr = round(eagleXG.weightFractionU(epq.Element.Zr, True).doubleValue(), 5)
wfTi = round(eagleXG.weightFractionU(epq.Element.Ti, True).doubleValue(), 5)
exg = { "O" : wfO,
"Si" : wfSi,
"Al" : wfAl,
"B" : wfB,
"Ca" : wfCa,
"Mg" : wfMg,
"Sr" : wfSr,
"Sn" : wfSn,
"Ba" : wfBa,
"Fe" : wfFe,
"Sb" : wfSb,
"As" : wfAs,
"Ti" : wfTi
}
# a1_sorted_keys = sorted(a1, key=a1.get, reverse=True)
# for r in a1_sorted_keys:
# print r, a1[r]
for key, value in sorted(exg.iteritems(), key=lambda (k,v): (v,k), reverse=True):
print("%s: %.5f" % (key, exg[key]))
# print(exg)
# es = exg.getElementSet()
# print(es)
# clean up cruft
shutil.rmtree(rptDir)
print "Done!"
| 1.210938
| 1
|
6.NAS.py
|
yassine-afrouni/network-automation
| 0
|
12782846
|
# NAS.py
# Network automation sys
#
# Created by yassen on 5/6/20.
# Copyright © 2021 yassen & nouhaila. All rights reserved.
from netmiko import ConnectHandler
from netmiko.ssh_exception import NetMikoTimeoutException, AuthenticationException
from paramiko.ssh_exception import SSHException
import getpass
from time import time, sleep
from datetime import datetime
import os
from simple_term_menu import TerminalMenu
from colorama import Fore, Back, Style
import csv
from threading import Thread
from tabulate import tabulate
###################################################################################################
#SSH CONNECTION
###################################################################################################
def ssh_connection(device):
    ip = device[0]
    info_device = {
        'device_type': 'cisco_ios',
        'ip': device[0],
        'host': device[1],
        'username': 'nouhaila',
        'password': '<PASSWORD>'
    }
    connection = None
    try:
        connection = ConnectHandler(**info_device)
    except (AuthenticationException):
        print('Authentication failure: ' + ip)
    except (NetMikoTimeoutException):
        print('Timeout to device: ' + ip)
    except (EOFError):
        print('End of file while attempting device: ' + ip)
    except (SSHException):
        print('Be sure that SSH is enabled in: ' + ip + '?')
    except Exception as unknown_error:
        print('Some other error: ' + str(unknown_error))
    return connection
####################################################################################################
#CHECK VERSION
####################################################################################################
def check_version(connection):
list_versions = ['vios_l2-ADVENTERPRISEK9-M', 'VIOS-ADVENTERPRISEK9-M']
output_version = connection.send_command('show version')
for software_ver in list_versions:
int_version = 0
int_version = output_version.find(software_ver)
if int_version > 0:
break
else:
pass
return software_ver
##########################################################################################
#device_input
##########################################################################################
def device_input():
input_list = []
devices_list = []
while True:
ip = input(Back.GREEN +'\nEnter the IP address of the device: '+Style.RESET_ALL)
name = input(Back.GREEN +'Enter the hostname : '+Style.RESET_ALL)
input_list.append(ip)
input_list.append(name)
ask = input("\n Do you want more devices? answer by 'y' or 'n'! : " )
devices_list.append(input_list)
input_list = []
        while ask not in ('y', 'n'):
            # Re-prompt until a valid answer is given.
            ask = input("\n Do you want more devices? answer by 'y' or 'n'! : ")
        if ask == 'n':
            break
return devices_list
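# Note on the data shape produced above (and consumed by every function below): each
# device is a two-element list [ip, hostname], e.g.
# [["192.168.1.10", "SW1"], ["192.168.1.20", "R1"]] (illustrative addresses and names).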
####################################################################################################
#CONFIG ALL DEVICES
####################################################################################################
def configuration(device):
ip = device [0]
name = device [1]
connection = ssh_connection(device)
print (Back.GREEN+'\nconnection to '+ name + ' is up' + Style.RESET_ALL)
software_ver = check_version(connection)
if software_ver == 'vios_l2-ADVENTERPRISEK9-M':
print ('Running Switch config file ...')
output = connection.send_config_set(switch_config_file)
print(output)
elif software_ver == 'VIOS-ADVENTERPRISEK9-M':
print ('Running Router config_file ...')
output = connection.send_config_set(router_config_file)
connection.disconnect()
return output
#############################################################################################
#VERIFY ALL DEVICES
#############################################################################################
def verification(device):
ip = device [0]
name = device [1]
print(Back.GREEN +'\n'+80*'#'+ Style.RESET_ALL)
connection = ssh_connection(device)
print (Back.GREEN+'\nconnection to %s ' %name + ' is up'+ Style.RESET_ALL)
software_ver = check_version(connection)
if software_ver == 'vios_l2-ADVENTERPRISEK9-M':
running_config = connection.send_command('show running-config')
length = len(switch_config_file)
count = 0
for item in switch_config_file:
if item in running_config:
count = count + 1
else:
print(Fore.RED+'{'+ item + '} not found in running-config}'+Style.RESET_ALL)
continue
if count == length:
print(Back.GREEN+'\nCONFIGURATION CORRECT'+Style.RESET_ALL)
else:
print(Back.RED+'\nCONFIGURATION NOT CORRECT'+Style.RESET_ALL)
elif software_ver == 'VIOS-ADVENTERPRISEK9-M':
running_config = connection.send_command('show running-config')
length = len(router_config_file)
count = 0
for item in router_config_file:
if item in running_config:
count = count + 1
else:
print(Fore.RED+'{'+ item + '} not found in running-config}'+Style.RESET_ALL)
continue
if count == length:
print(Back.GREEN+'\nCONFIGURATION CORRECT'+Style.RESET_ALL)
else:
print(Back.RED+'\nCONFIGURATION NOT CORRECT'+Style.RESET_ALL)
####################################################################################################
#TESTING CONNECTION
####################################################################################################
def test_connection(source, destination, connection):
ip_source = source [0]
name_source = source [1]
ip_destination = destination[0]
name_destination = destination[1]
command = 'ping '+ ip_destination
    output_ping = connection.send_command(command)  # optionally pass delay_factor for slow devices
check_list = ['Success rate is 80 percent (4/5)','Success rate is 100 percent (5/5)']
if any (item in output_ping for item in check_list):
print (Back.GREEN+'Connection from '+name_source+' to '+name_destination+' is reachable==> Success rate'+Style.RESET_ALL)
else:
print (Back.RED+'Connection from '+name_source+' to '+name_destination
+' is unreachable ==> Check Interfaces and protocols !'+Style.RESET_ALL)
#############################################################################################
#CONFIRMATION
#############################################################################################
def confirmation(device):
ip = device [0]
name = device [1]
connection = ssh_connection(device)
print (Back.GREEN+'\nconnection to %s ' %name +' is up'+ Style.RESET_ALL)
saving = connection.save_config()
    print(saving + '\n--------------------- Successful Saving -------------------------')
connection.disconnect()
#############################################################################################
#BACKUPS
#############################################################################################
def backups(device):
ip = device [0]
name = device [1]
connection = ssh_connection(device)
Backup = connection.send_command("show running-config")
file = open("%s_backup.txt" %name ,"w")
file.write(Backup)
file.close()
print(Back.GREEN+"\nBackup for %s is done" %name + Style.RESET_ALL)
connection.disconnect()
#############################################################################################
#CHECK INTERFACE
#############################################################################################
def check_interfaces(device):
ip = device[0]
name = device[1]
connection=ssh_connection(device)
print (Back.GREEN+'\nconnection to %s ' %name +' is up' +Style.RESET_ALL)
output_one = connection.send_command('show int', use_textfsm=True)
output_two = connection.send_command('show ip int br', use_textfsm=True)
output_three = connection.send_command('show version', use_textfsm=True)
i = 0
table_data = [[device[1]+'=>'+output_three[0]['version'], 'Interface', 'IP-Address', 'Protocol', 'Status', 'Uptime',
'In Error', 'In Pckts', 'Out Error', 'Out Pckts']]
while i < len(output_one):
int_info = [device[1], output_two[i]['intf'], output_two[i]['ipaddr'],
output_two[i]['proto'], output_two[i]['status'], output_three[0]['uptime'],
output_one[i]['input_errors'], output_one[i]['input_packets'],
output_one[i]['output_errors'], output_one[i]['output_packets']]
int_info[1] = int_info[1].replace('GigabitEthernet', 'Gi')
if int_info[4] == 'administratively down':
int_info[4] = int_info[4].replace('administratively down', 'ad-down')
table_data.append(int_info)
i = i + 1
print(tabulate(table_data, headers="firstrow", tablefmt="fancy_grid", stralign="center", numalign="center"))
return table_data
#############################################################################################
#CHECK ROUTING
#############################################################################################
def check_routing(device):
ip = device[0]
name = device[1]
connection=ssh_connection(device)
software_ver = check_version(connection)
print (Back.GREEN+'\nconnection to %s ' %name +' is up'+ Style.RESET_ALL)
output_one =connection.send_command('show ip route', use_textfsm=True)
output_two =connection.send_command('sh ip ospf database', use_textfsm=True)
output_three =connection.send_command('sh ip ospf neighbor', use_textfsm=True)
if len(output_three) < 1:
output_three = [{'address':'No neighbor'}]
i = 0
table_one = [['Ospf\nR-table=>'+device[1], 'Network', 'Mask', 'Next-Hop', 'Protocol', 'Neighbor']]
while i < len(output_one):
tt = ['', output_one[i]['network'], output_one[i]['mask'], output_one[i]['nexthop_if'],
output_one[i]['protocol'], output_three[0]['address']]
table_one.append(tt)
i = i + 1
print(tabulate(table_one, headers="firstrow", stralign="center", numalign="center", tablefmt="fancy_grid"))
table_two = [['Ospf\nData-base=>'+device[1], 'adv_router', 'age', 'area', 'link_count', 'link_id', 'process_id', 'router_id']]
if len(output_two) > 0:
j = 0
while j < len(output_two):
tt = ['', output_two[j]['adv_router'], output_two[j]['age'], output_two[j]['area'],
output_two[j]['link_count'], output_two[j]['link_id'], output_two[j]['process_id'], output_two[j]['router_id']]
table_two.append(tt)
j = j + 1
print(tabulate(table_two, headers="firstrow", stralign="center", numalign="center", tablefmt="fancy_grid"))
return table_one, table_two
############################################################################################
#CHECK VLAN
############################################################################################
def check_vlan(device):
ip = device[0]
name = device[1]
connection=ssh_connection(device)
software_ver = check_version(connection)
if software_ver == 'vios_l2-ADVENTERPRISEK9-M':
print (Back.GREEN+'\nconnection to %s ' %name +' is up'+ Style.RESET_ALL)
output_one =connection.send_command('show vlan', use_textfsm=True)
output_two =connection.send_command('show vtp status', use_textfsm=True)
i = 0
vlan_data = [[device[1], 'Interfaces', 'Name', 'Status', 'Vlan id', 'VTP-Mode']]
while i < len(output_one):
int_info = ['', output_one[i]['interfaces'], output_one[i]['name'],
output_one[i]['status'], output_one[i]['vlan_id'], output_two[0]['mode']]
int_info[1] = ','.join(int_info[1])
vlan_data.append(int_info)
i = i + 1
print(tabulate(vlan_data, headers="firstrow", stralign="center", numalign="center", tablefmt="fancy_grid"))
else:
vlan_data = [['this option can not'], ['be reported']]
return vlan_data
#############################################################################################
#CSV REPORTING
#############################################################################################
def reporting():
with open('Global_report.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['CHECKING NETWORK IN: ',str(datetime.now())])
writer.writerow([''])
print('\nGenerating a global report of the network ...')
for device in devices_list:
name = device[1]
print (Back.GREEN+'\nconnection to %s for reporting' %name + Style.RESET_ALL)
writer.writerow([''])
writer.writerow(['THIS IS A SPREADSHEET', 'TO GET INTERFACES INFOS FROM %s' %name])
writer.writerow([''])
writer.writerows(check_interfaces(device))
rout_info= check_routing(device)
writer.writerow([''])
writer.writerow(['THIS IS A SPREADSHEET', 'TO GET ROUTING INFOS FROM %s' %name])
writer.writerow([''])
writer.writerows(rout_info[0])
writer.writerow([''])
writer.writerows(rout_info[1])
writer.writerow([''])
writer.writerow(['THIS IS A SPREADSHEET', 'TO GET VLAN INFOS FROM %s' %name])
writer.writerow([''])
writer.writerows(check_vlan(device))
writer.writerow([''])
print('Reporting Done')
#############################################################################################
#UI MENU
#############################################################################################
def main():
main_menu_title = 12 *'*'+Back.CYAN+" 'WELCOME TO THE NETWORK MANAGEMENT PLATFORM MAIN MENU' "+Style.RESET_ALL+ 12 *'*' +"\n"
main_menu_items = ["NETWORK CONFIGURATION", "NETWORK VERIFICATION","NETWORK CONFIRMATION", "CHECKING NETWORK", "QUIT"]
main_menu_exit = False
main_menu = TerminalMenu(menu_entries=main_menu_items, title=main_menu_title)
conf_menu_title = 24*'*'+Back.CYAN+"'NETWORK CONFIGURATION SECTION'"+Style.RESET_ALL+24*'*'+"\n"
conf_menu_items = ["CONFIG ALL DEVICES", "CONFIG SPECIFIC DEVICES", "BACK TO MAIN MENU"]
conf_menu_back = False
conf_menu = TerminalMenu(conf_menu_items, title=conf_menu_title)
ver_menu_title = 24*'*'+Back.CYAN+"'NETWORK VERIFICATION SECTION'"+Style.RESET_ALL+24*'*'+"\n"
ver_menu_items = ["VERIFY All DEVICES","VERIFY SPECIFIC DEVICES", "TEST CONNECTION", "BACK TO MAIN MENU"]
ver_menu_back = False
ver_menu = TerminalMenu(ver_menu_items, title=ver_menu_title)
com_menu_title = 24*'*'+Back.CYAN+"'NETWORK CONFIRMATION SECTION'"+Style.RESET_ALL+24*'*'+"\n"
    com_menu_items = ["CONFIRMATION", "BACKUPS", "BACK TO MAIN MENU"]
com_menu_back = False
com_menu = TerminalMenu(com_menu_items, title=com_menu_title)
ch_menu_title = 24*'*'+Back.CYAN+"'CHECK NETWORK SECTION'"+Style.RESET_ALL+24*'*'+"\n"
ch_menu_items = ["CHECK INTERFACES", "CHECK ROUTING", "CHECK VLAN", "CSV REPORTING","BACK TO MAIN MENU"]
ch_menu_back = False
ch_menu = TerminalMenu(ch_menu_items, title=ch_menu_title)
while not main_menu_exit:
os.system('clear')
main_sel = main_menu.show()
if main_sel == 0:
while not conf_menu_back:
os.system('clear')
conf_sel = conf_menu.show()
if conf_sel == 0:
print("\nConfig All Devices Has Been Selected")
''' Multithreading Integration'''
startTime = time()
threads=[]
for device in devices_list:
t = Thread(target=configuration, args= (device,))
t.start()
threads.append(t)
for t in threads:
t.join()
print("\ntime in second is = ", time() - startTime)
for device in devices_list:
configuration(device=device)
print("time in second is = ", time() - startTime)
sleep(60)
elif conf_sel == 1:
print("\nConfig Specific Devices Has Been Selected")
devices = device_input()
for device in devices:
configuration(device)
elif conf_sel == 2:
conf_menu_back = True
print("\nBack Selected")
conf_menu_back = False
elif main_sel == 1:
while not ver_menu_back:
os.system('clear')
ver_sel = ver_menu.show()
if ver_sel == 0:
print("\nVerify All Devices Has Been Selected")
for device in devices_list:
verification(device)
sleep(60)
elif ver_sel == 1:
print("\nVerify Specific Devices Has Been Selected")
devices = device_input()
for device in devices:
verification(device)
sleep(50)
elif ver_sel == 2:
print("\nTest Connection Has Been Selected")
print(Back.CYAN + "\nGet source ip =>:" +Style.RESET_ALL)
source_ip = device_input()
print(Back.CYAN + "\nGet destination ip =>:" +Style.RESET_ALL)
destination_ip = device_input()
                    for source in source_ip:
print(Back.YELLOW + '\nConnection to %s' %source[1] +'\n'+Style.RESET_ALL)
connection = ssh_connection(source)
for destination in destination_ip:
test_connection(source, destination, connection)
sleep(60)
elif ver_sel == 3:
ver_menu_back = True
print("\nBack Selected")
ver_menu_back = False
elif main_sel == 2:
while not com_menu_back:
os.system('clear')
com_sel = com_menu.show()
if com_sel == 0:
print("\nConfirm All Devices Has Been Selected")
for device in devices_list:
confirmation(device)
sleep(20)
elif com_sel == 1:
print("\nBackups Has Been Selected")
for device in devices_list:
backups(device)
sleep(20)
elif com_sel == 2:
com_menu_back = True
print("\nBack Selected")
com_menu_back = False
elif main_sel == 3:
while not ch_menu_back:
os.system('clear')
ch_sel = ch_menu.show()
if ch_sel == 0:
print("\nCheck interfaces Has Been Selected")
for device in devices_list:
check_interfaces(device)
sleep(20)
elif ch_sel == 1:
print("\nCheck Routing Has Been Selected")
for device in devices_list:
check_routing(device)
sleep(20)
elif ch_sel == 2:
print("\nCheck Vlan Has Been Selected")
for device in devices_list:
check_vlan(device)
sleep(20)
elif ch_sel == 3:
print("\nCSV Reporting Has Been Selected")
reporting()
elif ch_sel == 4:
ch_menu_back = True
print("\nBack Selected")
ch_menu_back = False
elif main_sel == 4:
main_menu_exit = True
print("\nQuit System Has Been Selected")
###################################################################################################
#MAIN PROGRAMME
###################################################################################################
if __name__ == "__main__":
startTime = time()
with open('switch_config_file') as f:
switch_config_file = f.read().splitlines()
with open('router_config_file') as f:
router_config_file = f.read().splitlines()
with open('devices_file', 'r') as f:
reader = csv.reader(f)
devices_list = [d for d in reader]
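    # Assumed layout of devices_file (the sample rows are hypothetical): plain CSV with
    # one device per row, management IP in column 0 and hostname in column 1, e.g.
    #   192.0.2.11,SW1
    #   192.0.2.12,R1
    # The rest of the script indexes each entry as device[0] = ip, device[1] = name.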
while True:
username = getpass.getpass(prompt='Username: ')
password = getpass.getpass(prompt='Password: ')
        if username.lower() == 'nouhaila' and password == '<PASSWORD>':
break
else:
print('The answer entered by you is incorrect..!!!')
main()
| 2.125
| 2
|
vendor-local/src/basket-client/basket/__init__.py
|
satdav/mozillians
| 0
|
12782847
|
from base import (send_sms, subscribe, unsubscribe, user,
update_user, debug_user, BasketException)
| 0.960938
| 1
|
hubspot/crm/extensions/accounting/models/accounting_app_urls.py
|
cclauss/hubspot-api-python
| 0
|
12782848
|
# coding: utf-8
"""
Accounting Extension
These APIs allow you to interact with HubSpot's Accounting Extension. It allows you to: * Specify the URLs that HubSpot will use when making webhook requests to your external accounting system. * Respond to webhook calls made to your external accounting system by HubSpot # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.extensions.accounting.configuration import Configuration
class AccountingAppUrls(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'get_invoice_url': 'str',
'search_customer_url': 'str',
'get_invoice_pdf_url': 'str',
'customer_url_template': 'str',
'product_url_template': 'str',
'invoice_url_template': 'str',
'create_invoice_url': 'str',
'search_invoice_url': 'str',
'search_product_url': 'str',
'get_terms_url': 'str',
'create_customer_url': 'str',
'search_tax_url': 'str',
'exchange_rate_url': 'str',
'search_url': 'str',
'search_count_url': 'str'
}
attribute_map = {
'get_invoice_url': 'getInvoiceUrl',
'search_customer_url': 'searchCustomerUrl',
'get_invoice_pdf_url': 'getInvoicePdfUrl',
'customer_url_template': 'customerUrlTemplate',
'product_url_template': 'productUrlTemplate',
'invoice_url_template': 'invoiceUrlTemplate',
'create_invoice_url': 'createInvoiceUrl',
'search_invoice_url': 'searchInvoiceUrl',
'search_product_url': 'searchProductUrl',
'get_terms_url': 'getTermsUrl',
'create_customer_url': 'createCustomerUrl',
'search_tax_url': 'searchTaxUrl',
'exchange_rate_url': 'exchangeRateUrl',
'search_url': 'searchUrl',
'search_count_url': 'searchCountUrl'
}
def __init__(self, get_invoice_url=None, search_customer_url=None, get_invoice_pdf_url=None, customer_url_template=None, product_url_template=None, invoice_url_template=None, create_invoice_url=None, search_invoice_url=None, search_product_url=None, get_terms_url=None, create_customer_url=None, search_tax_url=None, exchange_rate_url=None, search_url=None, search_count_url=None, local_vars_configuration=None): # noqa: E501
"""AccountingAppUrls - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._get_invoice_url = None
self._search_customer_url = None
self._get_invoice_pdf_url = None
self._customer_url_template = None
self._product_url_template = None
self._invoice_url_template = None
self._create_invoice_url = None
self._search_invoice_url = None
self._search_product_url = None
self._get_terms_url = None
self._create_customer_url = None
self._search_tax_url = None
self._exchange_rate_url = None
self._search_url = None
self._search_count_url = None
self.discriminator = None
self.get_invoice_url = get_invoice_url
self.search_customer_url = search_customer_url
self.get_invoice_pdf_url = get_invoice_pdf_url
self.customer_url_template = customer_url_template
self.product_url_template = product_url_template
self.invoice_url_template = invoice_url_template
if create_invoice_url is not None:
self.create_invoice_url = create_invoice_url
if search_invoice_url is not None:
self.search_invoice_url = search_invoice_url
if search_product_url is not None:
self.search_product_url = search_product_url
if get_terms_url is not None:
self.get_terms_url = get_terms_url
if create_customer_url is not None:
self.create_customer_url = create_customer_url
if search_tax_url is not None:
self.search_tax_url = search_tax_url
if exchange_rate_url is not None:
self.exchange_rate_url = exchange_rate_url
if search_url is not None:
self.search_url = search_url
if search_count_url is not None:
self.search_count_url = search_count_url
@property
def get_invoice_url(self):
"""Gets the get_invoice_url of this AccountingAppUrls. # noqa: E501
A URL that specifies the endpoint where invoices can be retrieved. # noqa: E501
:return: The get_invoice_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._get_invoice_url
@get_invoice_url.setter
def get_invoice_url(self, get_invoice_url):
"""Sets the get_invoice_url of this AccountingAppUrls.
A URL that specifies the endpoint where invoices can be retrieved. # noqa: E501
:param get_invoice_url: The get_invoice_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and get_invoice_url is None: # noqa: E501
raise ValueError("Invalid value for `get_invoice_url`, must not be `None`") # noqa: E501
self._get_invoice_url = get_invoice_url
@property
def search_customer_url(self):
"""Gets the search_customer_url of this AccountingAppUrls. # noqa: E501
A URL that specifies the endpoint where a customer search can be performed. # noqa: E501
:return: The search_customer_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._search_customer_url
@search_customer_url.setter
def search_customer_url(self, search_customer_url):
"""Sets the search_customer_url of this AccountingAppUrls.
A URL that specifies the endpoint where a customer search can be performed. # noqa: E501
:param search_customer_url: The search_customer_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and search_customer_url is None: # noqa: E501
raise ValueError("Invalid value for `search_customer_url`, must not be `None`") # noqa: E501
self._search_customer_url = search_customer_url
@property
def get_invoice_pdf_url(self):
"""Gets the get_invoice_pdf_url of this AccountingAppUrls. # noqa: E501
A URL that specifies the endpoint where an invoice PDF can be retrieved. # noqa: E501
:return: The get_invoice_pdf_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._get_invoice_pdf_url
@get_invoice_pdf_url.setter
def get_invoice_pdf_url(self, get_invoice_pdf_url):
"""Sets the get_invoice_pdf_url of this AccountingAppUrls.
A URL that specifies the endpoint where an invoice PDF can be retrieved. # noqa: E501
:param get_invoice_pdf_url: The get_invoice_pdf_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and get_invoice_pdf_url is None: # noqa: E501
raise ValueError("Invalid value for `get_invoice_pdf_url`, must not be `None`") # noqa: E501
self._get_invoice_pdf_url = get_invoice_pdf_url
@property
def customer_url_template(self):
"""Gets the customer_url_template of this AccountingAppUrls. # noqa: E501
A template URL that indicates the endpoint where a customer can be fetched by ID. Note that ${CUSTOMER_ID} in this URL will be replaced by the actual customer ID. For example: https://myapp.com/api/customers/${CUSTOMER_ID} # noqa: E501
:return: The customer_url_template of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._customer_url_template
@customer_url_template.setter
def customer_url_template(self, customer_url_template):
"""Sets the customer_url_template of this AccountingAppUrls.
A template URL that indicates the endpoint where a customer can be fetched by ID. Note that ${CUSTOMER_ID} in this URL will be replaced by the actual customer ID. For example: https://myapp.com/api/customers/${CUSTOMER_ID} # noqa: E501
:param customer_url_template: The customer_url_template of this AccountingAppUrls. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and customer_url_template is None: # noqa: E501
raise ValueError("Invalid value for `customer_url_template`, must not be `None`") # noqa: E501
self._customer_url_template = customer_url_template
@property
def product_url_template(self):
"""Gets the product_url_template of this AccountingAppUrls. # noqa: E501
A template URL that indicates the endpoint where a product can be fetched by ID. Note that ${PRODUCT_ID} in this URL will be replaced by the actual product ID. For example: https://myapp.com/api/products/${PRODUCT_ID} # noqa: E501
:return: The product_url_template of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._product_url_template
@product_url_template.setter
def product_url_template(self, product_url_template):
"""Sets the product_url_template of this AccountingAppUrls.
A template URL that indicates the endpoint where a product can be fetched by ID. Note that ${PRODUCT_ID} in this URL will be replaced by the actual product ID. For example: https://myapp.com/api/products/${PRODUCT_ID} # noqa: E501
:param product_url_template: The product_url_template of this AccountingAppUrls. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and product_url_template is None: # noqa: E501
raise ValueError("Invalid value for `product_url_template`, must not be `None`") # noqa: E501
self._product_url_template = product_url_template
@property
def invoice_url_template(self):
"""Gets the invoice_url_template of this AccountingAppUrls. # noqa: E501
A template URL that indicates the endpoint where an invoice can be fetched by ID. Note that ${INVOICE_ID} in this URL will be replaced by the actual invoice ID. For example: https://myapp.com/api/invoices/${INVOICE_ID} # noqa: E501
:return: The invoice_url_template of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._invoice_url_template
@invoice_url_template.setter
def invoice_url_template(self, invoice_url_template):
"""Sets the invoice_url_template of this AccountingAppUrls.
A template URL that indicates the endpoint where an invoice can be fetched by ID. Note that ${INVOICE_ID} in this URL will be replaced by the actual invoice ID. For example: https://myapp.com/api/invoices/${INVOICE_ID} # noqa: E501
:param invoice_url_template: The invoice_url_template of this AccountingAppUrls. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and invoice_url_template is None: # noqa: E501
raise ValueError("Invalid value for `invoice_url_template`, must not be `None`") # noqa: E501
self._invoice_url_template = invoice_url_template
@property
def create_invoice_url(self):
"""Gets the create_invoice_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where an invoice can be created.  # noqa: E501
:return: The create_invoice_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._create_invoice_url
@create_invoice_url.setter
def create_invoice_url(self, create_invoice_url):
"""Sets the create_invoice_url of this AccountingAppUrls.
        A URL that specifies the endpoint where an invoice can be created.  # noqa: E501
:param create_invoice_url: The create_invoice_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
self._create_invoice_url = create_invoice_url
@property
def search_invoice_url(self):
"""Gets the search_invoice_url of this AccountingAppUrls. # noqa: E501
A URL that specifies the endpoint where an invoice search can be performed. # noqa: E501
:return: The search_invoice_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._search_invoice_url
@search_invoice_url.setter
def search_invoice_url(self, search_invoice_url):
"""Sets the search_invoice_url of this AccountingAppUrls.
A URL that specifies the endpoint where an invoice search can be performed. # noqa: E501
:param search_invoice_url: The search_invoice_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
self._search_invoice_url = search_invoice_url
@property
def search_product_url(self):
"""Gets the search_product_url of this AccountingAppUrls. # noqa: E501
A URL that specifies the endpoint where a product search can be performed. # noqa: E501
:return: The search_product_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._search_product_url
@search_product_url.setter
def search_product_url(self, search_product_url):
"""Sets the search_product_url of this AccountingAppUrls.
A URL that specifies the endpoint where a product search can be performed. # noqa: E501
:param search_product_url: The search_product_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
self._search_product_url = search_product_url
@property
def get_terms_url(self):
"""Gets the get_terms_url of this AccountingAppUrls. # noqa: E501
A URL that specifies the endpoint where payment terms can be retrieved. # noqa: E501
:return: The get_terms_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._get_terms_url
@get_terms_url.setter
def get_terms_url(self, get_terms_url):
"""Sets the get_terms_url of this AccountingAppUrls.
A URL that specifies the endpoint where payment terms can be retrieved. # noqa: E501
:param get_terms_url: The get_terms_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
self._get_terms_url = get_terms_url
@property
def create_customer_url(self):
"""Gets the create_customer_url of this AccountingAppUrls. # noqa: E501
A URL that specifies the endpoint where a new customer can be created. # noqa: E501
:return: The create_customer_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._create_customer_url
@create_customer_url.setter
def create_customer_url(self, create_customer_url):
"""Sets the create_customer_url of this AccountingAppUrls.
A URL that specifies the endpoint where a new customer can be created. # noqa: E501
:param create_customer_url: The create_customer_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
self._create_customer_url = create_customer_url
@property
def search_tax_url(self):
"""Gets the search_tax_url of this AccountingAppUrls. # noqa: E501
A URL that specifies the endpoint where a tax search can be performed. # noqa: E501
:return: The search_tax_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._search_tax_url
@search_tax_url.setter
def search_tax_url(self, search_tax_url):
"""Sets the search_tax_url of this AccountingAppUrls.
A URL that specifies the endpoint where a tax search can be performed. # noqa: E501
:param search_tax_url: The search_tax_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
self._search_tax_url = search_tax_url
@property
def exchange_rate_url(self):
"""Gets the exchange_rate_url of this AccountingAppUrls. # noqa: E501
A URL that specifies the endpoint where exchange rates can be queried. # noqa: E501
:return: The exchange_rate_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._exchange_rate_url
@exchange_rate_url.setter
def exchange_rate_url(self, exchange_rate_url):
"""Sets the exchange_rate_url of this AccountingAppUrls.
A URL that specifies the endpoint where exchange rates can be queried. # noqa: E501
:param exchange_rate_url: The exchange_rate_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
self._exchange_rate_url = exchange_rate_url
@property
def search_url(self):
"""Gets the search_url of this AccountingAppUrls. # noqa: E501
:return: The search_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._search_url
@search_url.setter
def search_url(self, search_url):
"""Sets the search_url of this AccountingAppUrls.
:param search_url: The search_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
self._search_url = search_url
@property
def search_count_url(self):
"""Gets the search_count_url of this AccountingAppUrls. # noqa: E501
:return: The search_count_url of this AccountingAppUrls. # noqa: E501
:rtype: str
"""
return self._search_count_url
@search_count_url.setter
def search_count_url(self, search_count_url):
"""Sets the search_count_url of this AccountingAppUrls.
:param search_count_url: The search_count_url of this AccountingAppUrls. # noqa: E501
:type: str
"""
self._search_count_url = search_count_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AccountingAppUrls):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AccountingAppUrls):
return True
return self.to_dict() != other.to_dict()
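# Illustrative usage sketch: the endpoint URLs below are hypothetical placeholders, and
# only the six required fields are populated; the optional search/create URLs are omitted.
if __name__ == "__main__":
    example_urls = AccountingAppUrls(
        get_invoice_url="https://myapp.example/api/invoices/get",
        search_customer_url="https://myapp.example/api/customers/search",
        get_invoice_pdf_url="https://myapp.example/api/invoices/pdf",
        customer_url_template="https://myapp.example/api/customers/${CUSTOMER_ID}",
        product_url_template="https://myapp.example/api/products/${PRODUCT_ID}",
        invoice_url_template="https://myapp.example/api/invoices/${INVOICE_ID}",
    )
    pprint.pprint(example_urls.to_dict())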
| 2.15625
| 2
|
tests/chainer_tests/training_tests/updaters_tests/test_standard_updater.py
|
belldandyxtq/chainer
| 1
|
12782849
|
<gh_stars>1-10
import unittest
import mock
import numpy
import chainer
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer import dataset
from chainer import testing
from chainer.testing import attr
from chainer import training
class DummyIterator(dataset.Iterator):
epoch = 1
is_new_epoch = True
def __init__(self, next_data):
self.finalize_called = 0
self.next_called = 0
self.next_data = next_data
self.serialize_called = []
def finalize(self):
self.finalize_called += 1
def __next__(self):
self.next_called += 1
return self.next_data
def serialize(self, serializer):
self.serialize_called.append(serializer)
class DummyOptimizer(chainer.Optimizer):
def __init__(self):
self.update = mock.MagicMock()
self.serialize_called = []
def serialize(self, serializer):
self.serialize_called.append(serializer)
class DummySerializer(chainer.Serializer):
def __init__(self, path=None):
if path is None:
path = []
self.path = path
self.called = []
def __getitem__(self, key):
return DummySerializer(self.path + [key])
def __call__(self, key, value):
self.called.append((key, value))
class TestStandardUpdater(unittest.TestCase):
def setUp(self):
self.target = chainer.Link()
self.iterator = DummyIterator([(numpy.array(1), numpy.array(2))])
self.optimizer = DummyOptimizer()
self.optimizer.setup(self.target)
self.updater = training.updaters.StandardUpdater(
self.iterator, self.optimizer)
def test_init_values(self):
assert self.updater.device is None
assert self.updater.loss_func is None
assert self.updater.iteration == 0
def test_epoch(self):
assert self.updater.epoch == 1
def test_new_epoch(self):
assert self.updater.is_new_epoch is True
def test_get_iterator(self):
assert self.updater.get_iterator('main') is self.iterator
def test_get_optimizer(self):
assert self.updater.get_optimizer('main') is self.optimizer
def test_get_all_optimizers(self):
assert self.updater.get_all_optimizers() == {'main': self.optimizer}
def test_update(self):
self.updater.update()
assert self.updater.iteration == 1
assert self.optimizer.epoch == 1
assert self.iterator.next_called == 1
def test_use_auto_new_epoch(self):
assert self.optimizer.use_auto_new_epoch is True
def test_finalizer(self):
self.updater.finalize()
assert self.iterator.finalize_called == 1
def test_serialize(self):
serializer = DummySerializer()
self.updater.serialize(serializer)
assert len(self.iterator.serialize_called) == 1
assert self.iterator.serialize_called[0].path == ['iterator:main']
assert len(self.optimizer.serialize_called) == 1
assert self.optimizer.serialize_called[0].path == ['optimizer:main']
assert serializer.called == [('iteration', 0)]
class TestStandardUpdaterDataTypes(unittest.TestCase):
"""Tests several data types with StandardUpdater"""
def setUp(self):
self.target = chainer.Link()
self.optimizer = DummyOptimizer()
self.optimizer.setup(self.target)
def test_update_tuple(self):
iterator = DummyIterator([(numpy.array(1), numpy.array(2))])
updater = training.updaters.StandardUpdater(iterator, self.optimizer)
updater.update_core()
assert self.optimizer.update.call_count == 1
args, kwargs = self.optimizer.update.call_args
assert len(args) == 3
loss, v1, v2 = args
assert len(kwargs) == 0
assert loss is self.optimizer.target
assert isinstance(v1, numpy.ndarray)
assert v1 == 1
assert isinstance(v2, numpy.ndarray)
assert v2 == 2
assert iterator.next_called == 1
def test_update_dict(self):
iterator = DummyIterator([{'x': numpy.array(1), 'y': numpy.array(2)}])
updater = training.updaters.StandardUpdater(iterator, self.optimizer)
updater.update_core()
assert self.optimizer.update.call_count == 1
args, kwargs = self.optimizer.update.call_args
assert len(args) == 1
loss, = args
assert set(kwargs.keys()) == {'x', 'y'}
v1 = kwargs['x']
v2 = kwargs['y']
assert loss is self.optimizer.target
assert isinstance(v1, numpy.ndarray)
assert v1 == 1
assert isinstance(v2, numpy.ndarray)
assert v2 == 2
assert iterator.next_called == 1
def test_update_var(self):
iterator = DummyIterator([numpy.array(1)])
updater = training.updaters.StandardUpdater(iterator, self.optimizer)
updater.update_core()
assert self.optimizer.update.call_count == 1
args, kwargs = self.optimizer.update.call_args
assert len(args) == 2
loss, v1 = args
assert len(kwargs) == 0
assert loss is self.optimizer.target
assert isinstance(v1, numpy.ndarray)
assert v1 == 1
assert iterator.next_called == 1
@testing.parameterize(
{'converter_style': 'old'},
{'converter_style': 'new'})
@chainer.testing.backend.inject_backend_tests(
['test_converter_given_device'],
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# Custom converter is not supported for ChainerX.
])
class TestStandardUpdaterCustomConverter(unittest.TestCase):
"""Tests custom converters of various specs"""
def create_optimizer(self):
target = chainer.Link()
optimizer = DummyOptimizer()
optimizer.setup(target)
return optimizer
def create_updater(self, iterator, optimizer, converter, device):
return training.updaters.StandardUpdater(
iterator, optimizer, converter=converter, device=device)
def test_converter_given_device(self, backend_config):
self.check_converter_all(backend_config.device)
def test_converter_given_none(self):
self.check_converter_all(None)
def test_converter_given_int_negative(self):
self.check_converter_all(-1)
@attr.gpu
def test_converter_given_int_positive(self):
self.check_converter_all(9999)
def check_converter_all(self, device):
self.check_converter_in_arrays(device)
self.check_converter_in_obj(device)
self.check_converter_out_tuple(device)
self.check_converter_out_dict(device)
self.check_converter_out_obj(device)
def get_converter(self, converter_func):
if self.converter_style == 'old':
return converter_func
if self.converter_style == 'new':
@chainer.dataset.converter()
def wrapped_converter(*args, **kwargs):
return converter_func(*args, **kwargs)
return wrapped_converter
assert False
def check_converter_received_device_arg(
self, received_device_arg, device_arg):
new_style = self.converter_style == 'new'
# None
if device_arg is None:
assert received_device_arg is None
return
# Normalize input device types
is_cpu = False
cuda_device_id = None
if isinstance(device_arg, int):
if device_arg < 0:
is_cpu = True
else:
cuda_device_id = device_arg
elif isinstance(device_arg, _cpu.CpuDevice):
is_cpu = True
elif isinstance(device_arg, cuda.GpuDevice):
cuda_device_id = device_arg.device.id
else:
assert False
# Check received device
if is_cpu:
if new_style:
assert received_device_arg == _cpu.CpuDevice()
else:
assert received_device_arg == -1
elif cuda_device_id is not None:
if new_style:
assert (received_device_arg
== cuda.GpuDevice.from_device_id(cuda_device_id))
else:
assert isinstance(received_device_arg, int)
assert received_device_arg == cuda_device_id
else:
assert new_style
assert received_device_arg is device_arg
def check_converter_in_arrays(self, device_arg):
iterator = DummyIterator([(numpy.array(1), numpy.array(2))])
optimizer = self.create_optimizer()
called = [0]
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
assert isinstance(batch, list)
assert len(batch) == 1
samples = batch[0]
assert isinstance(samples, tuple)
assert len(samples) == 2
assert isinstance(samples[0], numpy.ndarray)
assert isinstance(samples[1], numpy.ndarray)
assert samples[0] == 1
assert samples[1] == 2
called[0] += 1
return samples
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert called[0] == 1
def check_converter_in_obj(self, device_arg):
obj1 = object()
obj2 = object()
iterator = DummyIterator([obj1, obj2])
optimizer = self.create_optimizer()
called = [0]
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
assert isinstance(batch, list)
assert len(batch) == 2
assert batch[0] is obj1
assert batch[1] is obj2
called[0] += 1
return obj1, obj2
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert called[0] == 1
def check_converter_out_tuple(self, device_arg):
iterator = DummyIterator([object()])
optimizer = self.create_optimizer()
converter_out = (object(), object())
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
return converter_out
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert optimizer.update.call_count == 1
args, kwargs = optimizer.update.call_args
assert len(args) == 3
loss, v1, v2 = args
assert len(kwargs) == 0
assert loss is optimizer.target
assert v1 is converter_out[0]
assert v2 is converter_out[1]
def check_converter_out_dict(self, device_arg):
iterator = DummyIterator([object()])
optimizer = self.create_optimizer()
converter_out = {'x': object(), 'y': object()}
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
return converter_out
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert optimizer.update.call_count == 1
args, kwargs = optimizer.update.call_args
assert len(args) == 1
loss, = args
assert len(kwargs) == 2
assert loss is optimizer.target
assert sorted(kwargs.keys()) == ['x', 'y']
assert kwargs['x'] is converter_out['x']
assert kwargs['y'] is converter_out['y']
def check_converter_out_obj(self, device_arg):
iterator = DummyIterator([object()])
optimizer = self.create_optimizer()
converter_out = object()
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
return converter_out
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert optimizer.update.call_count == 1
args, kwargs = optimizer.update.call_args
assert len(args) == 2
loss, v1 = args
assert len(kwargs) == 0
assert loss is optimizer.target
assert v1 is converter_out
testing.run_module(__name__, __file__)
| 2.21875
| 2
|
landing/models.py
|
okfnepal/election-nepal
| 31
|
12782850
|
from __future__ import unicode_literals
from mezzanine.core import fields
from mezzanine.pages.models import Page
from django.db import models
DATASET_TYPES = [
('Voters', 'Voters'),
('Candidates', 'Candidates'),
('Political Parties', 'Political Parties'),
('Federal', 'Federal'),
('Results', 'Results'),
('Others', 'Others'),
]
DISTRICT = [
    ('Achham', 'Achham'),
('Arghakhanchi', 'Arghakhanchi'),
('Baglung', 'Baglung'),
('Baitadi', 'Baitadi'),
('Bajhang', 'Bajhang'),
('Bajura', 'Bajura'),
('Banke', 'Banke'),
('Bara', 'Bara'),
('Bardiya', 'Bardiya'),
('Bhaktapur', 'Bhaktapur'),
('Bhojpur', 'Bhojpur'),
('Chitwan', 'Chitwan'),
('Dadeldhura', 'Dadeldhura'),
('Dailekh', 'Dailekh'),
('Dang', 'Dang'),
('Darchula', 'Darchula'),
('Dhading', 'Dhading'),
('Dhankuta', 'Dhankuta'),
('Dhanusa', 'Dhanusa'),
('Dolakha', 'Dolakha'),
('Dolpa', 'Dolpa'),
('Doti', 'Doti'),
('Gorkha', 'Gorkha'),
('Gulmi', 'Gulmi'),
('Humla', 'Humla'),
('Ilam', 'Ilam'),
('Jajarkot', 'Jajarkot'),
('Jhapa', 'Jhapa'),
('Jumla', 'Jumla'),
('Kailali', 'Kailali'),
('Kalikot', 'Kalikot'),
('Kanchanpur', 'Kanchanpur'),
('Kapilbastu', 'Kapilbastu'),
('Kaski', 'Kaski'),
('Kathmandu', 'Kathmandu'),
('Kavrepalanchok', 'Kavrepalanchok'),
('Khotang', 'Khotang'),
('Lalitpur', 'Lalitpur'),
('Lamjung', 'Lamjung'),
('Mahottari', 'Mahottari'),
('Makwanpur', 'Makwanpur'),
('Manang', 'Manang'),
('Morang', 'Morang'),
('Mugu', 'Mugu'),
('Mustang', 'Mustang'),
('Myagdi', 'Myagdi'),
('Nawalparasi', 'Nawalparasi'),
('Nuwakot', 'Nuwakot'),
('Okhaldhunga', 'Okhaldhunga'),
('Palpa', 'Palpa'),
('Panchthar', 'Panchthar'),
('Parbat', 'Parbat'),
('Parsa', 'Parsa'),
('Pyuthan', 'Pyuthan'),
('Ramechhap', 'Ramechhap'),
('Rasuwa', 'Rasuwa'),
('Rautahat', 'Rautahat'),
('Rolpa', 'Rolpa'),
('Rukum', 'Rukum'),
('Rupandehi', 'Rupandehi'),
('Salyan', 'Salyan'),
('Sankhuwasabha', 'Sankhuwasabha'),
('Saptari', 'Saptari'),
('Sarlahi', 'Sarlahi'),
('Sindhuli', 'Sindhuli'),
('Sindhupalchok', 'Sindhupalchok'),
('Siraha', 'Siraha'),
('Solukhumbu', 'Solukhumbu'),
('Sunsari', 'Sunsari'),
('Surkhet', 'Surkhet'),
('Syangja', 'Syangja'),
('Tanahu', 'Tanahu'),
('Taplejung', 'Taplejung'),
('Terhathum', 'Terhathum'),
('Udayapur', 'Udayapur'),
]
PROVINCE_NO = [ (1, 1),(2, 2),(3, 3),(4, 4),(5, 5),(6, 6),(7, 7)]
# Create your models here.
class SiteInformation(models.Model):
Logo=fields.FileField("Logo", format="Image")
Site_Title = models.CharField(max_length=100, null=False, blank=False)
Site_Meta_Key = models.CharField(max_length=160, null=False, blank=False)
Site_Meta_Description = models.TextField(max_length=160, null=False, blank=False)
Footer_Logo=fields.FileField("Footer Logo", format="Image")
def __str__(self):
return "Edit Here"
def __unicode__(self):
return "Edit Here"
class Meta:
verbose_name_plural = 'Site Information'
class AboutUs(models.Model):
Content=fields.RichTextField(null=True, blank=True)
def __str__(self):
return "About Us"
def __unicode__(self):
return "About Us"
class Meta:
verbose_name_plural = 'About Us'
class Data_template(Page):
pass
def __str__(self):
return "Projects"
class Meta:
verbose_name = 'Data'
verbose_name_plural = 'Dataset'
class Data(models.Model):
Data_Title = models.CharField(max_length=100, null=False, blank=False)
GitHub_Link = models.URLField()
added = models.DateTimeField(auto_now_add=True)
type=models.CharField(max_length=100, null=True, blank=True, choices=DATASET_TYPES)
district=models.CharField(max_length=100, null=True, blank=True,choices=DISTRICT)
province=models.IntegerField(null=True, blank=True,choices=PROVINCE_NO)
def __str__(self):
return self.Data_Title
def __unicode__(self):
return self.Data_Title
class Visualization_template(Page):
pass
def __str__(self):
return "Visualization"
class Meta:
        verbose_name = 'Visualization'
        verbose_name_plural = 'Visualizations'
class Visualization(models.Model):
Data_Title = models.CharField(max_length=100, null=False, blank=False)
Inforgraphic =fields.FileField("Viusalization Image", format="Image")
GitHub_Link = models.URLField(null=True, blank=True)
added = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True,)
def __str__(self):
return self.Data_Title
def __unicode__(self):
return self.Data_Title
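# Illustrative helper (hypothetical, not referenced elsewhere in this app): shows how the
# Data model above can be queried using the DATASET_TYPES and PROVINCE_NO choices.
def recent_datasets(dataset_type, province=None, limit=10):
    """Return the most recently added Data rows for a dataset type, optionally
    restricted to a single province number."""
    queryset = Data.objects.filter(type=dataset_type).order_by('-added')
    if province is not None:
        queryset = queryset.filter(province=province)
    return queryset[:limit]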
| 1.804688
| 2
|